diff --git a/src/alloy/compiler/passes/context_promotion_pass.cc b/src/alloy/compiler/passes/context_promotion_pass.cc
index 6d9e062f1..d2e29fcec 100644
--- a/src/alloy/compiler/passes/context_promotion_pass.cc
+++ b/src/alloy/compiler/passes/context_promotion_pass.cc
@@ -120,7 +120,6 @@ void ContextPromotionPass::RemoveDeadStoresBlock(Block* block) {
     Instr* prev = i->prev;
     if (i->opcode == &OPCODE_STORE_CONTEXT_info) {
       size_t offset = i->src1.offset;
-      Value* value = i->src2.value;
       if (context_values_[offset] != token) {
         // Mark offset as written to.
         context_values_[offset] = token;
diff --git a/src/alloy/frontend/ppc/ppc_emit_altivec.cc b/src/alloy/frontend/ppc/ppc_emit_altivec.cc
index c6f34dad6..181ba4378 100644
--- a/src/alloy/frontend/ppc/ppc_emit_altivec.cc
+++ b/src/alloy/frontend/ppc/ppc_emit_altivec.cc
@@ -334,7 +334,6 @@ int InstrEmit_stvrx_(PPCFunctionBuilder& f, InstrData& i, uint32_t vd, uint32_t
   // we could optimize this to prevent the other load/mask, in that case.
   Value* ea = ra ? f.Add(f.LoadGPR(ra), f.LoadGPR(rb)) : f.LoadGPR(rb);
   Value* eb = f.And(f.Truncate(ea, INT8_TYPE), f.LoadConstant((int8_t)0xF));
-  Value* ebits = f.Mul(eb, f.LoadConstant((int8_t)8));
   Value* new_value = f.LoadVR(vd);
   // ea &= ~0xF (load takes care of this)
   Value* old_value = f.ByteSwap(f.Load(ea, VEC128_TYPE));
diff --git a/src/alloy/frontend/ppc/ppc_emit_alu.cc b/src/alloy/frontend/ppc/ppc_emit_alu.cc
index c05f60267..5c0206f0e 100644
--- a/src/alloy/frontend/ppc/ppc_emit_alu.cc
+++ b/src/alloy/frontend/ppc/ppc_emit_alu.cc
@@ -957,7 +957,6 @@ XEEMITTER(rlwimix, 0x50000000, M )(PPCFunctionBuilder& f, InstrData& i) {
     v = f.And(v, f.LoadConstant(m));
   }
   v = f.ZeroExtend(v, INT64_TYPE);
-  Value* ra = f.LoadGPR(i.M.RA);
   v = f.Or(v, f.And(f.LoadGPR(i.M.RA), f.LoadConstant((~(uint64_t)m))));
   if (i.M.Rc) {
     f.UpdateCR(0, v);