Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs   6
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs              4
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs            8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs              4
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs            19
5 files changed, 23 insertions, 18 deletions
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index ffc8f55bc0c..369ca950e87 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -464,13 +464,13 @@ fn push_debuginfo_type_name<'tcx>(
 
             // calculate the range of values for the dataful variant
             let dataful_discriminant_range =
-                dataful_variant_layout.largest_niche().unwrap().scalar.valid_range;
+                dataful_variant_layout.largest_niche().unwrap().valid_range;
 
             let min = dataful_discriminant_range.start;
-            let min = tag.value.size(&tcx).truncate(min);
+            let min = tag.size(&tcx).truncate(min);
 
             let max = dataful_discriminant_range.end;
-            let max = tag.value.size(&tcx).truncate(max);
+            let max = tag.size(&tcx).truncate(max);
 
             let dataful_variant_name = variant_name(*dataful_variant);
             write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index a87daa8d6b8..b7d760bfbab 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1572,7 +1572,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match (src.layout.abi, dst.layout.abi) {
             (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
                 // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
-                if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
+                if (src_scalar.primitive() == abi::Pointer)
+                    == (dst_scalar.primitive() == abi::Pointer)
+                {
                     assert_eq!(src.layout.size, dst.layout.size);
 
                     // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 858f71ebc39..08be4c0a7b6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -207,11 +207,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             // Extract a scalar component from a pair.
             (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bx.cx()));
+                    assert_eq!(field.size, a.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
                 } else {
-                    assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                    assert_eq!(field.size, b.value.size(bx.cx()));
+                    assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+                    assert_eq!(field.size, b.size(bx.cx()));
                     OperandValue::Immediate(b_llval)
                 }
             }
@@ -316,7 +316,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                     bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                 };
                 let ty = bx.backend_type(dest.layout);
-                let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
+                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
 
                 let llptr = bx.struct_gep(ty, dest.llval, 0);
                 let val = bx.from_immediate(a);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 17cfb6c5dfb..cf69c8fd8fd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -100,7 +100,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 self.llval
             }
             Abi::ScalarPair(a, b)
-                if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
             {
                 // Offset matches second field.
                 let ty = bx.backend_type(self.layout);
@@ -234,7 +234,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         // Decode the discriminant (specifically if it's niche-encoded).
         match *tag_encoding {
             TagEncoding::Direct => {
-                let signed = match tag_scalar.value {
+                let signed = match tag_scalar.primitive() {
                     // We use `i1` for bytes that are always `0` or `1`,
                     // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                     // let LLVM interpret the `i1` as signed, because
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 68decce82ab..fd29c9e281b 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -299,7 +299,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                         let mut signed = false;
                         if let Abi::Scalar(scalar) = operand.layout.abi {
-                            if let Int(_, s) = scalar.value {
+                            if let Int(_, s) = scalar.primitive() {
                                 // We use `i1` for bytes that are always `0` or `1`,
                                 // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                 // let LLVM interpret the `i1` as signed, because
@@ -307,15 +307,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                 signed = !scalar.is_bool() && s;
 
                                 if !scalar.is_always_valid(bx.cx())
-                                    && scalar.valid_range.end >= scalar.valid_range.start
+                                    && scalar.valid_range(bx.cx()).end
+                                        >= scalar.valid_range(bx.cx()).start
                                 {
                                     // We want `table[e as usize ± k]` to not
                                     // have bound checks, and this is the most
                                     // convenient place to put the `assume`s.
-                                    if scalar.valid_range.start > 0 {
-                                        let enum_value_lower_bound = bx
-                                            .cx()
-                                            .const_uint_big(ll_t_in, scalar.valid_range.start);
+                                    if scalar.valid_range(bx.cx()).start > 0 {
+                                        let enum_value_lower_bound = bx.cx().const_uint_big(
+                                            ll_t_in,
+                                            scalar.valid_range(bx.cx()).start,
+                                        );
                                         let cmp_start = bx.icmp(
                                             IntPredicate::IntUGE,
                                             llval,
@@ -324,8 +326,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                                         bx.assume(cmp_start);
                                     }
 
-                                    let enum_value_upper_bound =
-                                        bx.cx().const_uint_big(ll_t_in, scalar.valid_range.end);
+                                    let enum_value_upper_bound = bx
+                                        .cx()
+                                        .const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
                                     let cmp_end = bx.icmp(
                                         IntPredicate::IntULE,
                                         llval,
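Every hunk above applies the same mechanical migration: direct field reads on `abi::Scalar` (`scalar.value`, `scalar.valid_range`, `a.value.size(cx)`) are replaced by accessor calls (`scalar.primitive()`, `scalar.valid_range(bx.cx())`, `a.size(cx)`). A minimal, self-contained Rust sketch of that pattern follows. The `TargetCx`, `Primitive`, `WrappingRange`, and `Scalar` definitions are illustrative stand-ins, not rustc's actual `rustc_target::abi` types; only the shape of the old-versus-new call sites mirrors the patch.

// Illustrative sketch only: toy stand-ins for the rustc types touched by this diff.

/// Hypothetical stand-in for a layout/target context such as `bx.cx()` in the diff.
struct TargetCx {
    pointer_size: u64,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Primitive {
    Int { bytes: u64, signed: bool },
    Pointer,
}

impl Primitive {
    fn size(&self, cx: &TargetCx) -> u64 {
        match *self {
            Primitive::Int { bytes, .. } => bytes,
            Primitive::Pointer => cx.pointer_size,
        }
    }
}

#[derive(Clone, Copy, Debug)]
struct WrappingRange {
    start: u128,
    end: u128,
}

/// Illustrative stand-in for `abi::Scalar`: callers go through accessor
/// methods instead of reading the fields directly.
#[derive(Clone, Copy, Debug)]
struct Scalar {
    value: Primitive,
    valid_range: WrappingRange,
}

impl Scalar {
    // Mirrors `scalar.primitive()` in the diff.
    fn primitive(&self) -> Primitive {
        self.value
    }
    // Mirrors `scalar.valid_range(bx.cx())`; the context argument is unused in
    // this toy model but kept so the call shape matches the new code.
    fn valid_range(&self, _cx: &TargetCx) -> WrappingRange {
        self.valid_range
    }
    // Forwarding helper so callers write `scalar.size(cx)` rather than
    // `scalar.value.size(cx)`.
    fn size(&self, cx: &TargetCx) -> u64 {
        self.primitive().size(cx)
    }
}

fn main() {
    let cx = TargetCx { pointer_size: 8 };
    // A one-byte tag whose valid values are 0..=2, e.g. a three-variant enum.
    let tag = Scalar {
        value: Primitive::Int { bytes: 1, signed: false },
        valid_range: WrappingRange { start: 0, end: 2 },
    };

    // Old call sites:  tag.value.size(&cx), tag.valid_range.start
    // New call sites:  tag.size(&cx),       tag.valid_range(&cx).start
    assert_eq!(tag.size(&cx), 1);
    assert!(tag.valid_range(&cx).end >= tag.valid_range(&cx).start);
    assert!(matches!(tag.primitive(), Primitive::Int { .. }));
    println!("tag occupies {} byte(s)", tag.size(&cx));
}

One payoff of routing callers through methods like this is that the representation behind `Scalar` can later change shape without editing every call site again; the accessors keep the old field-style information reachable under the new names.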
