about summary refs log tree commit diff
path: root/compiler/rustc_codegen_ssa/src/mir
diff options
context:
space:
mode:
author: Nikita Popov <nikita.ppv@gmail.com> 2021-07-04 17:49:51 +0200
committer: Nikita Popov <nikita.ppv@gmail.com> 2021-07-09 22:00:19 +0200
commit: 33e9a6b565ddd7f20a5fd3f455eb2f3109d41801 (patch)
tree: 104635f13538c1b8ed8157802f77289e12effc79 /compiler/rustc_codegen_ssa/src/mir
parent: 619c27a53959e2d79aadb01cba5b2c49756df771 (diff)
download: rust-33e9a6b565ddd7f20a5fd3f455eb2f3109d41801.tar.gz
download: rust-33e9a6b565ddd7f20a5fd3f455eb2f3109d41801.zip
Pass type when creating atomic load
Instead of determining it from the pointer type, explicitly pass
the type to load.
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/intrinsic.rs9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8502309b90e..56ff1b3934c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             if ty.is_unsafe_ptr() {
                                 // Some platforms do not support atomic operations on pointers,
                                 // so we cast to integer first...
-                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                                let llty = bx.type_isize();
+                                let ptr_llty = bx.type_ptr_to(llty);
                                 source = bx.pointercast(source, ptr_llty);
-                            }
-                            let result = bx.atomic_load(source, order, size);
-                            if ty.is_unsafe_ptr() {
+                                let result = bx.atomic_load(llty, source, order, size);
                                 // ... and then cast the result back to a pointer
                                 bx.inttoptr(result, bx.backend_type(layout))
                             } else {
-                                result
+                                bx.atomic_load(bx.backend_type(layout), source, order, size)
                             }
                         } else {
                             return invalid_monomorphization(ty);