about summary refs log tree commit diff
path: root/compiler/rustc_codegen_cranelift
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_codegen_cranelift')
-rw-r--r--compiler/rustc_codegen_cranelift/.cirrus.yml25
-rw-r--r--compiler/rustc_codegen_cranelift/.gitattributes2
-rw-r--r--compiler/rustc_codegen_cranelift/.github/workflows/main.yml160
-rw-r--r--compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml82
-rw-r--r--compiler/rustc_codegen_cranelift/.gitignore17
-rw-r--r--compiler/rustc_codegen_cranelift/.vscode/settings.json73
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.lock304
-rw-r--r--compiler/rustc_codegen_cranelift/Cargo.toml74
-rw-r--r--compiler/rustc_codegen_cranelift/LICENSE-APACHE201
-rw-r--r--compiler/rustc_codegen_cranelift/LICENSE-MIT23
-rw-r--r--compiler/rustc_codegen_cranelift/Readme.md75
-rw-r--r--compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock327
-rw-r--r--compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml26
-rw-r--r--compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs1
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_backend.rs40
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs216
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/config.rs55
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/prepare.rs133
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/rustc_info.rs65
-rw-r--r--compiler/rustc_codegen_cranelift/build_system/utils.rs35
-rwxr-xr-xcompiler/rustc_codegen_cranelift/clean_all.sh6
-rw-r--r--compiler/rustc_codegen_cranelift/config.txt17
-rw-r--r--compiler/rustc_codegen_cranelift/docs/dwarf.md153
-rw-r--r--compiler/rustc_codegen_cranelift/docs/usage.md65
-rw-r--r--compiler/rustc_codegen_cranelift/example/alloc_example.rs38
-rw-r--r--compiler/rustc_codegen_cranelift/example/alloc_system.rs212
-rw-r--r--compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs69
-rw-r--r--compiler/rustc_codegen_cranelift/example/dst-field-align.rs67
-rw-r--r--compiler/rustc_codegen_cranelift/example/example.rs208
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core.rs630
-rw-r--r--compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs475
-rw-r--r--compiler/rustc_codegen_cranelift/example/mod_bench.rs36
-rw-r--r--compiler/rustc_codegen_cranelift/example/std_example.rs333
-rw-r--r--compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs97
-rw-r--r--compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs40
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0001-compiler-builtins-Disable-128bit-atomic-operations.patch48
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0001-rand-Enable-c2-chacha-simd-feature.patch23
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch33
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch83
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch90
-rw-r--r--compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch103
-rw-r--r--compiler/rustc_codegen_cranelift/rust-toolchain3
-rw-r--r--compiler/rustc_codegen_cranelift/rustfmt.toml4
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/Readme.md2
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/cargo.rs70
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/config.sh6
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/ext_config.sh32
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/filter_profile.rs126
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/rustup.sh58
-rw-r--r--compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh57
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh15
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh94
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/tests.sh154
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/comments.rs129
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/mod.rs555
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs300
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/returning.rs188
-rw-r--r--compiler/rustc_codegen_cranelift/src/allocator.rs139
-rw-r--r--compiler/rustc_codegen_cranelift/src/analyze.rs59
-rw-r--r--compiler/rustc_codegen_cranelift/src/archive.rs291
-rw-r--r--compiler/rustc_codegen_cranelift/src/backend.rs152
-rw-r--r--compiler/rustc_codegen_cranelift/src/base.rs920
-rw-r--r--compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs87
-rw-r--r--compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs91
-rw-r--r--compiler/rustc_codegen_cranelift/src/cast.rs179
-rw-r--r--compiler/rustc_codegen_cranelift/src/codegen_i128.rs167
-rw-r--r--compiler/rustc_codegen_cranelift/src/common.rs405
-rw-r--r--compiler/rustc_codegen_cranelift/src/compiler_builtins.rs41
-rw-r--r--compiler/rustc_codegen_cranelift/src/config.rs116
-rw-r--r--compiler/rustc_codegen_cranelift/src/constant.rs549
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs192
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs219
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs384
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs130
-rw-r--r--compiler/rustc_codegen_cranelift/src/discriminant.rs169
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/aot.rs420
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/jit.rs380
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/mod.rs53
-rw-r--r--compiler/rustc_codegen_cranelift/src/inline_asm.rs336
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs74
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs181
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs1126
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs281
-rw-r--r--compiler/rustc_codegen_cranelift/src/lib.rs306
-rw-r--r--compiler/rustc_codegen_cranelift/src/linkage.rs36
-rw-r--r--compiler/rustc_codegen_cranelift/src/main_shim.rs159
-rw-r--r--compiler/rustc_codegen_cranelift/src/metadata.rs20
-rw-r--r--compiler/rustc_codegen_cranelift/src/num.rs437
-rw-r--r--compiler/rustc_codegen_cranelift/src/optimize/mod.rs20
-rw-r--r--compiler/rustc_codegen_cranelift/src/optimize/peephole.rs106
-rw-r--r--compiler/rustc_codegen_cranelift/src/pointer.rs134
-rw-r--r--compiler/rustc_codegen_cranelift/src/pretty_clif.rs284
-rw-r--r--compiler/rustc_codegen_cranelift/src/toolchain.rs31
-rw-r--r--compiler/rustc_codegen_cranelift/src/trap.rs78
-rw-r--r--compiler/rustc_codegen_cranelift/src/unsize.rs211
-rw-r--r--compiler/rustc_codegen_cranelift/src/value_and_place.rs730
-rw-r--r--compiler/rustc_codegen_cranelift/src/vtable.rs79
-rwxr-xr-xcompiler/rustc_codegen_cranelift/test.sh13
-rwxr-xr-xcompiler/rustc_codegen_cranelift/y.rs153
99 files changed, 16491 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_cranelift/.cirrus.yml b/compiler/rustc_codegen_cranelift/.cirrus.yml
new file mode 100644
index 00000000000..61da6a2491c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.cirrus.yml
@@ -0,0 +1,25 @@
+task:
+  name: freebsd
+  freebsd_instance:
+    image: freebsd-12-1-release-amd64
+  setup_rust_script:
+    - pkg install -y curl git bash
+    - curl https://sh.rustup.rs -sSf --output rustup.sh
+    - sh rustup.sh --default-toolchain none -y --profile=minimal
+  cargo_bin_cache:
+    folder: ~/.cargo/bin
+  target_cache:
+    folder: target
+  prepare_script:
+    - . $HOME/.cargo/env
+    - git config --global user.email "user@example.com"
+    - git config --global user.name "User"
+    - ./y.rs prepare
+  test_script:
+    - . $HOME/.cargo/env
+    - # Enable backtraces for easier debugging
+    - export RUST_BACKTRACE=1
+    - # Reduce amount of benchmark runs as they are slow
+    - export COMPILE_RUNS=2
+    - export RUN_RUNS=2
+    - ./test.sh
diff --git a/compiler/rustc_codegen_cranelift/.gitattributes b/compiler/rustc_codegen_cranelift/.gitattributes
new file mode 100644
index 00000000000..0ceb3fe646c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.gitattributes
@@ -0,0 +1,2 @@
+* text=auto eol=lf
+*.rs diff=rust
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
new file mode 100644
index 00000000000..f81ac877260
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -0,0 +1,160 @@
+name: CI
+
+on:
+  - push
+  - pull_request
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    timeout-minutes: 60
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: ubuntu-latest
+          - os: macos-latest
+          # cross-compile from Linux to Windows using mingw
+          - os: ubuntu-latest
+            env:
+              TARGET_TRIPLE: x86_64-pc-windows-gnu
+          - os: ubuntu-latest
+            env:
+              TARGET_TRIPLE: aarch64-unknown-linux-gnu
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v2
+      with:
+        path: ~/.cargo/bin
+        key: ${{ runner.os }}-cargo-installed-crates
+
+    - name: Cache cargo registry and index
+      uses: actions/cache@v2
+      with:
+        path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Install MinGW toolchain and wine
+      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+      run: |
+        sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
+        rustup target add x86_64-pc-windows-gnu
+
+    - name: Install AArch64 toolchain and qemu
+      if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
+      run: |
+        sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "user@example.com"
+        git config --global user.name "User"
+        ./y.rs prepare
+
+    - name: Build
+      run: ./y.rs build --sysroot none
+
+    - name: Test
+      env:
+        TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        # Reduce amount of benchmark runs as they are slow
+        export COMPILE_RUNS=2
+        export RUN_RUNS=2
+
+        # Enable extra checks
+        export CG_CLIF_ENABLE_VERIFIER=1
+
+        ./test.sh
+
+    - name: Package prebuilt cg_clif
+      run: tar cvfJ cg_clif.tar.xz build
+
+    - name: Upload prebuilt cg_clif
+      if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
+      uses: actions/upload-artifact@v2
+      with:
+        name: cg_clif-${{ runner.os }}
+        path: cg_clif.tar.xz
+
+    - name: Upload prebuilt cg_clif (cross compile)
+      if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+      uses: actions/upload-artifact@v2
+      with:
+        name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
+        path: cg_clif.tar.xz
+
+  build_windows:
+    runs-on: windows-latest
+    timeout-minutes: 60
+
+    steps:
+    - uses: actions/checkout@v2
+
+    #- name: Cache cargo installed crates
+    #  uses: actions/cache@v2
+    #  with:
+    #    path: ~/.cargo/bin
+    #    key: ${{ runner.os }}-cargo-installed-crates
+
+    #- name: Cache cargo registry and index
+    #  uses: actions/cache@v2
+    #  with:
+    #    path: |
+    #        ~/.cargo/registry
+    #        ~/.cargo/git
+    #    key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    #- name: Cache cargo target dir
+    #  uses: actions/cache@v2
+    #  with:
+    #    path: target
+    #    key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "user@example.com"
+        git config --global user.name "User"
+        git config --global core.autocrlf false
+        rustup set default-host x86_64-pc-windows-gnu
+        rustc y.rs -o y.exe -g
+        ./y.exe prepare
+
+    - name: Build
+      #name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        #export RUST_BACKTRACE=1
+
+        # Reduce amount of benchmark runs as they are slow
+        #export COMPILE_RUNS=2
+        #export RUN_RUNS=2
+
+        # Enable extra checks
+        #export CG_CLIF_ENABLE_VERIFIER=1
+
+        ./y.exe build
+
+    #- name: Package prebuilt cg_clif
+    #  run: tar cvfJ cg_clif.tar.xz build
+
+    #- name: Upload prebuilt cg_clif
+    #  uses: actions/upload-artifact@v2
+    #  with:
+    #    name: cg_clif-${{ runner.os }}
+    #    path: cg_clif.tar.xz
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml b/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml
new file mode 100644
index 00000000000..1c08e5ece33
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml
@@ -0,0 +1,82 @@
+name: Various rustc tests
+
+on:
+  - push
+
+jobs:
+  bootstrap_rustc:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v2
+      with:
+        path: ~/.cargo/bin
+        key: ${{ runner.os }}-cargo-installed-crates
+
+    - name: Cache cargo registry and index
+      uses: actions/cache@v2
+      with:
+        path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "user@example.com"
+        git config --global user.name "User"
+        ./y.rs prepare
+
+    - name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        ./scripts/test_bootstrap.sh
+  rustc_test_suite:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache cargo installed crates
+      uses: actions/cache@v2
+      with:
+        path: ~/.cargo/bin
+        key: ${{ runner.os }}-cargo-installed-crates
+
+    - name: Cache cargo registry and index
+      uses: actions/cache@v2
+      with:
+        path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+        key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Cache cargo target dir
+      uses: actions/cache@v2
+      with:
+        path: target
+        key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+    - name: Prepare dependencies
+      run: |
+        git config --global user.email "user@example.com"
+        git config --global user.name "User"
+        ./y.rs prepare
+
+    - name: Test
+      run: |
+        # Enable backtraces for easier debugging
+        export RUST_BACKTRACE=1
+
+        ./scripts/test_rustc_tests.sh
diff --git a/compiler/rustc_codegen_cranelift/.gitignore b/compiler/rustc_codegen_cranelift/.gitignore
new file mode 100644
index 00000000000..12e779fe7c7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.gitignore
@@ -0,0 +1,17 @@
+target
+**/*.rs.bk
+*.rlib
+*.o
+perf.data
+perf.data.old
+*.events
+*.string*
+/y.bin
+/build
+/build_sysroot/sysroot_src
+/build_sysroot/compiler-builtins
+/build_sysroot/rustc_version
+/rust
+/rand
+/regex
+/simple-raytracer
diff --git a/compiler/rustc_codegen_cranelift/.vscode/settings.json b/compiler/rustc_codegen_cranelift/.vscode/settings.json
new file mode 100644
index 00000000000..f62e59cefc2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.vscode/settings.json
@@ -0,0 +1,73 @@
+{
+    // source for rustc_* is not included in the rust-src component; disable the errors about this
+    "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "unresolved-macro-call"],
+    "rust-analyzer.assist.importGranularity": "module",
+    "rust-analyzer.assist.importEnforceGranularity": true,
+    "rust-analyzer.assist.importPrefix": "crate",
+    "rust-analyzer.cargo.runBuildScripts": true,
+    "rust-analyzer.linkedProjects": [
+        "./Cargo.toml",
+        //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+        {
+            "roots": [
+                "./example/mini_core.rs",
+                "./example/mini_core_hello_world.rs",
+                "./example/mod_bench.rs"
+            ],
+            "crates": [
+                {
+                    "root_module": "./example/mini_core.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./example/mini_core_hello_world.rs",
+                    "edition": "2018",
+                    "deps": [{ "crate": 0, "name": "mini_core" }],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./example/mod_bench.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+            ]
+        },
+        {
+            "roots": ["./scripts/filter_profile.rs"],
+            "crates": [
+                {
+                    "root_module": "./scripts/filter_profile.rs",
+                    "edition": "2018",
+                    "deps": [{ "crate": 1, "name": "std" }],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+            ]
+        },
+        {
+            "roots": ["./y.rs"],
+            "crates": [
+                {
+                    "root_module": "./y.rs",
+                    "edition": "2018",
+                    "deps": [{ "crate": 1, "name": "std" }],
+                    "cfg": [],
+                },
+                {
+                    "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+                    "edition": "2018",
+                    "deps": [],
+                    "cfg": [],
+                },
+            ]
+        }
+    ]
+}
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
new file mode 100644
index 00000000000..56d0974b253
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -0,0 +1,304 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "anyhow"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "gimli",
+ "log",
+ "regalloc",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+
+[[package]]
+name = "cranelift-entity"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+
+[[package]]
+name = "cranelift-frontend"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-jit"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "winapi",
+]
+
+[[package]]
+name = "cranelift-module"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "log",
+]
+
+[[package]]
+name = "cranelift-native"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "cranelift-codegen",
+ "libc",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
+version = "0.75.0"
+source = "git+https://github.com/bytecodealliance/wasmtime.git?branch=main#c71ad9490e7f3e19bbcae7e28bbe50f8a0b4a5d8"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "gimli"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+
+[[package]]
+name = "indexmap"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.97"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
+
+[[package]]
+name = "libloading"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc"
+
+[[package]]
+name = "object"
+version = "0.25.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7"
+dependencies = [
+ "crc32fast",
+ "indexmap",
+ "memchr",
+]
+
+[[package]]
+name = "regalloc"
+version = "0.0.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5"
+dependencies = [
+ "log",
+ "rustc-hash",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
new file mode 100644
index 00000000000..ef68d7ee532
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -0,0 +1,74 @@
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+edition = "2018"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
+cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", features = ["unwind", "all-arch"] }
+cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
+cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
+cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
+cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main", optional = true }
+cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "main" }
+target-lexicon = "0.12.0"
+gimli = { version = "0.24.0", default-features = false, features = ["write"]}
+object = { version = "0.25.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.0.2"
+libloading = { version = "0.6.0", optional = true }
+smallvec = "1.6.1"
+
+# Uncomment to use local checkout of cranelift
+#[patch."https://github.com/bytecodealliance/wasmtime.git"]
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-native = { path = "../wasmtime/cranelift/native" }
+#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#[patch.crates-io]
+#gimli = { path = "../" }
+
+[features]
+default = ["jit", "inline_asm"]
+jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+[profile.dev]
+# By compiling dependencies with optimizations, performing tests gets much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_cranelift]
+# Disabling optimizations for cg_clif itself makes compilation after a change faster.
+opt-level = 0
+
+[profile.release.package.rustc_codegen_cranelift]
+incremental = true
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
+
+[profile.dev.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[profile.release.package.cranelift-codegen-meta]
+opt-level = 0
+debug = false
+
+[package.metadata.rust-analyzer]
+rustc_private = true
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-APACHE b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-MIT b/compiler/rustc_codegen_cranelift/LICENSE-MIT
new file mode 100644
index 00000000000..31aa79387f2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md
new file mode 100644
index 00000000000..dad8ed90b53
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Readme.md
@@ -0,0 +1,75 @@
+# Cranelift codegen backend for Rust
+
+The goal of this project is to create an alternative codegen backend for the Rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/main/cranelift).
+This has the potential to improve compilation times in debug mode.
+If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
+If not, please open an issue.
+
+## Building and testing
+
+```bash
+$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
+$ cd rustc_codegen_cranelift
+$ ./y.rs prepare # download and patch sysroot src and install hyperfine for benchmarking
+$ ./y.rs build
+```
+
+To run the test suite replace the last command with:
+
+```bash
+$ ./test.sh
+```
+
+This will implicitly build cg_clif too. Both `y.rs build` and `test.sh` accept a `--debug` argument to
+build in debug mode.
+
+Alternatively, you can download a pre-built version from [GHA]. It is listed in the artifacts section
+of workflow runs. Unfortunately due to GHA restrictions you need to be logged in to access it.
+
+[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
+
+## Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`y.rs prepare` and `y.rs build` or `test.sh`).
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo build
+```
+
+This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+For additional ways to use rustc_codegen_cranelift like the JIT mode see [usage.md](docs/usage.md).
+
+## Configuration
+
+See the documentation on the `BackendConfig` struct in [config.rs](src/config.rs) for all
+configuration options.
+
+## Not yet supported
+
+* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
+    * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
+      `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
+      have to specify specific registers instead.
+* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
+
+## License
+
+Licensed under either of
+
+  * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
+    http://www.apache.org/licenses/LICENSE-2.0)
+  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+    http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you shall be dual licensed as above, without any
+additional terms or conditions.
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
new file mode 100644
index 00000000000..46f661107e7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
@@ -0,0 +1,327 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "cc"
+version = "1.0.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.46"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.97"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "std_detect",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "std_detect"
+version = "0.1.5"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "term"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+ "term",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
new file mode 100644
index 00000000000..04748d5dbab
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+compiler_builtins = { version = "0.1.39", default-features = false, features = ["no-asm"] }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+compiler_builtins = { path = "./compiler-builtins" }
+
+[profile.dev]
+lto = "off"
+
+[profile.release]
+debug = true
+incremental = true
+lto = "off"
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
new file mode 100644
index 00000000000..0c9ac1ac8e4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
new file mode 100644
index 00000000000..1df2bcc4541
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
@@ -0,0 +1,40 @@
+use std::env;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+pub(crate) fn build_backend(channel: &str, host_triple: &str) -> PathBuf {
+    let mut cmd = Command::new("cargo");
+    cmd.arg("build").arg("--target").arg(host_triple);
+
+    match channel {
+        "debug" => {}
+        "release" => {
+            cmd.arg("--release");
+        }
+        _ => unreachable!(),
+    }
+
+    if cfg!(unix) {
+        if cfg!(target_os = "macos") {
+            cmd.env(
+                "RUSTFLAGS",
+                "-Csplit-debuginfo=unpacked \
+                -Clink-arg=-Wl,-rpath,@loader_path/../lib \
+                -Zosx-rpath-install-name"
+                    .to_string()
+                    + env::var("RUSTFLAGS").as_deref().unwrap_or(""),
+            );
+        } else {
+            cmd.env(
+                "RUSTFLAGS",
+                "-Clink-arg=-Wl,-rpath=$ORIGIN/../lib ".to_string()
+                    + env::var("RUSTFLAGS").as_deref().unwrap_or(""),
+            );
+        }
+    }
+
+    eprintln!("[BUILD] rustc_codegen_cranelift");
+    crate::utils::spawn_and_wait(cmd);
+
+    Path::new("target").join(host_triple).join(channel)
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
new file mode 100644
index 00000000000..9fb88c27961
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
@@ -0,0 +1,216 @@
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{self, Command};
+
+use crate::rustc_info::{get_file_name, get_rustc_version};
+use crate::utils::{spawn_and_wait, try_hard_link};
+use crate::SysrootKind;
+
+pub(crate) fn build_sysroot(
+    channel: &str,
+    sysroot_kind: SysrootKind,
+    target_dir: &Path,
+    cg_clif_build_dir: PathBuf,
+    host_triple: &str,
+    target_triple: &str,
+) {
+    if target_dir.exists() {
+        fs::remove_dir_all(target_dir).unwrap();
+    }
+    fs::create_dir_all(target_dir.join("bin")).unwrap();
+    fs::create_dir_all(target_dir.join("lib")).unwrap();
+
+    // Copy the backend
+    for file in ["cg_clif", "cg_clif_build_sysroot"] {
+        try_hard_link(
+            cg_clif_build_dir.join(get_file_name(file, "bin")),
+            target_dir.join("bin").join(get_file_name(file, "bin")),
+        );
+    }
+
+    let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
+    try_hard_link(
+        cg_clif_build_dir.join(&cg_clif_dylib),
+        target_dir
+            .join(if cfg!(windows) {
+                // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
+                // binaries.
+                "bin"
+            } else {
+                "lib"
+            })
+            .join(cg_clif_dylib),
+    );
+
+    // Build and copy cargo wrapper
+    let mut build_cargo_wrapper_cmd = Command::new("rustc");
+    build_cargo_wrapper_cmd
+        .arg("scripts/cargo.rs")
+        .arg("-o")
+        .arg(target_dir.join("cargo"))
+        .arg("-g");
+    spawn_and_wait(build_cargo_wrapper_cmd);
+
+    let default_sysroot = crate::rustc_info::get_default_sysroot();
+
+    let rustlib = target_dir.join("lib").join("rustlib");
+    let host_rustlib_lib = rustlib.join(host_triple).join("lib");
+    let target_rustlib_lib = rustlib.join(target_triple).join("lib");
+    fs::create_dir_all(&host_rustlib_lib).unwrap();
+    fs::create_dir_all(&target_rustlib_lib).unwrap();
+
+    if target_triple == "x86_64-pc-windows-gnu" {
+        if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
+            eprintln!(
+                "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
+                to compile a sysroot for it.",
+            );
+            process::exit(1);
+        }
+        for file in fs::read_dir(
+            default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+        )
+        .unwrap()
+        {
+            let file = file.unwrap().path();
+            if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
+                continue; // only copy object files
+            }
+            try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+        }
+    }
+
+    match sysroot_kind {
+        SysrootKind::None => {} // Nothing to do
+        SysrootKind::Llvm => {
+            for file in fs::read_dir(
+                default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
+            )
+            .unwrap()
+            {
+                let file = file.unwrap().path();
+                let file_name_str = file.file_name().unwrap().to_str().unwrap();
+                if file_name_str.contains("rustc_")
+                    || file_name_str.contains("chalk")
+                    || file_name_str.contains("tracing")
+                    || file_name_str.contains("regex")
+                {
+                    // These are large crates that are part of the rustc-dev component and are not
+                    // necessary to run regular programs.
+                    continue;
+                }
+                try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
+            }
+
+            if target_triple != host_triple {
+                for file in fs::read_dir(
+                    default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+                )
+                .unwrap()
+                {
+                    let file = file.unwrap().path();
+                    try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+                }
+            }
+        }
+        SysrootKind::Clif => {
+            build_clif_sysroot_for_triple(channel, target_dir, host_triple, None);
+
+            if host_triple != target_triple {
+                // When cross-compiling it is often necessary to manually pick the right linker
+                let linker = if target_triple == "aarch64-unknown-linux-gnu" {
+                    Some("aarch64-linux-gnu-gcc")
+                } else {
+                    None
+                };
+                build_clif_sysroot_for_triple(channel, target_dir, target_triple, linker);
+            }
+
+            // Copy std for the host to the lib dir. This is necessary for the jit mode to find
+            // libstd.
+            for file in fs::read_dir(host_rustlib_lib).unwrap() {
+                let file = file.unwrap().path();
+                if file.file_name().unwrap().to_str().unwrap().contains("std-") {
+                    try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
+                }
+            }
+        }
+    }
+}
+
+fn build_clif_sysroot_for_triple(
+    channel: &str,
+    target_dir: &Path,
+    triple: &str,
+    linker: Option<&str>,
+) {
+    match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
+        Err(e) => {
+            eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
+            eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
+            process::exit(1);
+        }
+        Ok(source_version) => {
+            let rustc_version = get_rustc_version();
+            if source_version != rustc_version {
+                eprintln!("The patched sysroot source is outdated");
+                eprintln!("Source version: {}", source_version.trim());
+                eprintln!("Rustc version:  {}", rustc_version.trim());
+                eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
+                process::exit(1);
+            }
+        }
+    }
+
+    let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
+
+    if !crate::config::get_bool("keep_sysroot") {
+        // Cleanup the target dir with the exception of build scripts and the incremental cache
+        for dir in ["build", "deps", "examples", "native"] {
+            if build_dir.join(dir).exists() {
+                fs::remove_dir_all(build_dir.join(dir)).unwrap();
+            }
+        }
+    }
+
+    // Build sysroot
+    let mut build_cmd = Command::new("cargo");
+    build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
+    let mut rustflags = "--clif -Zforce-unstable-if-unmarked".to_string();
+    if channel == "release" {
+        build_cmd.arg("--release");
+        rustflags.push_str(" -Zmir-opt-level=3");
+    }
+    if let Some(linker) = linker {
+        use std::fmt::Write;
+        write!(rustflags, " -Clinker={}", linker).unwrap();
+    }
+    build_cmd.env("RUSTFLAGS", rustflags);
+    build_cmd.env(
+        "RUSTC",
+        env::current_dir().unwrap().join(target_dir).join("bin").join("cg_clif_build_sysroot"),
+    );
+    // FIXME Enable incremental again once rust-lang/rust#74946 is fixed
+    build_cmd.env("CARGO_INCREMENTAL", "0").env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
+    spawn_and_wait(build_cmd);
+
+    // Copy all relevant files to the sysroot
+    for entry in
+        fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
+            .unwrap()
+    {
+        let entry = entry.unwrap();
+        if let Some(ext) = entry.path().extension() {
+            if ext == "rmeta" || ext == "d" || ext == "dSYM" {
+                continue;
+            }
+        } else {
+            continue;
+        };
+        try_hard_link(
+            entry.path(),
+            target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
+        );
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/config.rs b/compiler/rustc_codegen_cranelift/build_system/config.rs
new file mode 100644
index 00000000000..ef540cf1f82
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/config.rs
@@ -0,0 +1,55 @@
+use std::{fs, process};
+
+fn load_config_file() -> Vec<(String, Option<String>)> {
+    fs::read_to_string("config.txt")
+        .unwrap()
+        .lines()
+        .map(|line| if let Some((line, _comment)) = line.split_once('#') { line } else { line })
+        .map(|line| line.trim())
+        .filter(|line| !line.is_empty())
+        .map(|line| {
+            if let Some((key, val)) = line.split_once('=') {
+                (key.trim().to_owned(), Some(val.trim().to_owned()))
+            } else {
+                (line.to_owned(), None)
+            }
+        })
+        .collect()
+}
+
+pub(crate) fn get_bool(name: &str) -> bool {
+    let values = load_config_file()
+        .into_iter()
+        .filter(|(key, _)| key == name)
+        .map(|(_, val)| val)
+        .collect::<Vec<_>>();
+    if values.is_empty() {
+        false
+    } else {
+        if values.iter().any(|val| val.is_some()) {
+            eprintln!("Boolean config `{}` has a value", name);
+            process::exit(1);
+        }
+        true
+    }
+}
+
+pub(crate) fn get_value(name: &str) -> Option<String> {
+    let values = load_config_file()
+        .into_iter()
+        .filter(|(key, _)| key == name)
+        .map(|(_, val)| val)
+        .collect::<Vec<_>>();
+    if values.is_empty() {
+        None
+    } else if values.len() == 1 {
+        if values[0].is_none() {
+            eprintln!("Config `{}` missing value", name);
+            process::exit(1);
+        }
+        values.into_iter().next().unwrap()
+    } else {
+        eprintln!("Config `{}` given multiple values: {:?}", name, values);
+        process::exit(1);
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/prepare.rs b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
new file mode 100644
index 00000000000..401b8271abc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
@@ -0,0 +1,133 @@
+use std::env;
+use std::ffi::OsStr;
+use std::ffi::OsString;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
+use crate::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
+use crate::utils::{copy_dir_recursively, spawn_and_wait};
+
+pub(crate) fn prepare() {
+    prepare_sysroot();
+
+    eprintln!("[INSTALL] hyperfine");
+    Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
+
+    clone_repo(
+        "rand",
+        "https://github.com/rust-random/rand.git",
+        "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
+    );
+    apply_patches("rand", Path::new("rand"));
+
+    clone_repo(
+        "regex",
+        "https://github.com/rust-lang/regex.git",
+        "341f207c1071f7290e3f228c710817c280c8dca1",
+    );
+
+    clone_repo(
+        "simple-raytracer",
+        "https://github.com/ebobby/simple-raytracer",
+        "804a7a21b9e673a482797aa289a18ed480e4d813",
+    );
+
+    eprintln!("[LLVM BUILD] simple-raytracer");
+    let mut build_cmd = Command::new("cargo");
+    build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
+    spawn_and_wait(build_cmd);
+    fs::copy(
+        Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
+        // FIXME use get_file_name here too once testing is migrated to rust
+        "simple-raytracer/raytracer_cg_llvm",
+    )
+    .unwrap();
+}
+
+fn prepare_sysroot() {
+    let rustc_path = get_rustc_path();
+    let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
+    let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
+
+    assert!(sysroot_src_orig.exists());
+
+    if sysroot_src.exists() {
+        fs::remove_dir_all(&sysroot_src).unwrap();
+    }
+    fs::create_dir_all(sysroot_src.join("library")).unwrap();
+    eprintln!("[COPY] sysroot src");
+    copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
+
+    let rustc_version = get_rustc_version();
+    fs::write(
+        Path::new("build_sysroot").join("rustc_version"),
+        &rustc_version,
+    )
+    .unwrap();
+
+    eprintln!("[GIT] init");
+    let mut git_init_cmd = Command::new("git");
+    git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
+    spawn_and_wait(git_init_cmd);
+
+    let mut git_add_cmd = Command::new("git");
+    git_add_cmd.arg("add").arg(".").current_dir(&sysroot_src);
+    spawn_and_wait(git_add_cmd);
+
+    let mut git_commit_cmd = Command::new("git");
+    git_commit_cmd
+        .arg("commit")
+        .arg("-m")
+        .arg("Initial commit")
+        .arg("-q")
+        .current_dir(&sysroot_src);
+    spawn_and_wait(git_commit_cmd);
+
+    apply_patches("sysroot", &sysroot_src);
+
+    clone_repo(
+        "build_sysroot/compiler-builtins",
+        "https://github.com/rust-lang/compiler-builtins.git",
+        "0.1.46",
+    );
+    apply_patches("compiler-builtins", Path::new("build_sysroot/compiler-builtins"));
+}
+
+fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
+    eprintln!("[CLONE] {}", repo);
+    // Ignore exit code as the repo may already have been checked out
+    Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
+
+    let mut clean_cmd = Command::new("git");
+    clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
+    spawn_and_wait(clean_cmd);
+
+    let mut checkout_cmd = Command::new("git");
+    checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
+    spawn_and_wait(checkout_cmd);
+}
+
+fn get_patches(crate_name: &str) -> Vec<OsString> {
+    let mut patches: Vec<_> = fs::read_dir("patches")
+        .unwrap()
+        .map(|entry| entry.unwrap().path())
+        .filter(|path| path.extension() == Some(OsStr::new("patch")))
+        .map(|path| path.file_name().unwrap().to_owned())
+        .filter(|file_name| {
+            file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
+        })
+        .collect();
+    patches.sort();
+    patches
+}
+
+fn apply_patches(crate_name: &str, target_dir: &Path) {
+    for patch in get_patches(crate_name) {
+        eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
+        let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
+        let mut apply_patch_cmd = Command::new("git");
+        apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
+        spawn_and_wait(apply_patch_cmd);
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
new file mode 100644
index 00000000000..9206bb02bd3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
@@ -0,0 +1,65 @@
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+
+pub(crate) fn get_rustc_version() -> String {
+    let version_info =
+        Command::new("rustc").stderr(Stdio::inherit()).args(&["-V"]).output().unwrap().stdout;
+    String::from_utf8(version_info).unwrap()
+}
+
+pub(crate) fn get_host_triple() -> String {
+    let version_info =
+        Command::new("rustc").stderr(Stdio::inherit()).args(&["-vV"]).output().unwrap().stdout;
+    String::from_utf8(version_info)
+        .unwrap()
+        .lines()
+        .to_owned()
+        .find(|line| line.starts_with("host"))
+        .unwrap()
+        .split(":")
+        .nth(1)
+        .unwrap()
+        .trim()
+        .to_owned()
+}
+
+pub(crate) fn get_rustc_path() -> PathBuf {
+    let rustc_path = Command::new("rustup")
+        .stderr(Stdio::inherit())
+        .args(&["which", "rustc"])
+        .output()
+        .unwrap()
+        .stdout;
+    Path::new(String::from_utf8(rustc_path).unwrap().trim()).to_owned()
+}
+
+pub(crate) fn get_default_sysroot() -> PathBuf {
+    let default_sysroot = Command::new("rustc")
+        .stderr(Stdio::inherit())
+        .args(&["--print", "sysroot"])
+        .output()
+        .unwrap()
+        .stdout;
+    Path::new(String::from_utf8(default_sysroot).unwrap().trim()).to_owned()
+}
+
+pub(crate) fn get_file_name(crate_name: &str, crate_type: &str) -> String {
+    let file_name = Command::new("rustc")
+        .stderr(Stdio::inherit())
+        .args(&[
+            "--crate-name",
+            crate_name,
+            "--crate-type",
+            crate_type,
+            "--print",
+            "file-names",
+            "-",
+        ])
+        .output()
+        .unwrap()
+        .stdout;
+    let file_name = String::from_utf8(file_name).unwrap().trim().to_owned();
+    assert!(!file_name.contains('\n'));
+    assert!(file_name.contains(crate_name));
+    file_name
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/utils.rs b/compiler/rustc_codegen_cranelift/build_system/utils.rs
new file mode 100644
index 00000000000..12b5d70fad8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/utils.rs
@@ -0,0 +1,35 @@
+use std::fs;
+use std::path::Path;
+use std::process::{self, Command};
+
+#[track_caller]
+pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
+    let src = src.as_ref();
+    let dst = dst.as_ref();
+    if let Err(_) = fs::hard_link(src, dst) {
+        fs::copy(src, dst).unwrap(); // Fallback to copying if hardlinking failed
+    }
+}
+
+#[track_caller]
+pub(crate) fn spawn_and_wait(mut cmd: Command) {
+    if !cmd.spawn().unwrap().wait().unwrap().success() {
+        process::exit(1);
+    }
+}
+
+pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
+    for entry in fs::read_dir(from).unwrap() {
+        let entry = entry.unwrap();
+        let filename = entry.file_name();
+        if filename == "." || filename == ".." {
+            continue;
+        }
+        if entry.metadata().unwrap().is_dir() {
+            fs::create_dir(to.join(&filename)).unwrap();
+            copy_dir_recursively(&from.join(&filename), &to.join(&filename));
+        } else {
+            fs::copy(from.join(&filename), to.join(&filename)).unwrap();
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/clean_all.sh b/compiler/rustc_codegen_cranelift/clean_all.sh
new file mode 100755
index 00000000000..f4f8c82d69f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/clean_all.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+set -e
+
+rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
+rm -rf target/ build/ perf.data{,.old}
+rm -rf rand/ regex/ simple-raytracer/
diff --git a/compiler/rustc_codegen_cranelift/config.txt b/compiler/rustc_codegen_cranelift/config.txt
new file mode 100644
index 00000000000..b14db27d620
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/config.txt
@@ -0,0 +1,17 @@
+# This file allows configuring the build system.
+
+# Which triple to produce a compiler toolchain for.
+#
+# Defaults to the default triple of rustc on the host system.
+#host = x86_64-unknown-linux-gnu
+
+# Which triple to build libraries (core/alloc/std/test/proc_macro) for.
+#
+# Defaults to `host`.
+#target = x86_64-unknown-linux-gnu
+
+# Disables cleaning of the sysroot dir. This will cause old compiled artifacts to be re-used when
+# the sysroot source hasn't changed. This is useful when the codegen backend hasn't been modified.
+# This option can be changed while the build system is already running, as long as sysroot
+# building hasn't started yet.
+#keep_sysroot
diff --git a/compiler/rustc_codegen_cranelift/docs/dwarf.md b/compiler/rustc_codegen_cranelift/docs/dwarf.md
new file mode 100644
index 00000000000..502b1b03623
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/dwarf.md
@@ -0,0 +1,153 @@
+# Line number information
+
+Line number information maps between machine code instructions and the source level location.
+
+## Encoding
+
+The line number information is stored in the `.debug_line` section for ELF and `__debug_line`
+section of the `__DWARF` segment for Mach-O object files. The line number information contains a
+header followed by the line program. The line program is a program for a virtual machine with
+instructions like set line number for the current machine code instruction and advance the current
+machine code instruction.
+
+## Tips
+
+You need to set either `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges` of a
+`DW_TAG_compilation_unit` to the range of addresses in the compilation unit. After that you need
+to set `DW_AT_stmt_list` to the `.debug_line` section offset of the line program. Otherwise a
+debugger won't find the line number information. On macOS the debuginfo relocations **must** be
+section relative and not symbol relative.
+See [#303 (comment)](https://github.com/bjorn3/rustc_codegen_cranelift/issues/303#issuecomment-457825535)
+for more information.
+
+# Function debuginfo
+
+## Tips
+
+`DW_TAG_subprogram` requires `DW_AT_name`, `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges`.
+Otherwise gdb will silently skip it. When `DW_AT_high_pc` is a length instead of an address, the
+DWARF version must be at least 4.
+
+<details>
+<summary>IRC log of #gdb on irc.freenode.org at 2020-04-23</summary>
+
+```
+(13:46:11) bjorn3: i am writing a backend for a compiler that uses DWARF for debuginfo. for some reason gdb seems to completely ignore all DW_TAG_subprogram, while lldb works fine. any idea what the problem could be?
+(13:47:49) bjorn3: this is the output of llvm-dwarfdump: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(13:47:50) osa1: luispm: why is that problem not exists in 'commands'? (the target vs. host)
+(13:52:16) luispm: osa1, commands is a bit more high level. It executes isolated commands. Breakpoint conditions need to be evaluated in the context of a valid expression. That expression may involve variables, symbols etc.
+(13:52:36) luispm: osa1, Oh, i see your point now. Commands is only executed on the host.
+(13:53:18) luispm: osa1, The commands are not tied to the execution context of the debugged program. The breakpoint conditions determine if execution must stop or continue etc.
+(13:55:00) luispm: bjorn3, Likely something GDB thinks is wrong. Does enabling "set debug dwarf*" show anything?
+(13:56:01) bjorn3: luispm: no
+(13:56:12) bjorn3: for more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(13:58:16) osa1 verliet de ruimte (quit: Quit: osa1).
+(13:58:28) bjorn3: luispm: wait, for b m<TAB> it shows nothing, but when stepping into a new function it does
+(13:58:45) bjorn3: it still doesn't show anything for `info args` though
+(13:58:50) bjorn3: No symbol table info available.
+(14:00:50) luispm: bjorn3, Is that expected given the nature of the binary?
+(14:01:17) bjorn3: b main<TAB> may show nothing as I only set DW_AT_linkage_name and not DW_AT_name
+(14:01:24) bjorn3: info args should work though
+(14:03:26) luispm: Sorry, I'm not sure what's up. There may be a genuine bug there.
+(14:03:41) luispm: tromey (not currently in the channel, but maybe later today) may have more input.
+(14:04:08) bjorn3: okay, thanks luispm!
+(14:04:27) luispm: In the worst case, reporting a bug may prompt someone to look into that as well.
+(14:04:48) luispm: Or send an e-mail to the gdb@sourceware.org mailing list.
+(14:05:11) bjorn3: I don't know if it is a bug in gdb, or just me producing (slightly) wrong DWARF
+(14:39:40) irker749: gdb: tom binutils-gdb.git:master * 740480b88af / gdb/ChangeLog gdb/darwin-nat.c gdb/inferior.c gdb/inferior.h: Remove iterate_over_inferiors
+(15:22:45) irker749: gdb: tromey binutils-gdb.git:master * ecc6c6066b5 / gdb/ChangeLog gdb/dwarf2/read.c gdb/unittests/lookup_name_info-selftests.c: Fix Ada crash with .debug_names
+(15:23:13) bjorn3: tromey: ping
+(15:23:29) tromey: bjorn3: hey
+(15:24:16) bjorn3: I am writing a backend for a compiler which uses DWARF for debuginfo. I unfortunately can't get gdb to show arguments. lldb works fine.
+(15:25:13) bjorn3: it just says: No symbol table info available.
+(15:25:21) bjorn3: any idea what it could be?
+(15:25:34) bjorn3: dwarfdump output: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(15:26:48) bjorn3: more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(15:28:05) tromey: offhand I don't know, but if you can send me an executable I can look
+(15:28:17) bjorn3: how should I send it?
+(15:29:26) tromey: good question
+(15:29:41) tromey: you could try emailing it to tromey at adacore.com
+(15:29:47) tromey: dunno if that will work or not
+(15:30:26) bjorn3: i will try
+(15:37:27) bjorn3: tromey: i sent an email with the subject "gdb args not showing"
+(15:38:29) tromey: will check now
+(15:38:40) bjorn3: thanks!
+(15:42:51) irker749: gdb: tdevries binutils-gdb.git:master * de82891ce5b / gdb/ChangeLog gdb/block.c gdb/block.h gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def-decl.c gdb/testsuite/gdb.base/decl-before-def-def.c gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case)
+(15:42:52) irker749: gdb: tdevries binutils-gdb.git:master * 70bc38f5138 / gdb/ChangeLog gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case, with context)
+(15:43:36) tromey: bjorn3: sorry, got distracted.  I have the file now
+(15:45:35) tromey: my first thing when investigating was to enable complaints
+(15:45:37) tromey: so I did
+(15:45:40) tromey: set complaints 1000
+(15:45:42) tromey: then
+(15:45:51) tromey: file -readnow mini_core_hello_world
+(15:46:00) tromey: gdb printed just one style of complaint
+(15:46:07) tromey: During symbol reading: missing name for subprogram DIE at 0x3f7
+(15:46:18) tromey: (which is really pretty good, most compilers manage to generate a bunch)
+(15:46:29) tromey: and then the gdb DWARF reader says
+(15:46:34) tromey:   /* Ignore functions with missing or empty names.  These are actually
+(15:46:34) tromey:      illegal according to the DWARF standard.  */
+(15:46:34) tromey:   if (name == NULL)
+(15:46:34) tromey:     {
+(15:46:37) tromey:       complaint (_("missing name for subprogram DIE at %s"),
+(15:46:40) tromey: 		 sect_offset_str (die->sect_off));
+(15:46:47) tromey: I wonder if that comment is correct though
+(15:47:34) tromey: I guess pedantically maybe it is, DWARF 5 3.3.1 says
+(15:47:43) tromey: The subroutine or entry point entry has a DW_AT_name attribute whose value is
+(15:47:43) tromey: a null-terminated string containing the subroutine or entry point name.
+(15:48:14) bjorn3: i tried set complaints, but it returned complaints for system files. i didn't know about file -readnow.
+(15:48:21) tromey: cool
+(15:48:26) bjorn3: i will try adding DW_AT_name
+(15:48:45) tromey: without readnow unfortunately you get less stuff, because for whatever reason gdb has 2 separate DWARF scanners
+(15:49:02) tromey: sort of anyway
+(15:49:43) tromey: this seems kind of pedantic of gdb, like if there's a linkage name but no DW_AT_name, then why bail?
+(15:50:01) tromey: also what about anonymous functions
+(15:50:17) tromey: but anyway this explains the current situation and if you don't mind adding DW_AT_name, then that's probably simplest
+(15:51:47) bjorn3: i added DW_AT_name.
+(15:51:54) bjorn3: now it says cannot get low and high bounds for subprogram DIE at ...
+(15:52:01) tromey: ugh
+(15:52:10) bjorn3: i will add DW_AT_low_pc and DW_AT_high_pc
+(15:52:15) tromey:   /* Ignore functions with missing or invalid low and high pc attributes.  */
+(15:52:37) tromey: you can also use DW_AT_ranges
+(15:52:55) tromey: if you'd prefer
+(15:53:08) bjorn3: already using DW_AT_ranges for DW_TAG_compilation_unit
+(15:53:19) bjorn3: for individual functions, there are no gaps
+(15:57:07) bjorn3: still the same error with DW_AT_low_pc and DW_AT_high_pc
+(15:57:24) bjorn3: tromey: ^
+(15:58:08) tromey: hmmm
+(15:58:30) bjorn3: should i send the new executable?
+(15:58:31) tromey: send me another executable & I will debug
+(15:58:33) tromey: yep
+(15:59:23) bjorn3: sent as repy of the previous mail
+(16:03:23) tromey: the low PC has DW_FORM_addr, but the high PC has DW_FORM_udata, which seems weird
+(16:03:50) mjw: no
+(16:03:54) tromey: no?
+(16:04:00) mjw: I suggested that for the DWARF standard...
+(16:04:05) mjw: sorry
+(16:04:58) mjw: The idea was that instead of two relocations and two address wide fields, you have one address and a constant offset.
+(16:05:05) tromey: ahh, I see the code now
+(16:05:07) tromey: I forgot about this
+(16:05:18) tromey: 	  if (cu->header.version >= 4 && attr_high->form_is_constant ())
+(16:05:18) tromey: 	    high += low;
+(16:05:36) mjw: that second offset doesn't need a relocation and can often be packed in something small, like an uleb128
+(16:05:51) mjw: using udata might not be ideal though, but is allowed
+(16:05:51) tromey: bjorn3: the problem is that this CU claims to be DWARF 3 but is using a DWARF 4 feature
+(16:05:58) mjw: aha
+(16:05:59) bjorn3: which one?
+(16:06:03) ryoshu: hi
+(16:06:08) tromey:              high_pc              (udata) 107 (+0x00000000000011b0 <_ZN21mini_core_hello_world5start17hec55b7ca64fc434eE>)
+(16:06:08) tromey:
+(16:06:12) ryoshu: just soft ping, I have a queue of patches :)
+(16:06:22) tromey: using this as a length requires DWARF 4
+(16:06:36) tromey: for gdb at least it's fine to always emit DWARF 4
+(16:06:44) bjorn3: trying dwarf 4 now
+(16:06:48) tromey: I think there are some DWARF 5 features still in the works but DWARF 4 should be solid AFAIK
+(16:07:03) tromey: fini
+(16:07:08) tromey: lol wrong window
+(16:07:56) mjw: Maybe you can accept it for DWARF < 4. But if I remember correctly it might be that people might have been using udata as if it was an address...
+(16:08:13) tromey: yeah, I vaguely recall this as well, though I'd expect there to be a comment
+(16:08:21) mjw: Cannot really remember why it needed version >= 4. Maybe there was no good reason?
+(16:08:32) bjorn3: tromey: it works!!!! thanks for all the help!
+(16:08:41) tromey: my pleasure bjorn3
+```
+
+</details>
diff --git a/compiler/rustc_codegen_cranelift/docs/usage.md b/compiler/rustc_codegen_cranelift/docs/usage.md
new file mode 100644
index 00000000000..956d5905a97
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/usage.md
@@ -0,0 +1,65 @@
+# Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+Assuming `$cg_clif_dir` is the directory you cloned this repo into and you followed the instructions (`y.rs prepare` and `y.rs build` or `test.sh`).
+
+## Cargo
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo build
+```
+
+This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+## Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ $cg_clif_dir/build/bin/cg_clif my_crate.rs
+```
+
+## Jit mode
+
+In jit mode cg_clif will immediately execute your code without creating an executable file.
+
+> This requires all dependencies to be available as a dynamic library.
+> The jit mode will probably need cargo integration to make this possible.
+
+```bash
+$ $cg_clif_dir/build/cargo jit
+```
+
+or
+
+```bash
+$ $cg_clif_dir/build/bin/cg_clif -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
+```
+
+There is also an experimental lazy jit mode. In this mode functions are only compiled once they are
+first called.
+
+```bash
+$ $cg_clif_dir/build/cargo lazy-jit
+```
+
+## Shell
+
+These are a few functions that allow you to easily run rust code from the shell using cg_clif as jit.
+
+```bash
+function jit_naked() {
+    echo "$@" | $cg_clif_dir/build/bin/cg_clif - -Cllvm-args=mode=jit -Cprefer-dynamic
+}
+
+function jit() {
+    jit_naked "fn main() { $@ }"
+}
+
+function jit_calc() {
+    jit 'println!("0x{:x}", ' $@ ');';
+}
+```
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
new file mode 100644
index 00000000000..71e93e87b6c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
@@ -0,0 +1,38 @@
+#![feature(start, box_syntax, core_intrinsics, alloc_prelude, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::prelude::v1::*;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern "C" {
+    fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    core::intrinsics::abort();
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+    core::intrinsics::abort();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    let world: Box<&str> = box "Hello World!\0";
+    unsafe {
+        puts(*world as *const str as *const u8);
+    }
+
+    0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
new file mode 100644
index 00000000000..5f66ca67f2d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -0,0 +1,212 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![feature(allocator_api, rustc_private)]
+#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+              target_arch = "arm",
+              target_arch = "mips",
+              target_arch = "powerpc",
+              target_arch = "powerpc64")))]
+const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+              target_arch = "aarch64",
+              target_arch = "mips64",
+              target_arch = "s390x",
+              target_arch = "sparc64")))]
+const MIN_ALIGN: usize = 16;
+
+pub struct System;
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+    use core::alloc::{GlobalAlloc, Layout};
+    use core::cmp;
+    use core::ptr;
+    impl super::System {
+        pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
+                                              new_size: usize) -> *mut u8 {
+            // Docs for GlobalAlloc::realloc require this to be valid:
+            let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+            let new_ptr = GlobalAlloc::alloc(self, new_layout);
+            if !new_ptr.is_null() {
+                let size = cmp::min(old_layout.size(), new_size);
+                ptr::copy_nonoverlapping(ptr, new_ptr, size);
+                GlobalAlloc::dealloc(self, ptr, old_layout);
+            }
+            new_ptr
+        }
+    }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+    extern crate libc;
+    use core::ptr;
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::malloc(layout.size()) as *mut u8
+            } else {
+                #[cfg(target_os = "macos")]
+                {
+                    if layout.align() > (1 << 31) {
+                        return ptr::null_mut()
+                    }
+                }
+                aligned_malloc(&layout)
+            }
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+                libc::calloc(layout.size(), 1) as *mut u8
+            } else {
+                let ptr = self.alloc(layout.clone());
+                if !ptr.is_null() {
+                    ptr::write_bytes(ptr, 0, layout.size());
+                }
+                ptr
+            }
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+            libc::free(ptr as *mut libc::c_void)
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+                libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+    #[cfg(any(target_os = "android",
+              target_os = "hermit",
+              target_os = "redox",
+              target_os = "solaris"))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        // On android we currently target API level 9 which unfortunately
+        // doesn't have the `posix_memalign` API used below. Instead we use
+        // `memalign`, but this unfortunately has the property on some systems
+        // where the memory returned cannot be deallocated by `free`!
+        //
+        // Upon closer inspection, however, this appears to work just fine with
+        // Android, so for this platform we should be fine to call `memalign`
+        // (which is present in API level 9). Some helpful references could
+        // possibly be chromium using memalign [1], attempts at documenting that
+        // memalign + free is ok [2] [3], or the current source of chromium
+        // which still uses memalign on android [4].
+        //
+        // [1]: https://codereview.chromium.org/10796020/
+        // [2]: https://code.google.com/p/android/issues/detail?id=35391
+        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+        //                                       /memory/aligned_memory.cc
+        libc::memalign(layout.align(), layout.size()) as *mut u8
+    }
+    #[cfg(not(any(target_os = "android",
+                  target_os = "hermit",
+                  target_os = "redox",
+                  target_os = "solaris")))]
+    #[inline]
+    unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+        let mut out = ptr::null_mut();
+        let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+        if ret != 0 {
+            ptr::null_mut()
+        } else {
+            out as *mut u8
+        }
+    }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+    use MIN_ALIGN;
+    use System;
+    use core::alloc::{GlobalAlloc, Layout};
+    type LPVOID = *mut u8;
+    type HANDLE = LPVOID;
+    type SIZE_T = usize;
+    type DWORD = u32;
+    type BOOL = i32;
+    extern "system" {
+        fn GetProcessHeap() -> HANDLE;
+        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+        fn GetLastError() -> DWORD;
+    }
+    #[repr(C)]
+    struct Header(*mut u8);
+    const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+        &mut *(ptr as *mut Header).offset(-1)
+    }
+    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+        let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+        *get_header(aligned) = Header(ptr);
+        aligned
+    }
+    #[inline]
+    unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+        let ptr = if layout.align() <= MIN_ALIGN {
+            HeapAlloc(GetProcessHeap(), flags, layout.size())
+        } else {
+            let size = layout.size() + layout.align();
+            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+            if ptr.is_null() {
+                ptr
+            } else {
+                align_ptr(ptr, layout.align())
+            }
+        };
+        ptr as *mut u8
+    }
+    unsafe impl GlobalAlloc for System {
+        #[inline]
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, 0)
+        }
+        #[inline]
+        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+            allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+        }
+        #[inline]
+        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+            if layout.align() <= MIN_ALIGN {
+                let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            } else {
+                let header = get_header(ptr);
+                let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+                debug_assert!(err != 0, "Failed to free heap memory: {}",
+                              GetLastError());
+            }
+        }
+        #[inline]
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            if layout.align() <= MIN_ALIGN {
+                HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
+            } else {
+                self.realloc_fallback(ptr, layout, new_size)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 00000000000..ddeb752f93e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,69 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
+use std::{
+    ops::{Deref, CoerceUnsized, DispatchFromDyn},
+    marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.0
+    }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+    // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+    // without unsized_locals), but wrappers around `Self` currently are not.
+    // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+    // fn wrapper(self: Wrapper<Self>) -> i32;
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+    fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+        **self
+    }
+    fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+        ***self
+    }
+}
+
+fn main() {
+    let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+    assert_eq!(pw.ptr_wrapper(), 5);
+
+    let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+    assert_eq!(wp.wrapper_ptr(), 6);
+
+    let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+    assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/dst-field-align.rs b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
new file mode 100644
index 00000000000..6c338e99912
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+    a: u16,
+    b: T
+}
+
+trait Bar {
+    fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+    fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+    a: T
+}
+
+struct HasDrop<T: ?Sized> {
+    ptr: Box<usize>,
+    data: T
+}
+
+fn main() {
+    // Test that zero-offset works properly
+    let b : Baz<usize> = Baz { a: 7 };
+    assert_eq!(b.a.get(), 7);
+    let b : &Baz<dyn Bar> = &b;
+    assert_eq!(b.a.get(), 7);
+
+    // Test that the field is aligned properly
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    assert_eq!(f.b.get(), 11);
+    let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+    let f : &Foo<dyn Bar> = &f;
+    let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+    assert_eq!(f.b.get(), 11);
+
+    // The pointers should be the same
+    assert_eq!(ptr1, ptr2);
+
+    // Test that nested DSTs work properly
+    let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+    assert_eq!(f.b.b.get(), 17);
+    let f : &Foo<Foo<dyn Bar>> = &f;
+    assert_eq!(f.b.b.get(), 17);
+
+    // Test that get the pointer via destructuring works
+
+    let f : Foo<usize> = Foo { a: 0, b: 11 };
+    let f : &Foo<dyn Bar> = &f;
+    let &Foo { a: _, b: ref bar } = f;
+    assert_eq!(bar.get(), 11);
+
+    // Make sure that drop flags don't screw things up
+
+    let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+        ptr: Box::new(0),
+        data: Baz { a: [1,2,3,4] }
+    };
+    assert_eq!([1,2,3,4], d.data.a);
+
+    let d : &HasDrop<Baz<[i32]>> = &d;
+    assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/example.rs b/compiler/rustc_codegen_cranelift/example/example.rs
new file mode 100644
index 00000000000..d5c122bf681
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+pub fn abc(a: u8) -> u8 {
+    a * 2
+}
+
+pub fn bcd(b: bool, a: u8) -> u8 {
+    if b {
+        a * 2
+    } else {
+        a * 3
+    }
+}
+
+pub fn call() {
+    abc(42);
+}
+
+pub fn indirect_call() {
+    let f: fn() = call;
+    f();
+}
+
+pub enum BoolOption {
+    Some(bool),
+    None,
+}
+
+pub fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+    match o {
+        BoolOption::Some(b) => b,
+        BoolOption::None => d,
+    }
+}
+
+pub fn ret_42() -> u8 {
+    42
+}
+
+pub fn return_str() -> &'static str {
+    "hello world"
+}
+
+pub fn promoted_val() -> &'static u8 {
+    &(1 * 2)
+}
+
+pub fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+    abc as *const u8
+}
+
+pub fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+    a == b
+}
+
+pub fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+    (
+        a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+        b as u32,
+    )
+}
+
+pub fn char_cast(c: char) -> u8 {
+    c as u8
+}
+
+pub struct DebugTuple(());
+
+pub fn debug_tuple() -> DebugTuple {
+    DebugTuple(())
+}
+
+pub fn size_of<T>() -> usize {
+    intrinsics::size_of::<T>()
+}
+
+pub fn use_size_of() -> usize {
+    size_of::<u64>()
+}
+
+pub unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+    intrinsics::copy::<u8>(src, dst, 1);
+}
+
+pub unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+    let copy2 = &intrinsics::copy::<u8>;
+    copy2(src, dst, 1);
+}
+
+pub const ABC: u8 = 6 * 7;
+
+pub fn use_const() -> u8 {
+    ABC
+}
+
+pub fn call_closure_3arg() {
+    (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+    (|_, _| {})(0u8, 42u16)
+}
+
+pub struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+    type Output = (u8, u8);
+
+    #[inline]
+    extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+        self.call_mut(arg)
+    }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+    #[inline]
+    extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+        (0, 42)
+    }
+}
+
+pub fn call_is_not_empty() {
+    IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+pub fn eq_char(a: char, b: char) -> bool {
+    a == b
+}
+
+pub unsafe fn transmute(c: char) -> u32 {
+    intrinsics::transmute(c)
+}
+
+pub unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+    &*s
+}
+
+pub fn use_array(arr: [u8; 3]) -> u8 {
+    arr[1]
+}
+
+pub fn repeat_array() -> [u8; 3] {
+    [0; 3]
+}
+
+pub fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+    arr
+}
+
+pub unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+    intrinsics::ctlz_nonzero(a)
+}
+
+pub fn ptr_as_usize(ptr: *const u8) -> usize {
+    ptr as usize
+}
+
+pub fn float_cast(a: f32, b: f64) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+pub fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+    (a as f64, b as f32)
+}
+
+pub fn make_array() -> [u8; 3] {
+    [42, 0, 5]
+}
+
+pub fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+    &("abc", "some")
+}
+
+pub fn index_slice(s: &[u8]) -> u8 {
+    s[2]
+}
+
+pub struct StrWrapper {
+    s: str,
+}
+
+pub fn str_wrapper_get(w: &StrWrapper) -> &str {
+    &w.s
+}
+
+pub fn i16_as_i8(a: i16) -> i8 {
+    a as i8
+}
+
+pub struct Unsized(u8, str);
+
+pub fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+    &u.0
+}
+
+pub fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+    &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+    a.0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
new file mode 100644
index 00000000000..c4834c80408
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -0,0 +1,630 @@
+#![feature(
+    no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+    untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+    thread_local,
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+    type Output;
+
+    fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+    type Output = bool;
+
+    fn not(self) -> bool {
+        !self
+    }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+impl Mul for usize {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        self * rhs
+    }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+    type Output;
+
+    fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for i8 {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+impl Add for usize {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self {
+        self + rhs
+    }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+    type Output;
+
+    fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for u8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i8 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+impl Sub for i16 {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self {
+        self - rhs
+    }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+    type Output;
+
+    fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+    type Output = Self;
+
+    fn rem(self, rhs: Self) -> Self {
+        self % rhs
+    }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        self | rhs
+    }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+    type Output = bool;
+
+    fn bitor(self, rhs: bool) -> bool {
+        *self | rhs
+    }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+    fn eq(&self, other: &Rhs) -> bool;
+    fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+    fn eq(&self, other: &u8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u16 {
+    fn eq(&self, other: &u16) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u16) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u32 {
+    fn eq(&self, other: &u32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+
+impl PartialEq for u64 {
+    fn eq(&self, other: &u64) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u64) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for u128 {
+    fn eq(&self, other: &u128) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &u128) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for usize {
+    fn eq(&self, other: &usize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &usize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i8 {
+    fn eq(&self, other: &i8) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i8) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for i32 {
+    fn eq(&self, other: &i32) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &i32) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for isize {
+    fn eq(&self, other: &isize) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &isize) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl PartialEq for char {
+    fn eq(&self, other: &char) -> bool {
+        (*self) == (*other)
+    }
+    fn ne(&self, other: &char) -> bool {
+        (*self) != (*other)
+    }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+    fn eq(&self, other: &*const T) -> bool {
+        *self == *other
+    }
+    fn ne(&self, other: &*const T) -> bool {
+        *self != *other
+    }
+}
+
+impl <T: PartialEq> PartialEq for Option<T> {
+    fn eq(&self, other: &Self) -> bool {
+        match (self, other) {
+            (Some(lhs), Some(rhs)) => *lhs == *rhs,
+            (None, None) => true,
+            _ => false,
+        }
+    }
+
+    fn ne(&self, other: &Self) -> bool {
+        match (self, other) {
+            (Some(lhs), Some(rhs)) => *lhs != *rhs,
+            (None, None) => false,
+            _ => true,
+        }
+    }
+}
+
+#[lang = "shl"]
+pub trait Shl<RHS = Self> {
+    type Output;
+
+    #[must_use]
+    fn shl(self, rhs: RHS) -> Self::Output;
+}
+
+impl Shl for u128 {
+    type Output = u128;
+
+    fn shl(self, rhs: u128) -> u128 {
+        self << rhs
+    }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+    type Output;
+
+    fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+    type Output = i8;
+
+    fn neg(self) -> i8 {
+        -self
+    }
+}
+
+impl Neg for i16 {
+    type Output = i16;
+
+    fn neg(self) -> i16 {
+        self
+    }
+}
+
+impl Neg for isize {
+    type Output = isize;
+
+    fn neg(self) -> isize {
+        -self
+    }
+}
+
+impl Neg for f32 {
+    type Output = f32;
+
+    fn neg(self) -> f32 {
+        -self
+    }
+}
+
+pub enum Option<T> {
+    Some(T),
+    None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+    #[lang = "fn_once_output"]
+    type Output;
+
+    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+    extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+    unsafe {
+        libc::puts("Panicking\n\0" as *const str as *const i8);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+    unsafe {
+        libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+        intrinsics::abort();
+    }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+    loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+    // Code here does not matter - this is replaced by the
+    // real drop glue by the compiler.
+    drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+    type Target: ?Sized;
+
+    fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(*mut T);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+    fn drop(&mut self) {
+        // drop is currently performed by compiler.
+    }
+}
+
+impl<T> Deref for Box<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &**self
+    }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+    libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+    libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+    fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+    pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+    pub uninit: (),
+    pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+    extern "rust-intrinsic" {
+        pub fn abort() -> !;
+        pub fn size_of<T>() -> usize;
+        pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn min_align_of<T>() -> usize;
+        pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+        pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+        pub fn transmute<T, U>(e: T) -> U;
+        pub fn ctlz_nonzero<T>(x: T) -> T;
+        pub fn needs_drop<T>() -> bool;
+        pub fn bitreverse<T>(x: T) -> T;
+        pub fn bswap<T>(x: T) -> T;
+        pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+    }
+}
+
+pub mod libc {
+    #[cfg_attr(unix, link(name = "c"))]
+    #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+    extern "C" {
+        pub fn puts(s: *const i8) -> i32;
+        pub fn printf(format: *const i8, ...) -> i32;
+        pub fn malloc(size: usize) -> *mut u8;
+        pub fn free(ptr: *mut u8);
+        pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+        pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+        pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+    }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+    type Output: ?Sized;
+    fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+impl<T> Index<usize> for [T] {
+    type Output = T;
+
+    fn index(&self, index: usize) -> &Self::Output {
+        &self[index]
+    }
+}
+
+extern {
+    type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+    file: &'static str,
+    line: u32,
+    column: u32,
+}
+
+#[no_mangle]
+#[cfg(not(windows))]
+pub fn get_tls() -> u8 {
+    #[thread_local]
+    static A: u8 = 42;
+
+    A
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
new file mode 100644
index 00000000000..d997ce6d1b3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -0,0 +1,475 @@
+#![feature(no_core, lang_items, box_syntax, never_type, linkage, extern_types, thread_local)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const i8) {
+    puts(s);
+}
+
+macro_rules! assert {
+    ($e:expr) => {
+        if !$e {
+            panic(stringify!(! $e));
+        }
+    };
+}
+
+macro_rules! assert_eq {
+    ($l:expr, $r: expr) => {
+        if $l != $r {
+            panic(stringify!($l != $r));
+        }
+    }
+}
+
+#[lang = "termination"]
+trait Termination {
+    fn report(self) -> i32;
+}
+
+impl Termination for () {
+    fn report(self) -> i32 {
+        unsafe {
+            NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+            assert_eq!(*NUM_REF as i32, 44);
+        }
+        0
+    }
+}
+
+trait SomeTrait {
+    fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+    fn object_safe(&self) {
+        unsafe {
+            puts(*self as *const str as *const i8);
+        }
+    }
+}
+
+struct NoisyDrop {
+    text: &'static str,
+    inner: NoisyDropInner,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+    fn drop(&mut self) {
+        unsafe {
+            puts(self.text as *const str as *const i8);
+        }
+    }
+}
+
+impl Drop for NoisyDropInner {
+    fn drop(&mut self) {
+        unsafe {
+            puts("Inner got dropped!\0" as *const str as *const i8);
+        }
+    }
+}
+
+impl SomeTrait for NoisyDrop {
+    fn object_safe(&self) {}
+}
+
+enum Ordering {
+    Less = -1,
+    Equal = 0,
+    Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+    main: fn() -> T,
+    argc: isize,
+    argv: *const *const u8,
+) -> isize {
+    if argc == 3 {
+        unsafe { puts(*argv as *const i8); }
+        unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+        unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+    }
+
+    main().report() as isize
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+struct Unique<T: ?Sized> {
+    pointer: *const T,
+    _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+unsafe fn zeroed<T>() -> T {
+    let mut uninit = MaybeUninit { uninit: () };
+    intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+    uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+    (0, 0)
+}
+
+fn call_return_u128_pair() {
+    return_u128_pair();
+}
+
+fn main() {
+    take_unique(Unique {
+        pointer: 0 as *const (),
+        _marker: PhantomData,
+    });
+    take_f32(0.1);
+
+    call_return_u128_pair();
+
+    let slice = &[0, 1] as &[i32];
+    let slice_ptr = slice as *const [i32] as *const i32;
+
+    assert_eq!(slice_ptr as usize % 4, 0);
+
+    //return;
+
+    unsafe {
+        printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+        let hello: &[u8] = b"Hello\0" as &[u8; 6];
+        let ptr: *const i8 = hello as *const [u8] as *const i8;
+        puts(ptr);
+
+        let world: Box<&str> = box "World!\0";
+        puts(*world as *const str as *const i8);
+        world as Box<dyn SomeTrait>;
+
+        assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+        assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+        assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+        assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+        assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+        assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+        let chars = &['C', 'h', 'a', 'r', 's'];
+        let chars = chars as &[char];
+        assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+        let a: &dyn SomeTrait = &"abc\0";
+        a.object_safe();
+
+        assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+        assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+        assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+        assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+        assert!(!intrinsics::needs_drop::<u8>());
+        assert!(intrinsics::needs_drop::<NoisyDrop>());
+
+        Unique {
+            pointer: 0 as *const &str,
+            _marker: PhantomData,
+        } as Unique<dyn SomeTrait>;
+
+        struct MyDst<T: ?Sized>(T);
+
+        intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+        struct Foo {
+            x: u8,
+            y: !,
+        }
+
+        unsafe fn uninitialized<T>() -> T {
+            MaybeUninit { uninit: () }.value.value
+        }
+
+        zeroed::<(u8, u8)>();
+        #[allow(unreachable_code)]
+        {
+            if false {
+                zeroed::<!>();
+                zeroed::<Foo>();
+                uninitialized::<Foo>();
+            }
+        }
+    }
+
+    let _ = box NoisyDrop {
+        text: "Boxed outer got dropped!\0",
+        inner: NoisyDropInner,
+    } as Box<dyn SomeTrait>;
+
+    const FUNC_REF: Option<fn()> = Some(main);
+    match FUNC_REF {
+        Some(_) => {},
+        None => assert!(false),
+    }
+
+    match Ordering::Less {
+        Ordering::Less => {},
+        _ => assert!(false),
+    }
+
+    [NoisyDropInner, NoisyDropInner];
+
+    let x = &[0u32, 42u32] as &[u32];
+    match x {
+        [] => assert_eq!(0u32, 1),
+        [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+    }
+
+    assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+    #[cfg(not(any(jit, windows)))]
+    {
+        extern {
+            #[linkage = "extern_weak"]
+            static ABC: *const u8;
+        }
+
+        {
+            extern {
+                #[linkage = "extern_weak"]
+                static ABC: *const u8;
+            }
+        }
+
+        unsafe { assert_eq!(ABC as usize, 0); }
+    }
+
+    &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+    let f = 1000.0;
+    assert_eq!(f as u8, 255);
+    let f2 = -1000.0;
+    assert_eq!(f2 as i8, -128);
+    assert_eq!(f2 as u8, 0);
+
+    let amount = 0;
+    assert_eq!(1u128 << amount, 1);
+
+    static ANOTHER_STATIC: &u8 = &A_STATIC;
+    assert_eq!(*ANOTHER_STATIC, 42);
+
+    check_niche_behavior();
+
+    extern "C" {
+        type ExternType;
+    }
+
+    struct ExternTypeWrapper {
+        _a: ExternType,
+    }
+
+    let nullptr = 0 as *const ();
+    let extern_nullptr = nullptr as *const ExternTypeWrapper;
+    extern_nullptr as *const ();
+    let slice_ptr = &[] as *const [u8];
+    slice_ptr as *const u8;
+
+    let repeat = [Some(42); 2];
+    assert_eq!(repeat[0], Some(42));
+    assert_eq!(repeat[1], Some(42));
+
+    from_decimal_string();
+
+    #[cfg(not(any(jit, windows)))]
+    test_tls();
+
+    #[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+    unsafe {
+        global_asm_test();
+    }
+
+    // Both statics have a reference that points to the same anonymous allocation.
+    static REF1: &u8 = &42;
+    static REF2: &u8 = REF1;
+    assert_eq!(*REF1, *REF2);
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+extern "C" {
+    fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+global_asm! {
+    "
+    .global global_asm_test
+    global_asm_test:
+    // comment that would normally be removed by LLVM
+    ret
+    "
+}
+
+#[repr(C)]
+enum c_void {
+    _1,
+    _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+    __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+    fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+    fn pthread_create(
+        native: *mut pthread_t,
+        attr: *const pthread_attr_t,
+        f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+        value: *mut c_void
+    ) -> c_int;
+
+    fn pthread_join(
+        native: pthread_t,
+        value: *mut *mut c_void
+    ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+    unsafe { TLS = 0; }
+    0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+    unsafe {
+        let mut attr: pthread_attr_t = zeroed();
+        let mut thread: pthread_t = 0;
+
+        assert_eq!(TLS, 42);
+
+        if pthread_attr_init(&mut attr) != 0 {
+            assert!(false);
+        }
+
+        if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+            assert!(false);
+        }
+
+        let mut res = 0 as *mut c_void;
+        pthread_join(thread, &mut res);
+
+        // TLS of main thread must not have been changed by the other thread.
+        assert_eq!(TLS, 42);
+
+        puts("TLS works!\n\0" as *const str as *const i8);
+    }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+    V1 { f: bool },
+    V2 { f: Infallible },
+    V3,
+    V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+    V1 { f: bool },
+
+    /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+    _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+    _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+    _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+    _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+    _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+    _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+    _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+    _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+    _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+    _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+    _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+    _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+    _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+    _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+    _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+    _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+    _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+    _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+    _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+    _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+    _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+    _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+    _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+    _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+    _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+    _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+    _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+    _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+    _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+    _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+    _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+    V3,
+    V4,
+}
+
+fn check_niche_behavior () {
+    if let E1::V2 { .. } = (E1::V1 { f: true }) {
+        intrinsics::abort();
+    }
+
+    if let E2::V1 { .. } = E2::V3::<Infallible> {
+        intrinsics::abort();
+    }
+}
+
+fn from_decimal_string() {
+    loop {
+        let multiplier = 1;
+
+        take_multiplier_ref(&multiplier);
+
+        if multiplier == 1 {
+            break;
+        }
+
+        unreachable();
+    }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+    panic("unreachable")
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mod_bench.rs b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
new file mode 100644
index 00000000000..152041aa9ed
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
@@ -0,0 +1,36 @@
+#![feature(start, box_syntax, core_intrinsics, lang_items)]
+#![no_std]
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+    core::intrinsics::abort();
+}
+
+#[lang="eh_personality"]
+fn eh_personality(){}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+    core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+    for i in 2..10_000_000 {
+        black_box((i + 1) % i);
+    }
+
+    0
+}
+
+#[inline(never)]
+fn black_box(i: u32) {
+    if i != 1 {
+        core::intrinsics::abort();
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
new file mode 100644
index 00000000000..5bc51a541b5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -0,0 +1,333 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+#[cfg(target_arch = "x86_64")]
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+fn main() {
+    println!("{:?}", std::env::args().collect::<Vec<_>>());
+
+    let mutex = std::sync::Mutex::new(());
+    let _guard = mutex.lock().unwrap();
+
+    let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+    let stderr = ::std::io::stderr();
+    let mut stderr = stderr.lock();
+
+    std::thread::spawn(move || {
+        println!("Hello from another thread!");
+    });
+
+    writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+    let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+    println!("cargo:rustc-link-lib=z");
+
+    static ONCE: std::sync::Once = std::sync::Once::new();
+    ONCE.call_once(|| {});
+
+    let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+    // Make sure ByValPair values with differently sized components are correctly passed
+    map(None::<(u8, Box<Instruction>)>);
+
+    println!("{}", 2.3f32.exp());
+    println!("{}", 2.3f32.exp2());
+    println!("{}", 2.3f32.abs());
+    println!("{}", 2.3f32.sqrt());
+    println!("{}", 2.3f32.floor());
+    println!("{}", 2.3f32.ceil());
+    println!("{}", 2.3f32.min(1.0));
+    println!("{}", 2.3f32.max(1.0));
+    println!("{}", 2.3f32.powi(2));
+    println!("{}", 2.3f32.log2());
+    assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+    println!("{}", 2.3f32.powf(2.0));
+
+    assert_eq!(i64::MAX.checked_mul(2), None);
+
+    assert_eq!(-128i8, (-128i8).saturating_sub(1));
+    assert_eq!(127i8, 127i8.saturating_sub(-128));
+    assert_eq!(-128i8, (-128i8).saturating_add(-128));
+    assert_eq!(127i8, 127i8.saturating_add(1));
+
+    assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+    assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+    assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
+
+    let _d = 0i128.checked_div(2i128);
+    let _d = 0u128.checked_div(2u128);
+    assert_eq!(1u128 + 2, 3);
+
+    assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+    assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+    let tmp = 353985398u128;
+    assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+    let tmp = -0x1234_5678_9ABC_DEF0i64;
+    assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+    // Check that all u/i128 <-> float casts work correctly.
+    let houndred_u128 = 100u128;
+    let houndred_i128 = 100i128;
+    let houndred_f32 = 100.0f32;
+    let houndred_f64 = 100.0f64;
+    assert_eq!(houndred_u128 as f32, 100.0);
+    assert_eq!(houndred_u128 as f64, 100.0);
+    assert_eq!(houndred_f32 as u128, 100);
+    assert_eq!(houndred_f64 as u128, 100);
+    assert_eq!(houndred_i128 as f32, 100.0);
+    assert_eq!(houndred_i128 as f64, 100.0);
+    assert_eq!(houndred_f32 as i128, 100);
+    assert_eq!(houndred_f64 as i128, 100);
+    assert_eq!(1u128.rotate_left(2), 4);
+
+    // Test signed 128bit comparing
+    let max = usize::MAX as i128;
+    if 100i128 < 0i128 || 100i128 > max {
+        panic!();
+    }
+
+    test_checked_mul();
+
+    let _a = 1u32 << 2u8;
+
+    let empty: [i32; 0] = [];
+    assert!(empty.is_sorted());
+
+    println!("{:?}", std::intrinsics::caller_location());
+
+    #[cfg(target_arch = "x86_64")]
+    unsafe {
+        test_simd();
+    }
+
+    Box::pin(move |mut _task_context| {
+        yield ();
+    }).as_mut().resume(0);
+
+    #[derive(Copy, Clone)]
+    enum Nums {
+        NegOne = -1,
+    }
+
+    let kind = Nums::NegOne;
+    assert_eq!(-1i128, kind as i128);
+
+    let options = [1u128];
+    match options[0] {
+        1 => (),
+        0 => loop {},
+        v => panic(v),
+    };
+}
+
+fn panic(_: u128) {
+    panic!();
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+    assert!(is_x86_feature_detected!("sse2"));
+
+    let x = _mm_setzero_si128();
+    let y = _mm_set1_epi16(7);
+    let or = _mm_or_si128(x, y);
+    let cmp_eq = _mm_cmpeq_epi8(y, y);
+    let cmp_lt = _mm_cmplt_epi8(y, y);
+
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+    assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+    test_mm_slli_si128();
+    test_mm_movemask_epi8();
+    test_mm256_movemask_epi8();
+    test_mm_add_epi8();
+    test_mm_add_pd();
+    test_mm_cvtepi8_epi16();
+    test_mm_cvtsi128_si64();
+
+    test_mm_extract_epi8();
+    test_mm_insert_epi16();
+
+    let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+    assert_eq!(mask1, 1);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 1);
+    let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 15);
+    let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+    assert_eq_m128i(r, e);
+
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+    );
+    let r = _mm_slli_si128(a, 16);
+    assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+        0b0101, 0b1111_0000u8 as i8, 0, 0,
+        0, 0, 0b1111_0000u8 as i8, 0b0101,
+        0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+    );
+    let r = _mm_movemask_epi8(a);
+    assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+    let a = _mm256_set1_epi8(-1);
+    let r = _mm256_movemask_epi8(a);
+    let e = -1;
+    assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+    let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+    #[rustfmt::skip]
+    let b = _mm_setr_epi8(
+        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+    );
+    let r = _mm_add_epi8(a, b);
+    #[rustfmt::skip]
+    let e = _mm_setr_epi8(
+        16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+    );
+    assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+    let a = _mm_setr_pd(1.0, 2.0);
+    let b = _mm_setr_pd(5.0, 10.0);
+    let r = _mm_add_pd(a, b);
+    assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+    unsafe {
+        assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+    if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+        panic!("{:?} != {:?}", a, b);
+    }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+    let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+    assert_eq!(r, 5);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+    let a = _mm_set1_epi8(10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(10);
+    assert_eq_m128i(r, e);
+    let a = _mm_set1_epi8(-10);
+    let r = _mm_cvtepi8_epi16(a);
+    let e = _mm_set1_epi16(-10);
+    assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+    #[rustfmt::skip]
+    let a = _mm_setr_epi8(
+        -1, 1, 2, 3, 4, 5, 6, 7,
+        8, 9, 10, 11, 12, 13, 14, 15
+    );
+    let r1 = _mm_extract_epi8(a, 0);
+    let r2 = _mm_extract_epi8(a, 3);
+    assert_eq!(r1, 0xFF);
+    assert_eq!(r2, 3);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+    let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+    let r = _mm_insert_epi16(a, 9, 0);
+    let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+    assert_eq_m128i(r, e);
+}
+
+fn test_checked_mul() {
+    let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+    assert_eq!(u, None);
+
+    assert_eq!(1u8.checked_mul(255u8), Some(255u8));
+    assert_eq!(255u8.checked_mul(255u8), None);
+    assert_eq!(1i8.checked_mul(127i8), Some(127i8));
+    assert_eq!(127i8.checked_mul(127i8), None);
+    assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
+    assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
+    assert_eq!((-128i8).checked_mul(-128i8), None);
+
+    assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+    assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+    assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+    assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+    assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+    assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+    assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+    Continue(()),
+    Break(())
+}
+
+pub enum Instruction {
+    Increment,
+    Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+    match a {
+        None => None,
+        Some((_, instr)) => Some(instr),
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
new file mode 100644
index 00000000000..2cb84786f56
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+    ($($e:expr),* $(,)?) => {
+        [$(N($e)),*]
+    }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+    ($e:expr) => { Z }
+}
+
+macro_rules! z {
+    ($($e:expr),* $(,)?) => {
+        [$(zed!($e)),*]
+    }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+    ($e:expr, $t:ty $(,)?) => {{
+        const CONST_EVAL: $t = $e;
+        const fn const_eval() -> $t { $e }
+        static CONST_EVAL2: $t = const_eval();
+        let runtime_eval = $e;
+        assert_eq!(CONST_EVAL, runtime_eval);
+        assert_eq!(CONST_EVAL2, runtime_eval);
+    }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+//     ($name); X; Y:
+//     struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+    (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+        macro_rules! single {
+            ($($dollar $placeholder:ident),*) => { $($test)* }
+        }
+        $(single!($($values),+);)*
+    }
+}
+
+fn main() {
+    repeat! {
+        ($arr $Ty); n, N; z, Z:
+        compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+        compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+        compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+        compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+        compare_evaluation!(
+            { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+        compare_evaluation!(
+            { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+            &'static [$Ty; 0],
+        );
+
+        compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+        compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+        compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+    }
+
+    compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+    compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+    compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+    compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
new file mode 100644
index 00000000000..93bab17e46b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+    tracked()
+}
+
+fn main() {
+    let location = Location::caller();
+    assert_eq!(location.file(), file!());
+    assert_eq!(location.line(), 21);
+    assert_eq!(location.column(), 20);
+
+    let tracked = tracked();
+    assert_eq!(tracked.file(), file!());
+    assert_eq!(tracked.line(), 26);
+    assert_eq!(tracked.column(), 19);
+
+    let nested = nested_intrinsic();
+    assert_eq!(nested.file(), file!());
+    assert_eq!(nested.line(), 13);
+    assert_eq!(nested.column(), 5);
+
+    let contained = nested_tracked();
+    assert_eq!(contained.file(), file!());
+    assert_eq!(contained.line(), 17);
+    assert_eq!(contained.column(), 5);
+}
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-compiler-builtins-Disable-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0001-compiler-builtins-Disable-128bit-atomic-operations.patch
new file mode 100644
index 00000000000..7daea99f579
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0001-compiler-builtins-Disable-128bit-atomic-operations.patch
@@ -0,0 +1,48 @@
+From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 18:30:55 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ src/mem/mod.rs | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+diff --git a/src/mem/mod.rs b/src/mem/mod.rs
+index 107762c..2d1ae10 100644
+--- a/src/mem/mod.rs
++++ b/src/mem/mod.rs
+@@ -137,10 +137,6 @@ intrinsics! {
+     pub extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+         memcpy_element_unordered_atomic(dest, src, bytes);
+     }
+-    #[cfg(target_has_atomic_load_store = "128")]
+-    pub extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+-        memcpy_element_unordered_atomic(dest, src, bytes);
+-    }
+ 
+     #[cfg(target_has_atomic_load_store = "8")]
+     pub extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
+@@ -158,10 +154,6 @@ intrinsics! {
+     pub extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
+         memmove_element_unordered_atomic(dest, src, bytes);
+     }
+-    #[cfg(target_has_atomic_load_store = "128")]
+-    pub extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
+-        memmove_element_unordered_atomic(dest, src, bytes);
+-    }
+ 
+     #[cfg(target_has_atomic_load_store = "8")]
+     pub extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
+@@ -179,8 +171,4 @@ intrinsics! {
+     pub extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
+         memset_element_unordered_atomic(s, c, bytes);
+     }
+-    #[cfg(target_has_atomic_load_store = "128")]
+-    pub extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
+-        memset_element_unordered_atomic(s, c, bytes);
+-    }
+ }
+-- 
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-rand-Enable-c2-chacha-simd-feature.patch b/compiler/rustc_codegen_cranelift/patches/0001-rand-Enable-c2-chacha-simd-feature.patch
new file mode 100644
index 00000000000..01dc0fcc537
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0001-rand-Enable-c2-chacha-simd-feature.patch
@@ -0,0 +1,23 @@
+From 9c5663e36391fa20becf84f3af2e82afa5bb720b Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sat, 15 Aug 2020 19:56:03 +0200
+Subject: [PATCH] [rand] Enable c2-chacha simd feature
+
+---
+ rand_chacha/Cargo.toml | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rand_chacha/Cargo.toml b/rand_chacha/Cargo.toml
+index 9190b7f..872cca2 100644
+--- a/rand_chacha/Cargo.toml
++++ b/rand_chacha/Cargo.toml
+@@ -24,5 +24,5 @@ ppv-lite86 = { version = "0.2.8", default-features = false }
+ 
+ [features]
+ default = ["std"]
+-std = ["ppv-lite86/std"]
++std = ["ppv-lite86/std", "ppv-lite86/simd"]
+ simd = [] # deprecated
+-- 
+2.20.1
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch b/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch
new file mode 100644
index 00000000000..19fd20d7269
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch
@@ -0,0 +1,33 @@
+From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sat, 15 Aug 2020 20:04:38 +0200
+Subject: [PATCH] [rand] Disable failing test
+
+---
+ src/distributions/uniform.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
+index 480b859..c80bb6f 100644
+--- a/src/distributions/uniform.rs
++++ b/src/distributions/uniform.rs
+@@ -1085,7 +1085,7 @@ mod tests {
+             _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
+         }
+     }
+-    
++
+     #[test]
+     #[cfg(feature = "serde1")]
+     fn test_uniform_serialization() {
+@@ -1314,6 +1314,7 @@ mod tests {
+         not(target_arch = "wasm32"),
+         not(target_arch = "asmjs")
+     ))]
++    #[ignore] // FIXME
+     fn test_float_assertions() {
+         use super::SampleUniform;
+         use std::panic::catch_unwind;
+-- 
+2.20.1
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch
new file mode 100644
index 00000000000..ba0eaacd828
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch
@@ -0,0 +1,83 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml         | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs  | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs             | 2 ++
+ library/core/tests/slice.rs           | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2018"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+     mod dragon;
+     mod grisu;
+ }
+-mod random;
+ 
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+     match decode(v).1 {
+diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
+index 1a6be3a..42dbd59 100644
+--- a/library/core/tests/ptr.rs
++++ b/library/core/tests/ptr.rs
+@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
+     assert!(ys == zs);
+ }
+ 
++/*
+ #[test]
+ #[allow(warnings)]
+ // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
+@@ -289,6 +290,7 @@ fn write_unaligned_drop() {
+     }
+     DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+ }
++*/
+ 
+ #[test]
+ fn align_offset_zst() {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+     }
+ }
+ 
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+     v.select_nth_unstable(0);
+     assert!(v == [0xDEADBEEF]);
+ }
++*/
+ 
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
new file mode 100644
index 00000000000..5d2c3049f60
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
@@ -0,0 +1,90 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs       |  4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs    |  5 +++--
+ library/core/tests/time.rs       |  1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+     assert_eq!(i.get(), 5);
+ }
+ 
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+     assert_eq!(COUNTER.load(Relaxed), 0);
+     panic!("test succeeded")
+ }
++*/
+ 
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+     assert_eq!(b, [1, 2, 3]);
+ }
+ 
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+     assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+     panic!("test succeeded")
+ }
++*/
+ 
+ #[test]
+ fn cell_allows_array_cycle() {
+diff --git a/library/core/tests/num/mod.rs b/library/core/tests/num/mod.rs
+index a17c094..5bb11d2 100644
+--- a/library/core/tests/num/mod.rs
++++ b/library/core/tests/num/mod.rs
+@@ -651,11 +651,12 @@ macro_rules! test_float {
+                 assert_eq!((9.0 as $fty).min($neginf), $neginf);
+                 assert_eq!(($neginf as $fty).min(-9.0), $neginf);
+                 assert_eq!((-9.0 as $fty).min($neginf), $neginf);
+-                assert_eq!(($nan as $fty).min(9.0), 9.0);
+-                assert_eq!(($nan as $fty).min(-9.0), -9.0);
+-                assert_eq!((9.0 as $fty).min($nan), 9.0);
+-                assert_eq!((-9.0 as $fty).min($nan), -9.0);
+-                assert!(($nan as $fty).min($nan).is_nan());
++                // Cranelift fmin has NaN propagation
++                //assert_eq!(($nan as $fty).min(9.0), 9.0);
++                //assert_eq!(($nan as $fty).min(-9.0), -9.0);
++                //assert_eq!((9.0 as $fty).min($nan), 9.0);
++                //assert_eq!((-9.0 as $fty).min($nan), -9.0);
++                //assert!(($nan as $fty).min($nan).is_nan());
+             }
+             #[test]
+             fn max() {
+@@ -673,11 +674,12 @@ macro_rules! test_float {
+                 assert_eq!((9.0 as $fty).max($neginf), 9.0);
+                 assert_eq!(($neginf as $fty).max(-9.0), -9.0);
+                 assert_eq!((-9.0 as $fty).max($neginf), -9.0);
+-                assert_eq!(($nan as $fty).max(9.0), 9.0);
+-                assert_eq!(($nan as $fty).max(-9.0), -9.0);
+-                assert_eq!((9.0 as $fty).max($nan), 9.0);
+-                assert_eq!((-9.0 as $fty).max($nan), -9.0);
+-                assert!(($nan as $fty).max($nan).is_nan());
++                // Cranelift fmax has NaN propagation
++                //assert_eq!(($nan as $fty).max(9.0), 9.0);
++                //assert_eq!(($nan as $fty).max(-9.0), -9.0);
++                //assert_eq!((9.0 as $fty).max($nan), 9.0);
++                //assert_eq!((-9.0 as $fty).max($nan), -9.0);
++                //assert!(($nan as $fty).max($nan).is_nan());
+             }
+             #[test]
+             fn rem_euclid() {
+-- 
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch
new file mode 100644
index 00000000000..32e59309690
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch
@@ -0,0 +1,103 @@
+From 894e07dfec2624ba539129b1c1d63e1d7d812bda Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 18:45:28 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ library/core/src/sync/atomic.rs | 38 ---------------------------------
+ library/core/tests/atomic.rs    |  4 ----
+ library/std/src/panic.rs        |  6 ------
+ 3 files changed, 48 deletions(-)
+
+diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
+index 81c9e1d..65c9503 100644
+--- a/library/core/src/sync/atomic.rs
++++ b/library/core/src/sync/atomic.rs
+@@ -2228,44 +2228,6 @@ atomic_int! {
+     "AtomicU64::new(0)",
+     u64 AtomicU64 ATOMIC_U64_INIT
+ }
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+-    cfg(target_has_atomic = "128"),
+-    cfg(target_has_atomic_equal_alignment = "128"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    "i128",
+-    "#![feature(integer_atomics)]\n\n",
+-    atomic_min, atomic_max,
+-    16,
+-    "AtomicI128::new(0)",
+-    i128 AtomicI128 ATOMIC_I128_INIT
+-}
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+-    cfg(target_has_atomic = "128"),
+-    cfg(target_has_atomic_equal_alignment = "128"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+-    unstable(feature = "integer_atomics", issue = "32976"),
+-    "u128",
+-    "#![feature(integer_atomics)]\n\n",
+-    atomic_umin, atomic_umax,
+-    16,
+-    "AtomicU128::new(0)",
+-    u128 AtomicU128 ATOMIC_U128_INIT
+-}
+ 
+ macro_rules! atomic_int_ptr_sized {
+     ( $($target_pointer_width:literal $align:literal)* ) => { $(
+diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
+index 2d1e449..cb6da5d 100644
+--- a/library/core/tests/atomic.rs
++++ b/library/core/tests/atomic.rs
+@@ -145,10 +145,6 @@ fn atomic_alignment() {
+     assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+     #[cfg(target_has_atomic = "64")]
+     assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+-    #[cfg(target_has_atomic = "128")]
+-    assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+-    #[cfg(target_has_atomic = "128")]
+-    assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+     #[cfg(target_has_atomic = "ptr")]
+     assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+     #[cfg(target_has_atomic = "ptr")]
+diff --git a/library/std/src/panic.rs b/library/std/src/panic.rs
+index 89a822a..779fd88 100644
+--- a/library/std/src/panic.rs
++++ b/library/std/src/panic.rs
+@@ -279,9 +279,6 @@ impl RefUnwindSafe for atomic::AtomicI32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for atomic::AtomicI64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for atomic::AtomicI128 {}
+ 
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+@@ -298,9 +295,6 @@ impl RefUnwindSafe for atomic::AtomicU32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for atomic::AtomicU64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "32976")]
+-impl RefUnwindSafe for atomic::AtomicU128 {}
+ 
+ #[cfg(target_has_atomic_load_store = "8")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+-- 
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
new file mode 100644
index 00000000000..f806f7bdcd9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "nightly-2021-07-07"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/compiler/rustc_codegen_cranelift/rustfmt.toml b/compiler/rustc_codegen_cranelift/rustfmt.toml
new file mode 100644
index 00000000000..2bd8f7d1bc1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rustfmt.toml
@@ -0,0 +1,4 @@
+# Matches rustfmt.toml of rustc
+version = "Two"
+use_small_heuristics = "Max"
+merge_derives = false
diff --git a/compiler/rustc_codegen_cranelift/scripts/Readme.md b/compiler/rustc_codegen_cranelift/scripts/Readme.md
new file mode 100644
index 00000000000..83cec9c6f36
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/Readme.md
@@ -0,0 +1,2 @@
+This directory is for scripts that are either never directly invoked or are not used very often.
+Scripts that are frequently used should be kept at the project root.
diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo.rs b/compiler/rustc_codegen_cranelift/scripts/cargo.rs
new file mode 100644
index 00000000000..b7e8dd44974
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/cargo.rs
@@ -0,0 +1,70 @@
+use std::env;
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+use std::path::PathBuf;
+use std::process::Command;
+
+fn main() {
+    if env::var("RUSTC_WRAPPER").map_or(false, |wrapper| wrapper.contains("sccache")) {
+        eprintln!(
+            "\x1b[1;93m=== Warning: Unsetting RUSTC_WRAPPER to prevent interference with sccache ===\x1b[0m"
+        );
+        env::remove_var("RUSTC_WRAPPER");
+    }
+
+    let sysroot = PathBuf::from(env::current_exe().unwrap().parent().unwrap());
+
+    env::set_var("RUSTC", sysroot.join("bin/cg_clif".to_string() + env::consts::EXE_SUFFIX));
+
+    let mut rustdoc_flags = env::var("RUSTDOCFLAGS").unwrap_or(String::new());
+    rustdoc_flags.push_str(" -Cpanic=abort -Zpanic-abort-tests -Zcodegen-backend=");
+    rustdoc_flags.push_str(
+        sysroot
+            .join(if cfg!(windows) { "bin" } else { "lib" })
+            .join(
+                env::consts::DLL_PREFIX.to_string()
+                    + "rustc_codegen_cranelift"
+                    + env::consts::DLL_SUFFIX,
+            )
+            .to_str()
+            .unwrap(),
+    );
+    rustdoc_flags.push_str(" --sysroot ");
+    rustdoc_flags.push_str(sysroot.to_str().unwrap());
+    env::set_var("RUSTDOCFLAGS", rustdoc_flags);
+
+    // Ensure that the right toolchain is used
+    env::set_var("RUSTUP_TOOLCHAIN", env!("RUSTUP_TOOLCHAIN"));
+
+    let args: Vec<_> = match env::args().nth(1).as_deref() {
+        Some("jit") => {
+            env::set_var(
+                "RUSTFLAGS",
+                env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
+            );
+            std::array::IntoIter::new(["rustc".to_string()])
+                .chain(env::args().skip(2))
+                .chain(["--".to_string(), "-Cllvm-args=mode=jit".to_string()])
+                .collect()
+        }
+        Some("lazy-jit") => {
+            env::set_var(
+                "RUSTFLAGS",
+                env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
+            );
+            std::array::IntoIter::new(["rustc".to_string()])
+                .chain(env::args().skip(2))
+                .chain(["--".to_string(), "-Cllvm-args=mode=jit-lazy".to_string()])
+                .collect()
+        }
+        _ => env::args().skip(1).collect(),
+    };
+
+    #[cfg(unix)]
+    Command::new("cargo").args(args).exec();
+
+    #[cfg(not(unix))]
+    std::process::exit(
+        Command::new("cargo").args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
+    );
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/config.sh b/compiler/rustc_codegen_cranelift/scripts/config.sh
new file mode 100644
index 00000000000..53ada369b08
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/config.sh
@@ -0,0 +1,6 @@
+# Note to people running shellcheck: this file should only be sourced, not executed directly.
+
+set -e
+
+export LD_LIBRARY_PATH="$(rustc --print sysroot)/lib:$LD_LIBRARY_PATH"
+export DYLD_LIBRARY_PATH="$(rustc --print sysroot)/lib:$DYLD_LIBRARY_PATH"
diff --git a/compiler/rustc_codegen_cranelift/scripts/ext_config.sh b/compiler/rustc_codegen_cranelift/scripts/ext_config.sh
new file mode 100644
index 00000000000..11d6c4c8318
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/ext_config.sh
@@ -0,0 +1,32 @@
+# Note to people running shellcheck: this file should only be sourced, not executed directly.
+
+# Various env vars that should only be set for the build system
+
+set -e
+
+export CG_CLIF_DISPLAY_CG_TIME=1
+export CG_CLIF_DISABLE_INCR_CACHE=1
+
+export HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+export TARGET_TRIPLE=${TARGET_TRIPLE:-$HOST_TRIPLE}
+
+export RUN_WRAPPER=''
+export JIT_SUPPORTED=1
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+   export JIT_SUPPORTED=0
+   if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+      # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+      export RUSTFLAGS='-Clinker=aarch64-linux-gnu-gcc '$RUSTFLAGS
+      export RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+   elif [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
+      # We are cross-compiling for Windows. Run tests in wine.
+      export RUN_WRAPPER='wine'
+   else
+      echo "Unknown non-native platform"
+   fi
+fi
+
+# FIXME fix `#[linkage = "extern_weak"]` without this
+if [[ "$(uname)" == 'Darwin' ]]; then
+   export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
diff --git a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
new file mode 100755
index 00000000000..9e196afbe4f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
@@ -0,0 +1,126 @@
+#!/bin/bash
+#![forbid(unsafe_code)]/* This line is ignored by bash
+# This block is ignored by rustc
+pushd $(dirname "$0")/../
+source scripts/config.sh
+RUSTC="$(pwd)/build/bin/cg_clif"
+popd
+PROFILE=$1 OUTPUT=$2 exec $RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic $0
+#*/
+
+//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
+//! profiles.
+//!
+//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
+//!
+//! This file is specially crafted to be both a valid bash script and valid rust source file. If
+//! executed as bash script this will run the rust source using cg_clif in JIT mode.
+
+use std::io::Write;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let profile_name = std::env::var("PROFILE").unwrap();
+    let output_name = std::env::var("OUTPUT").unwrap();
+    if profile_name.is_empty() || output_name.is_empty() {
+        println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
+        std::process::exit(1);
+    }
+    let profile = std::fs::read_to_string(profile_name)
+        .map_err(|err| format!("Failed to read profile {}", err))?;
+    let mut output = std::fs::OpenOptions::new()
+        .create(true)
+        .write(true)
+        .truncate(true)
+        .open(output_name)?;
+
+    for line in profile.lines() {
+        let mut stack = &line[..line.rfind(" ").unwrap()];
+        let count = &line[line.rfind(" ").unwrap() + 1..];
+
+        // Filter away uninteresting samples
+        if !stack.contains("rustc_codegen_cranelift") {
+            continue;
+        }
+
+        if stack.contains("rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items")
+            || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
+            || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
+        {
+            continue;
+        }
+
+        // Trim start
+        if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
+            stack = &stack[index..];
+        } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
+            stack = &stack[index..];
+        }
+
+        if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
+            stack = &stack[index..];
+        }
+
+        // Trim end
+        const MALLOC: &str = "malloc";
+        if let Some(index) = stack.find(MALLOC) {
+            stack = &stack[..index + MALLOC.len()];
+        }
+
+        const FREE: &str = "free";
+        if let Some(index) = stack.find(FREE) {
+            stack = &stack[..index + FREE.len()];
+        }
+
+        const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
+        if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
+            stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
+        }
+
+        const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
+            "rustc_mir::monomorphize::partitioning::collect_and_partition_mono_items";
+        if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
+            stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
+        }
+
+        const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
+        if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
+            stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
+        }
+
+        const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
+        if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
+            stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
+        }
+
+        const ENCODE_METADATA: &str = "rustc_middle::ty::context::TyCtxt::encode_metadata";
+        if let Some(index) = stack.find(ENCODE_METADATA) {
+            stack = &stack[..index + ENCODE_METADATA.len()];
+        }
+
+        const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
+        if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
+            stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+        }
+
+        const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
+        if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
+            stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
+        }
+
+        const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
+        if let Some(index) = stack.find(INST_BUILD) {
+            stack = &stack[..index + INST_BUILD.len()];
+        }
+
+        output.write_all(stack.as_bytes())?;
+        output.write_all(&*b" ")?;
+        output.write_all(count.as_bytes())?;
+        output.write_all(&*b"\n")?;
+    }
+
+    Ok(())
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/rustup.sh b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
new file mode 100755
index 00000000000..cc34c080886
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+
+case $1 in
+    "prepare")
+        TOOLCHAIN=$(date +%Y-%m-%d)
+
+        echo "=> Installing new nightly"
+        rustup toolchain install --profile minimal "nightly-${TOOLCHAIN}" # Sanity check to see if the nightly exists
+        sed -i "s/\"nightly-.*\"/\"nightly-${TOOLCHAIN}\"/" rust-toolchain
+        rustup component add rustfmt || true
+
+        echo "=> Uninstalling all old nightlies"
+        for nightly in $(rustup toolchain list | grep nightly | grep -v "$TOOLCHAIN" | grep -v nightly-x86_64); do
+            rustup toolchain uninstall "$nightly"
+        done
+
+        ./clean_all.sh
+        ./y.rs prepare
+
+        (cd build_sysroot && cargo update)
+
+        ;;
+    "commit")
+        git add rust-toolchain build_sysroot/Cargo.lock
+        git commit -m "Rustup to $(rustc -V)"
+        ;;
+    "push")
+        cg_clif=$(pwd)
+        pushd ../rust
+        git pull origin master
+        branch=sync_cg_clif-$(date +%Y-%m-%d)
+        git checkout -b "$branch"
+        git subtree pull --prefix=compiler/rustc_codegen_cranelift/ https://github.com/bjorn3/rustc_codegen_cranelift.git master
+        git push -u my "$branch"
+
+        # immediately merge the merge commit into cg_clif to prevent merge conflicts when syncing
+        # from rust-lang/rust later
+        git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+        popd
+        git merge sync_from_rust
+	;;
+    "pull")
+        cg_clif=$(pwd)
+        pushd ../rust
+        git pull origin master
+        rust_vers="$(git rev-parse HEAD)"
+        git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+        popd
+        git merge sync_from_rust -m "Sync from rust $rust_vers"
+        git branch -d sync_from_rust
+        ;;
+    *)
+        echo "Unknown command '$1'"
+        echo "Usage: ./rustup.sh prepare|commit|push|pull"
+        ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
new file mode 100644
index 00000000000..52adaaa8de6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+set -e
+
+./y.rs build
+source scripts/config.sh
+
+echo "[SETUP] Rust fork"
+git clone https://github.com/rust-lang/rust.git || true
+pushd rust
+git fetch
+git checkout -- .
+git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
+
+git apply - <<EOF
+diff --git a/Cargo.toml b/Cargo.toml
+index 5bd1147cad5..10d68a2ff14 100644
+--- a/Cargo.toml
++++ b/Cargo.toml
+@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
+ rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
+ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
+
++compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
++
+ [patch."https://github.com/rust-lang/rust-clippy"]
+ clippy_lints = { path = "src/tools/clippy/clippy_lints" }
+diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
+index d95b5b7f17f..00b6f0e3635 100644
+--- a/library/alloc/Cargo.toml
++++ b/library/alloc/Cargo.toml
+@@ -8,7 +8,7 @@ edition = "2018"
+
+ [dependencies]
+ core = { path = "../core" }
+-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
++compiler_builtins = { version = "0.1.45", features = ['rustc-dep-of-std', 'no-asm'] }
+
+ [dev-dependencies]
+ rand = "0.7"
+ rand_xorshift = "0.2"
+EOF
+
+cat > config.toml <<EOF
+[llvm]
+ninja = false
+
+[build]
+rustc = "$(pwd)/../build/bin/cg_clif"
+cargo = "$(rustup which cargo)"
+full-bootstrap = true
+local-rebuild = true
+
+[rust]
+codegen-backends = ["cranelift"]
+deny-warnings = false
+EOF
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
new file mode 100755
index 00000000000..791d457993d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/../"
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Bootstrap of rustc"
+pushd rust
+rm -r compiler/rustc_codegen_cranelift/{Cargo.*,src}
+cp ../Cargo.* compiler/rustc_codegen_cranelift/
+cp -r ../src compiler/rustc_codegen_cranelift/src
+
+./x.py build --stage 1 library/std
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
new file mode 100755
index 00000000000..2f5c2cf737b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+set -e
+
+cd $(dirname "$0")/../
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Test suite of rustc"
+pushd rust
+
+cargo install ripgrep
+
+rm -r src/test/ui/{extern/,panics/,unsized-locals/,thinlto/,simd*,*lto*.rs,linkage*,unwind-*.rs} || true
+for test in $(rg --files-with-matches "asm!|catch_unwind|should_panic|lto" src/test/ui); do
+  rm $test
+done
+
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+  rm $test
+done
+
+git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+# these all depend on unwinding support
+rm src/test/ui/backtrace.rs
+rm src/test/ui/array-slice-vec/box-of-array-of-drop-*.rs
+rm src/test/ui/array-slice-vec/slice-panic-*.rs
+rm src/test/ui/array-slice-vec/nested-vec-3.rs
+rm src/test/ui/cleanup-rvalue-temp-during-incomplete-alloc.rs
+rm src/test/ui/issues/issue-26655.rs
+rm src/test/ui/issues/issue-29485.rs
+rm src/test/ui/issues/issue-30018-panic.rs
+rm src/test/ui/multi-panic.rs
+rm src/test/ui/sepcomp/sepcomp-unwind.rs
+rm src/test/ui/structs-enums/unit-like-struct-drop-run.rs
+rm src/test/ui/terminate-in-initializer.rs
+rm src/test/ui/threads-sendsync/task-stderr.rs
+rm src/test/ui/numbers-arithmetic/int-abs-overflow.rs
+rm src/test/ui/drop/drop-trait-enum.rs
+rm src/test/ui/numbers-arithmetic/issue-8460.rs
+rm src/test/ui/rt-explody-panic-payloads.rs
+rm src/test/incremental/change_crate_dep_kind.rs
+
+rm src/test/ui/issues/issue-28950.rs # depends on stack size optimizations
+rm src/test/ui/init-large-type.rs # same
+rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm src/test/ui/issues/issue-33992.rs # unsupported linkages
+rm src/test/ui/issues/issue-51947.rs # same
+rm src/test/ui/numbers-arithmetic/saturating-float-casts.rs # intrinsic gives different but valid result
+rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
+rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
+rm src/test/ui/consts/issue-33537.rs # same
+rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
+rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
+rm src/test/ui/generator/size-moved-locals.rs # same
+rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
+rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+
+rm src/test/incremental/hashes/inline_asm.rs # inline asm
+rm src/test/incremental/issue-72386.rs # same
+rm src/test/incremental/issue-49482.rs # same
+rm src/test/incremental/issue-54059.rs # same
+rm src/test/incremental/lto.rs # requires lto
+
+rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
+rm -r src/test/run-make/unstable-flag-required # same
+rm -r src/test/run-make/emit-named-files # requires full --emit support
+
+rm src/test/pretty/asm.rs # inline asm
+rm src/test/pretty/raw-str-nonexpr.rs # same
+
+rm -r src/test/run-pass-valgrind/unsized-locals
+
+rm src/test/ui/json-bom-plus-crlf-multifile.rs # differing warning
+rm src/test/ui/json-bom-plus-crlf.rs # same
+rm src/test/ui/match/issue-82392.rs # differing error
+rm src/test/ui/type-alias-impl-trait/cross_crate_ice*.rs # requires removed aux dep
+
+rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
+rm src/test/ui/cfg/cfg-panic.rs
+rm src/test/ui/default-alloc-error-hook.rs
+rm -r src/test/ui/hygiene/
+
+rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
+rm src/test/codegen-units/polymorphization/unused_type_parameters.rs # same
+
+rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
+rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+echo "[TEST] rustc test suite"
+RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui}
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/tests.sh b/compiler/rustc_codegen_cranelift/scripts/tests.sh
new file mode 100755
index 00000000000..5df04c533a7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/tests.sh
@@ -0,0 +1,154 @@
+#!/usr/bin/env bash
+
+set -e
+
+source scripts/config.sh
+source scripts/ext_config.sh
+export RUSTC=false # ensure that cg_llvm isn't accidentally used
+MY_RUSTC="$(pwd)/build/bin/cg_clif $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
+
+function no_sysroot_tests() {
+    echo "[BUILD] mini_core"
+    $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
+
+    echo "[BUILD] example"
+    $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+    if [[ "$JIT_SUPPORTED" = "1" ]]; then
+        echo "[JIT] mini_core_hello_world"
+        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+
+        echo "[JIT-lazy] mini_core_hello_world"
+        CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+    else
+        echo "[JIT] mini_core_hello_world (skipped)"
+    fi
+
+    echo "[AOT] mini_core_hello_world"
+    $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+    # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
+}
+
+function base_sysroot_tests() {
+    echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+    $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+    echo "[AOT] alloc_system"
+    $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+    echo "[AOT] alloc_example"
+    $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/alloc_example
+
+    if [[ "$JIT_SUPPORTED" = "1" ]]; then
+        echo "[JIT] std_example"
+        $MY_RUSTC -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+
+        echo "[JIT-lazy] std_example"
+        $MY_RUSTC -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+    else
+        echo "[JIT] std_example (skipped)"
+    fi
+
+    echo "[AOT] dst_field_align"
+    # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+    $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+    echo "[AOT] std_example"
+    $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/std_example arg
+
+    echo "[AOT] subslice-patterns-const-eval"
+    $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+    echo "[AOT] track-caller-attribute"
+    $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/track-caller-attribute
+
+    echo "[AOT] mod_bench"
+    $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
+    $RUN_WRAPPER ./target/out/mod_bench
+}
+
+function extended_sysroot_tests() {
+    pushd rand
+    ../build/cargo clean
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        echo "[TEST] rust-random/rand"
+        ../build/cargo test --workspace
+    else
+        echo "[AOT] rust-random/rand"
+        ../build/cargo build --workspace --target $TARGET_TRIPLE --tests
+    fi
+    popd
+
+    pushd simple-raytracer
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        echo "[BENCH COMPILE] ebobby/simple-raytracer"
+        hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "../build/cargo clean" \
+        "RUSTC=rustc RUSTFLAGS='' cargo build" \
+        "../build/cargo build"
+
+        echo "[BENCH RUN] ebobby/simple-raytracer"
+        cp ./target/debug/main ./raytracer_cg_clif
+        hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
+    else
+        ../build/cargo clean
+        echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+        echo "[COMPILE] ebobby/simple-raytracer"
+        ../build/cargo build --target $TARGET_TRIPLE
+        echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+    fi
+    popd
+
+    pushd build_sysroot/sysroot_src/library/core/tests
+    echo "[TEST] libcore"
+    ../../../../../build/cargo clean
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        ../../../../../build/cargo test
+    else
+        ../../../../../build/cargo build --target $TARGET_TRIPLE --tests
+    fi
+    popd
+
+    pushd regex
+    echo "[TEST] rust-lang/regex example shootout-regex-dna"
+    ../build/cargo clean
+    export RUSTFLAGS="$RUSTFLAGS --cap-lints warn" # newer aho_corasick versions throw a deprecation warning
+    # Make sure `[codegen mono items] start` doesn't poison the diff
+    ../build/cargo build --example shootout-regex-dna --target $TARGET_TRIPLE
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        cat examples/regexdna-input.txt \
+            | ../build/cargo run --example shootout-regex-dna --target $TARGET_TRIPLE \
+            | grep -v "Spawned thread" > res.txt
+        diff -u res.txt examples/regexdna-output.txt
+    fi
+
+    if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+        echo "[TEST] rust-lang/regex tests"
+        ../build/cargo test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+    else
+        echo "[AOT] rust-lang/regex tests"
+        ../build/cargo build --tests --target $TARGET_TRIPLE
+    fi
+    popd
+}
+
+case "$1" in
+    "no_sysroot")
+        no_sysroot_tests
+        ;;
+    "base_sysroot")
+        base_sysroot_tests
+        ;;
+    "extended_sysroot")
+        extended_sysroot_tests
+        ;;
+    *)
+        echo "unknown test suite"
+        ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
new file mode 100644
index 00000000000..5fbaed7283a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -0,0 +1,129 @@
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+    if fx.clif_comments.enabled() {
+        fx.add_global_comment(
+            "kind  loc.idx   param    pass mode                            ty".to_string(),
+        );
+    }
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    kind: &str,
+    local: Option<mir::Local>,
+    local_field: Option<usize>,
+    params: &[Value],
+    arg_abi_mode: PassMode,
+    arg_layout: TyAndLayout<'tcx>,
+) {
+    if !fx.clif_comments.enabled() {
+        return;
+    }
+
+    let local = if let Some(local) = local {
+        Cow::Owned(format!("{:?}", local))
+    } else {
+        Cow::Borrowed("???")
+    };
+    let local_field = if let Some(local_field) = local_field {
+        Cow::Owned(format!(".{}", local_field))
+    } else {
+        Cow::Borrowed("")
+    };
+
+    let params = match params {
+        [] => Cow::Borrowed("-"),
+        [param] => Cow::Owned(format!("= {:?}", param)),
+        [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+        params => Cow::Owned(format!(
+            "= {}",
+            params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+        )),
+    };
+
+    let pass_mode = format!("{:?}", arg_abi_mode);
+    fx.add_global_comment(format!(
+        "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+        kind = kind,
+        local = local,
+        local_field = local_field,
+        params = params,
+        pass_mode = pass_mode,
+        ty = arg_layout.ty,
+    ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+    if fx.clif_comments.enabled() {
+        fx.add_global_comment(String::new());
+        fx.add_global_comment(
+            "kind  local ty                              size align (abi,pref)".to_string(),
+        );
+    }
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    place: CPlace<'tcx>,
+    local: Local,
+) {
+    if !fx.clif_comments.enabled() {
+        return;
+    }
+    let TyAndLayout { ty, layout } = place.layout();
+    let rustc_target::abi::Layout { size, align, abi: _, variants: _, fields: _, largest_niche: _ } =
+        layout;
+
+    let (kind, extra) = match *place.inner() {
+        CPlaceInner::Var(place_local, var) => {
+            assert_eq!(local, place_local);
+            ("ssa", Cow::Owned(format!(",var={}", var.index())))
+        }
+        CPlaceInner::VarPair(place_local, var1, var2) => {
+            assert_eq!(local, place_local);
+            ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+        }
+        CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+        CPlaceInner::Addr(ptr, meta) => {
+            let meta = if let Some(meta) = meta {
+                Cow::Owned(format!(",meta={}", meta))
+            } else {
+                Cow::Borrowed("")
+            };
+            match ptr.debug_base_and_offset() {
+                (crate::pointer::PointerBase::Addr(addr), offset) => {
+                    ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+                }
+                (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+                    ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+                }
+                (crate::pointer::PointerBase::Dangling(align), offset) => {
+                    ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+                }
+            }
+        }
+    };
+
+    fx.add_global_comment(format!(
+        "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+        kind,
+        format!("{:?}", local),
+        format!("{:?}", ty),
+        size.bytes(),
+        align.abi.bytes(),
+        align.pref.bytes(),
+        if extra.is_empty() { "" } else { "              " },
+        extra,
+    ));
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
new file mode 100644
index 00000000000..54c8fb0e7b8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -0,0 +1,555 @@
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+mod comments;
+mod pass_mode;
+mod returning;
+
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::AbiParam;
+use smallvec::smallvec;
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
+
+fn clif_sig_from_fn_abi<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    triple: &target_lexicon::Triple,
+    fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
+    let call_conv = match fn_abi.conv {
+        Conv::Rust | Conv::C => CallConv::triple_default(triple),
+        Conv::X86_64SysV => CallConv::SystemV,
+        Conv::X86_64Win64 => CallConv::WindowsFastcall,
+        Conv::ArmAapcs
+        | Conv::CCmseNonSecureCall
+        | Conv::Msp430Intr
+        | Conv::PtxKernel
+        | Conv::X86Fastcall
+        | Conv::X86Intr
+        | Conv::X86Stdcall
+        | Conv::X86ThisCall
+        | Conv::X86VectorCall
+        | Conv::AmdGpuKernel
+        | Conv::AvrInterrupt
+        | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+    };
+    let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
+
+    let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+    // Sometimes the first param is an pointer to the place where the return value needs to be stored.
+    let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
+    Signature { params, returns, call_conv }
+}
+
/// Compute the Cranelift [`Signature`] for the given monomorphized instance.
///
/// The assert guards against instances that still contain inference variables,
/// which must never reach codegen.
pub(crate) fn get_function_sig<'tcx>(
    tcx: TyCtxt<'tcx>,
    triple: &target_lexicon::Triple,
    inst: Instance<'tcx>,
) -> Signature {
    assert!(!inst.substs.needs_infer());
    clif_sig_from_fn_abi(tcx, triple, &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]))
}
+
/// Instance must be monomorphized
///
/// Declares the symbol of `inst` with import linkage in `module` and returns the
/// resulting [`FuncId`], so the function can be referenced before it is defined.
pub(crate) fn import_function<'tcx>(
    tcx: TyCtxt<'tcx>,
    module: &mut dyn Module,
    inst: Instance<'tcx>,
) -> FuncId {
    let name = tcx.symbol_name(inst).name;
    let sig = get_function_sig(tcx, module.isa().triple(), inst);
    module.declare_function(name, Linkage::Import, &sig).unwrap()
}
+
impl<'tcx> FunctionCx<'_, '_, 'tcx> {
    /// Instance must be monomorphized
    ///
    /// Declares `inst` in the module if necessary and returns a [`FuncRef`] usable
    /// in call instructions of the function currently being built. When clif
    /// comments are enabled, annotates the func ref with the instance for debugging.
    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
        let func_id = import_function(self.tcx, self.module, inst);
        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);

        if self.clif_comments.enabled() {
            self.add_comment(func_ref, format!("{:?}", inst));
        }

        func_ref
    }

    /// Call a library function by symbol name, using the target's default calling
    /// convention and explicitly given Cranelift parameter/return types.
    ///
    /// Returns the call's result values; the assert limits this to at most two.
    pub(crate) fn lib_call(
        &mut self,
        name: &str,
        params: Vec<AbiParam>,
        returns: Vec<AbiParam>,
        args: &[Value],
    ) -> &[Value] {
        let sig = Signature { params, returns, call_conv: CallConv::triple_default(self.triple()) };
        let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
        let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
        let call_inst = self.bcx.ins().call(func_ref, args);
        if self.clif_comments.enabled() {
            self.add_comment(call_inst, format!("easy_call {}", name));
        }
        let results = self.bcx.inst_results(call_inst);
        assert!(results.len() <= 2, "{}", results.len());
        results
    }

    /// Convenience wrapper around [`Self::lib_call`] operating on [`CValue`]s: the
    /// clif signature is derived from the Rust types of `args` and `return_ty`.
    ///
    /// Each argument must be loadable as a single scalar; a tuple `return_ty` is
    /// treated as a pair of return values.
    pub(crate) fn easy_call(
        &mut self,
        name: &str,
        args: &[CValue<'tcx>],
        return_ty: Ty<'tcx>,
    ) -> CValue<'tcx> {
        let (input_tys, args): (Vec<_>, Vec<_>) = args
            .iter()
            .map(|arg| {
                (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
            })
            .unzip();
        let return_layout = self.layout_of(return_ty);
        let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
            tup.types().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
        } else {
            vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
        };
        let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
        match *ret_vals {
            // No return values: produce a by-ref CValue at a non-null aligned dummy
            // address. NOTE(review): assumes the result is never actually loaded —
            // confirm against callers.
            [] => CValue::by_ref(
                Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
                return_layout,
            ),
            [val] => CValue::by_val(val, return_layout),
            [val, extra] => CValue::by_val_pair(val, extra, return_layout),
            _ => unreachable!(),
        }
    }
}
+
+/// Make a [`CPlace`] capable of holding value of the specified type.
+fn make_local_place<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    local: Local,
+    layout: TyAndLayout<'tcx>,
+    is_ssa: bool,
+) -> CPlace<'tcx> {
+    let place = if is_ssa {
+        if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+            CPlace::new_var_pair(fx, local, layout)
+        } else {
+            CPlace::new_var(fx, local, layout)
+        }
+    } else {
+        CPlace::new_stack_slot(fx, layout)
+    };
+
+    self::comments::add_local_place_comments(fx, place, local);
+
+    place
+}
+
/// Emit the function prologue: bind every MIR local (return place, arguments,
/// vars and temps) to a [`CPlace`] in `fx.local_map`, spill or alias incoming
/// arguments as needed, then jump to the first real MIR block.
pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
    fx.bcx.append_block_params_for_function_params(start_block);

    fx.bcx.switch_to_block(start_block);
    fx.bcx.ins().nop();

    let ssa_analyzed = crate::analyze::analyze(fx);

    self::comments::add_args_header_comment(fx);

    // All incoming clif-level arguments are consumed from this iterator; the asserts
    // below check that exactly all of them are used.
    let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
    let ret_place =
        self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
    assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);

    // None means pass_mode == NoPass
    enum ArgKind<'tcx> {
        Normal(Option<CValue<'tcx>>),
        Spread(Vec<Option<CValue<'tcx>>>),
    }

    // Temporarily take the fn_abi out of fx to avoid borrow conflicts; restored below.
    let fn_abi = fx.fn_abi.take().unwrap();
    let mut arg_abis_iter = fn_abi.args.iter();

    let func_params = fx
        .mir
        .args_iter()
        .map(|local| {
            let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);

            // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
            if Some(local) == fx.mir.spread_arg {
                // This argument (e.g. the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual function arguments.

                let tupled_arg_tys = match arg_ty.kind() {
                    ty::Tuple(ref tys) => tys,
                    _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
                };

                let mut params = Vec::new();
                for (i, _arg_ty) in tupled_arg_tys.types().enumerate() {
                    let arg_abi = arg_abis_iter.next().unwrap();
                    let param =
                        cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
                    params.push(param);
                }

                (local, ArgKind::Spread(params), arg_ty)
            } else {
                let arg_abi = arg_abis_iter.next().unwrap();
                let param =
                    cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
                (local, ArgKind::Normal(param), arg_ty)
            }
        })
        .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();

    assert!(fx.caller_location.is_none());
    if fx.instance.def.requires_caller_location(fx.tcx) {
        // Store caller location for `#[track_caller]`.
        let arg_abi = arg_abis_iter.next().unwrap();
        fx.caller_location =
            Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
    }

    assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
    fx.fn_abi = Some(fn_abi);
    assert!(block_params_iter.next().is_none(), "arg_value left behind");

    self::comments::add_locals_header_comment(fx);

    for (local, arg_kind, ty) in func_params {
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        // While this is normally an optimization to prevent an unnecessary copy when an argument is
        // not mutated by the current function, this is necessary to support unsized arguments.
        if let ArgKind::Normal(Some(val)) = arg_kind {
            if let Some((addr, meta)) = val.try_to_ptr() {
                let local_decl = &fx.mir.local_decls[local];
                //                       v this ! is important
                let internally_mutable = !val
                    .layout()
                    .ty
                    .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
                if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
                    // We won't mutate this argument, so it is fine to borrow the backing storage
                    // of this argument, to prevent a copy.

                    let place = if let Some(meta) = meta {
                        CPlace::for_ptr_with_extra(addr, meta, val.layout())
                    } else {
                        CPlace::for_ptr(addr, val.layout())
                    };

                    self::comments::add_local_place_comments(fx, place, local);

                    assert_eq!(fx.local_map.push(place), local);
                    continue;
                }
            }
        }

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);

        match arg_kind {
            ArgKind::Normal(param) => {
                if let Some(param) = param {
                    place.write_cvalue(fx, param);
                }
            }
            ArgKind::Spread(params) => {
                // Write each spread tuple element into the corresponding field.
                for (i, param) in params.into_iter().enumerate() {
                    if let Some(param) = param {
                        place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
                    }
                }
            }
        }
    }

    // Allocate storage for the remaining (non-argument) locals.
    for local in fx.mir.vars_and_temps_iter() {
        let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
        let layout = fx.layout_of(ty);

        let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;

        let place = make_local_place(fx, local, layout, is_ssa);
        assert_eq!(fx.local_map.push(place), local);
    }

    fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}
+
/// Codegen a MIR `Call` terminator.
///
/// `destination` is the place the return value is written to plus the block to jump
/// to afterwards; `None` means the call diverges. Handles intrinsics, LLVM-named
/// intrinsics, empty drop glue, virtual (trait object) calls, indirect calls through
/// function pointers, "rust-call" tuple spreading, `#[track_caller]` and C varargs.
pub(crate) fn codegen_terminator_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    span: Span,
    func: &Operand<'tcx>,
    args: &[Operand<'tcx>],
    destination: Option<(Place<'tcx>, BasicBlock)>,
) {
    let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
    let fn_sig =
        fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));

    let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));

    // Handle special calls like intrinsics and empty drop glue.
    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
            .unwrap()
            .unwrap()
            .polymorphize(fx.tcx);

        if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
            crate::intrinsics::codegen_llvm_intrinsic_call(
                fx,
                &fx.tcx.symbol_name(instance).name,
                substs,
                args,
                destination,
            );
            return;
        }

        match instance.def {
            InstanceDef::Intrinsic(_) => {
                crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
                return;
            }
            InstanceDef::DropGlue(_, None) => {
                // empty drop glue - a nop.
                let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
                let ret_block = fx.get_block(dest);
                fx.bcx.ins().jump(ret_block, &[]);
                return;
            }
            _ => Some(instance),
        }
    } else {
        None
    };

    // Arguments past the declared signature belong to a C-variadic call.
    let extra_args = &args[fn_sig.inputs().len()..];
    let extra_args = extra_args
        .iter()
        .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
        .collect::<Vec<_>>();
    let fn_abi = if let Some(instance) = instance {
        FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
    } else {
        FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
    };

    let is_cold = instance
        .map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
        .unwrap_or(false);
    if is_cold {
        // FIXME Mark current_block block as cold once Cranelift supports it
    }

    // Unpack arguments tuple for closures
    let args = if fn_sig.abi == Abi::RustCall {
        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
        let self_arg = codegen_operand(fx, &args[0]);
        let pack_arg = codegen_operand(fx, &args[1]);

        let tupled_arguments = match pack_arg.layout().ty.kind() {
            ty::Tuple(ref tupled_arguments) => tupled_arguments,
            _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
        };

        let mut args = Vec::with_capacity(1 + tupled_arguments.len());
        args.push(self_arg);
        for i in 0..tupled_arguments.len() {
            args.push(pack_arg.value_field(fx, mir::Field::new(i)));
        }
        args
    } else {
        args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
    };

    //   | indirect call target
    //   |         | the first argument to be passed
    //   v         v
    let (func_ref, first_arg) = match instance {
        // Trait object call
        Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
            if fx.clif_comments.enabled() {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(
                    nop_inst,
                    format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0],),
                );
            }
            // The self argument is replaced by the data pointer; the callee comes
            // from the vtable.
            let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
            (Some(method), smallvec![ptr])
        }

        // Normal call
        Some(_) => (
            None,
            args.get(0)
                .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
                .unwrap_or(smallvec![]),
        ),

        // Indirect call
        None => {
            if fx.clif_comments.enabled() {
                let nop_inst = fx.bcx.ins().nop();
                fx.add_comment(nop_inst, "indirect call");
            }
            let func = codegen_operand(fx, func).load_scalar(fx);
            (
                Some(func),
                args.get(0)
                    .map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
                    .unwrap_or(smallvec![]),
            )
        }
    };

    let ret_place = destination.map(|(place, _)| place);
    let (call_inst, call_args) = self::returning::codegen_with_call_return_arg(
        fx,
        &fn_abi.ret,
        ret_place,
        |fx, return_ptr| {
            let regular_args_count = args.len();
            // Clif argument order: optional return pointer, then the (possibly
            // rewritten) first argument, then the rest lowered per their ArgAbi.
            let mut call_args: Vec<Value> = return_ptr
                .into_iter()
                .chain(first_arg.into_iter())
                .chain(
                    args.into_iter()
                        .enumerate()
                        .skip(1)
                        .map(|(i, arg)| adjust_arg_for_abi(fx, arg, &fn_abi.args[i]).into_iter())
                        .flatten(),
                )
                .collect::<Vec<_>>();

            if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
                // Pass the caller location for `#[track_caller]`.
                let caller_location = fx.get_caller_location(span);
                call_args.extend(
                    adjust_arg_for_abi(fx, caller_location, &fn_abi.args[regular_args_count])
                        .into_iter(),
                );
                assert_eq!(fn_abi.args.len(), regular_args_count + 1);
            } else {
                assert_eq!(fn_abi.args.len(), regular_args_count);
            }

            let call_inst = if let Some(func_ref) = func_ref {
                let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
                let sig = fx.bcx.import_signature(sig);
                fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
            } else {
                let func_ref =
                    fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
                fx.bcx.ins().call(func_ref, &call_args)
            };

            (call_inst, call_args)
        },
    );

    // FIXME find a cleaner way to support varargs
    if fn_sig.c_variadic {
        if !matches!(fn_sig.abi, Abi::C { .. }) {
            fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
        }
        // Patch the imported signature to match the actual argument types passed.
        let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
        let abi_params = call_args
            .into_iter()
            .map(|arg| {
                let ty = fx.bcx.func.dfg.value_type(arg);
                if !ty.is_int() {
                    // FIXME set %al to upperbound on float args once floats are supported
                    fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
                }
                AbiParam::new(ty)
            })
            .collect::<Vec<AbiParam>>();
        fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
    }

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging function returned");
    }
}
+
/// Generate code that drops the value stored in `drop_place`.
///
/// Emits nothing when the type's drop glue is empty. Trait objects are dropped
/// through the drop function stored in their vtable; all other types call their
/// `drop_in_place` glue directly, passing the caller location when the glue is
/// `#[track_caller]`.
pub(crate) fn codegen_drop<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    span: Span,
    drop_place: CPlace<'tcx>,
) {
    let ty = drop_place.layout().ty;
    let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);

    if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
        // we don't actually need to drop anything
    } else {
        match ty.kind() {
            ty::Dynamic(..) => {
                // Unsized place: the pointer part is the data pointer, the metadata
                // part is the vtable.
                let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
                let ptr = ptr.get_addr(fx);
                let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());

                // FIXME(eddyb) perhaps move some of this logic into
                // `Instance::resolve_drop_in_place`?
                let virtual_drop = Instance {
                    def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
                    substs: drop_instance.substs,
                };
                let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), virtual_drop, &[]);

                let sig = clif_sig_from_fn_abi(fx.tcx, fx.triple(), &fn_abi);
                let sig = fx.bcx.import_signature(sig);
                fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
            }
            _ => {
                assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));

                let fn_abi = FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), drop_instance, &[]);

                // Drop glue takes `&mut T` as its single regular argument.
                let arg_value = drop_place.place_ref(
                    fx,
                    fx.layout_of(fx.tcx.mk_ref(
                        &ty::RegionKind::ReErased,
                        TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
                    )),
                );
                let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);

                let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();

                if drop_instance.def.requires_caller_location(fx.tcx) {
                    // Pass the caller location for `#[track_caller]`.
                    let caller_location = fx.get_caller_location(span);
                    call_args.extend(
                        adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1]).into_iter(),
                    );
                }

                let func_ref = fx.get_function_ref(drop_instance);
                fx.bcx.ins().call(func_ref, &call_args);
            }
        }
    }
}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
new file mode 100644
index 00000000000..7c275965199
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -0,0 +1,300 @@
+//! Argument passing
+
+use crate::prelude::*;
+use crate::value_and_place::assert_assignable;
+
+use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
+use rustc_target::abi::call::{
+    ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
+};
+use smallvec::{smallvec, SmallVec};
+
/// Extension trait lowering a rustc [`ArgAbi`] to Cranelift [`AbiParam`]s.
pub(super) trait ArgAbiExt<'tcx> {
    /// The Cranelift parameters this argument is passed as (empty for ignored args).
    fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
    /// How the return value is passed: an optional struct-return pointer parameter
    /// plus the list of directly returned values.
    fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
}
+
/// Translate a rustc ABI [`Reg`] (register class + size) into a Cranelift [`AbiParam`].
fn reg_to_abi_param(reg: Reg) -> AbiParam {
    let clif_ty = match (reg.kind, reg.size.bytes()) {
        (RegKind::Integer, 1) => types::I8,
        (RegKind::Integer, 2) => types::I16,
        (RegKind::Integer, 4) => types::I32,
        (RegKind::Integer, 8) => types::I64,
        (RegKind::Integer, 16) => types::I128,
        (RegKind::Float, 4) => types::F32,
        (RegKind::Float, 8) => types::F64,
        // Vector registers are represented as a vector of bytes of the given size.
        (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
        _ => unreachable!("{:?}", reg),
    };
    AbiParam::new(clif_ty)
}
+
+fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
+    match arg_attrs.arg_ext {
+        RustcArgExtension::None => {}
+        RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
+        RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
+    }
+    param
+}
+
/// Lower a [`CastTarget`] to the Cranelift [`AbiParam`]s it is passed as.
///
/// A cast target is an optional prefix of registers followed by `rest`: a homogeneous
/// run of `rest.unit` registers covering `rest.total` bytes. Bytes that do not fill a
/// whole unit are passed as one smaller integer at the end.
fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
    // Number of whole `rest.unit`s and the leftover byte count.
    let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
        (0, 0)
    } else {
        (
            cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
            cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
        )
    };

    if cast.prefix.iter().all(|x| x.is_none()) {
        // Simplify to a single unit when there is no prefix and size <= unit size
        if cast.rest.total <= cast.rest.unit.size {
            let clif_ty = match (cast.rest.unit.kind, cast.rest.unit.size.bytes()) {
                (RegKind::Integer, 1) => types::I8,
                (RegKind::Integer, 2) => types::I16,
                (RegKind::Integer, 3..=4) => types::I32,
                (RegKind::Integer, 5..=8) => types::I64,
                (RegKind::Integer, 9..=16) => types::I128,
                (RegKind::Float, 4) => types::F32,
                (RegKind::Float, 8) => types::F64,
                (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
                _ => unreachable!("{:?}", cast.rest.unit),
            };
            return smallvec![AbiParam::new(clif_ty)];
        }
    }

    // Create list of fields in the main structure
    let mut args = cast
        .prefix
        .iter()
        .flatten()
        .map(|&kind| reg_to_abi_param(Reg { kind, size: cast.prefix_chunk_size }))
        .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
        .collect::<SmallVec<_>>();

    // Append final integer
    if rem_bytes != 0 {
        // Only integers can be really split further.
        assert_eq!(cast.rest.unit.kind, RegKind::Integer);
        args.push(reg_to_abi_param(Reg {
            kind: RegKind::Integer,
            size: Size::from_bytes(rem_bytes),
        }));
    }

    args
}
+
impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Lower this argument's [`PassMode`] to the Cranelift parameters it occupies.
    fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
        match self.mode {
            // ZST-like arguments take no parameters at all.
            PassMode::Ignore => smallvec![],
            PassMode::Direct(attrs) => match &self.layout.abi {
                Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
                    AbiParam::new(scalar_to_clif_type(tcx, scalar.clone())),
                    attrs
                )],
                Abi::Vector { .. } => {
                    let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
                    smallvec![AbiParam::new(vector_ty)]
                }
                _ => unreachable!("{:?}", self.layout.abi),
            },
            // Scalar pairs are passed as two separate parameters.
            PassMode::Pair(attrs_a, attrs_b) => match &self.layout.abi {
                Abi::ScalarPair(a, b) => {
                    let a = scalar_to_clif_type(tcx, a.clone());
                    let b = scalar_to_clif_type(tcx, b.clone());
                    smallvec![
                        apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
                        apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
                    ]
                }
                _ => unreachable!("{:?}", self.layout.abi),
            },
            PassMode::Cast(cast) => cast_target_to_abi_params(cast),
            PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
                if on_stack {
                    // Passed on the stack as a struct argument of the value's size.
                    let size = u32::try_from(self.layout.size.bytes()).unwrap();
                    smallvec![apply_arg_attrs_to_abi_param(
                        AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),),
                        attrs
                    )]
                } else {
                    // Passed by pointer.
                    smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
                }
            }
            // Unsized indirect argument: data pointer plus metadata pointer.
            PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
                assert!(!on_stack);
                smallvec![
                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
                    apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
                ]
            }
        }
    }

    /// Lower this return value's [`PassMode`] to an optional struct-return pointer
    /// parameter plus the directly returned values.
    fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
        match self.mode {
            PassMode::Ignore => (None, vec![]),
            PassMode::Direct(_) => match &self.layout.abi {
                Abi::Scalar(scalar) => {
                    (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar.clone()))])
                }
                Abi::Vector { .. } => {
                    let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
                    (None, vec![AbiParam::new(vector_ty)])
                }
                _ => unreachable!("{:?}", self.layout.abi),
            },
            PassMode::Pair(_, _) => match &self.layout.abi {
                Abi::ScalarPair(a, b) => {
                    let a = scalar_to_clif_type(tcx, a.clone());
                    let b = scalar_to_clif_type(tcx, b.clone());
                    (None, vec![AbiParam::new(a), AbiParam::new(b)])
                }
                _ => unreachable!("{:?}", self.layout.abi),
            },
            PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
            // Returned through a caller-provided struct-return pointer.
            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
                assert!(!on_stack);
                (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                unreachable!("unsized return value")
            }
        }
    }
}
+
+pub(super) fn to_casted_value<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    arg: CValue<'tcx>,
+    cast: CastTarget,
+) -> SmallVec<[Value; 2]> {
+    let (ptr, meta) = arg.force_stack(fx);
+    assert!(meta.is_none());
+    let mut offset = 0;
+    cast_target_to_abi_params(cast)
+        .into_iter()
+        .map(|param| {
+            let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
+            offset += i64::from(param.value_type.bytes());
+            val
+        })
+        .collect()
+}
+
+pub(super) fn from_casted_value<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    block_params: &[Value],
+    layout: TyAndLayout<'tcx>,
+    cast: CastTarget,
+) -> CValue<'tcx> {
+    let abi_params = cast_target_to_abi_params(cast);
+    let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
+    let layout_size = u32::try_from(layout.size.bytes()).unwrap();
+    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+        kind: StackSlotKind::ExplicitSlot,
+        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+        // specify stack slot alignment.
+        // Stack slot size may be bigger for for example `[u8; 3]` which is packed into an `i32`.
+        // It may also be smaller for example when the type is a wrapper around an integer with a
+        // larger alignment than the integer.
+        size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
+        offset: None,
+    });
+    let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+    let mut offset = 0;
+    let mut block_params_iter = block_params.iter().copied();
+    for param in abi_params {
+        let val = ptr.offset_i64(fx, offset).store(
+            fx,
+            block_params_iter.next().unwrap(),
+            MemFlags::new(),
+        );
+        offset += i64::from(param.value_type.bytes());
+        val
+    }
+    assert_eq!(block_params_iter.next(), None, "Leftover block param");
+    CValue::by_ref(ptr, layout)
+}
+
+/// Get a set of values to be passed as function arguments.
+pub(super) fn adjust_arg_for_abi<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    arg: CValue<'tcx>,
+    arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+) -> SmallVec<[Value; 2]> {
+    assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
+    match arg_abi.mode {
+        PassMode::Ignore => smallvec![],
+        PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
+        PassMode::Pair(_, _) => {
+            let (a, b) = arg.load_scalar_pair(fx);
+            smallvec![a, b]
+        }
+        PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+        PassMode::Indirect { .. } => match arg.force_stack(fx) {
+            (ptr, None) => smallvec![ptr.get_addr(fx)],
+            (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+        },
+    }
+}
+
+/// Create a [`CValue`] containing the value of a function parameter adding clif function parameters
+/// as necessary.
/// Create a [`CValue`] containing the value of a function parameter adding clif function parameters
/// as necessary.
///
/// `local` and `local_field` only feed the debug comment emitted for the parameter;
/// they are `None` for synthetic arguments such as the `#[track_caller]` location.
/// Returns `None` for `PassMode::Ignore` arguments.
pub(super) fn cvalue_for_param<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    local: Option<mir::Local>,
    local_field: Option<usize>,
    arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    block_params_iter: &mut impl Iterator<Item = Value>,
) -> Option<CValue<'tcx>> {
    // Consume one block param per abi param, checking the clif types line up.
    let block_params = arg_abi
        .get_abi_param(fx.tcx)
        .into_iter()
        .map(|abi_param| {
            let block_param = block_params_iter.next().unwrap();
            assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
            block_param
        })
        .collect::<SmallVec<[_; 2]>>();

    crate::abi::comments::add_arg_comment(
        fx,
        "arg",
        local,
        local_field,
        &block_params,
        arg_abi.mode,
        arg_abi.layout,
    );

    match arg_abi.mode {
        PassMode::Ignore => None,
        PassMode::Direct(_) => {
            assert_eq!(block_params.len(), 1, "{:?}", block_params);
            Some(CValue::by_val(block_params[0], arg_abi.layout))
        }
        PassMode::Pair(_, _) => {
            assert_eq!(block_params.len(), 2, "{:?}", block_params);
            Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
        }
        PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
            assert_eq!(block_params.len(), 1, "{:?}", block_params);
            Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
        }
        // Unsized indirect argument: data pointer plus metadata.
        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
            assert_eq!(block_params.len(), 2, "{:?}", block_params);
            Some(CValue::by_ref_unsized(
                Pointer::new(block_params[0]),
                block_params[1],
                arg_abi.layout,
            ))
        }
    }
}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
new file mode 100644
index 00000000000..e1c53224b4f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -0,0 +1,188 @@
+//! Return value handling
+
+use crate::prelude::*;
+
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use smallvec::{smallvec, SmallVec};
+
+/// Can the given type be returned into an ssa var or does it need to be returned on the stack.
+pub(crate) fn can_return_to_ssa_var<'tcx>(
+    fx: &FunctionCx<'_, '_, 'tcx>,
+    func: &mir::Operand<'tcx>,
+    args: &[mir::Operand<'tcx>],
+) -> bool {
+    let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
+    let fn_sig =
+        fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+    // Handle special calls like instrinsics and empty drop glue.
+    let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+        let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+            .unwrap()
+            .unwrap()
+            .polymorphize(fx.tcx);
+
+        match instance.def {
+            InstanceDef::Intrinsic(_) | InstanceDef::DropGlue(_, _) => {
+                return true;
+            }
+            _ => Some(instance),
+        }
+    } else {
+        None
+    };
+
+    let extra_args = &args[fn_sig.inputs().len()..];
+    let extra_args = extra_args
+        .iter()
+        .map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx)))
+        .collect::<Vec<_>>();
+    let fn_abi = if let Some(instance) = instance {
+        FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
+    } else {
+        FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
+    };
+    match fn_abi.ret.mode {
+        PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) => true,
+        // FIXME Make it possible to return Cast and Indirect to an ssa var.
+        PassMode::Cast(_) | PassMode::Indirect { .. } => false,
+    }
+}
+
+/// Return a place where the return value of the current function can be written to. If necessary
+/// this adds an extra parameter pointing to where the return value needs to be stored.
/// Return a place where the return value of the current function can be written to. If necessary
/// this adds an extra parameter pointing to where the return value needs to be stored.
pub(super) fn codegen_return_param<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
    block_params_iter: &mut impl Iterator<Item = Value>,
) -> CPlace<'tcx> {
    // `ret_param` collects whichever block params (if any) were consumed for the return
    // value, so they can be shown in the "ret" arg comment emitted below.
    let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
        // No return value: a dummy place that discards all writes.
        PassMode::Ignore => (CPlace::no_place(fx.fn_abi.as_ref().unwrap().ret.layout), smallvec![]),
        // Value returned in register(s): allocate an ordinary local place for RETURN_PLACE.
        PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
            let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
            (
                super::make_local_place(
                    fx,
                    RETURN_PLACE,
                    fx.fn_abi.as_ref().unwrap().ret.layout,
                    is_ssa,
                ),
                smallvec![],
            )
        }
        // Value returned through a caller-provided pointer: consume the pointer-typed
        // block param and write the return value through it.
        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
            let ret_param = block_params_iter.next().unwrap();
            assert_eq!(fx.bcx.func.dfg.value_type(ret_param), pointer_ty(fx.tcx));
            (
                CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
                smallvec![ret_param],
            )
        }
        // `extra_attrs: Some(_)` would mean an unsized value, which can't be returned.
        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
            unreachable!("unsized return value")
        }
    };

    crate::abi::comments::add_arg_comment(
        fx,
        "ret",
        Some(RETURN_PLACE),
        None,
        &ret_param,
        fx.fn_abi.as_ref().unwrap().ret.mode,
        fx.fn_abi.as_ref().unwrap().ret.layout,
    );

    ret_place
}
+
+/// Invokes the closure with if necessary a value representing the return pointer. When the closure
+/// returns the call return value(s) if any are written to the correct place.
/// Invokes the closure with if necessary a value representing the return pointer. When the closure
/// returns the call return value(s) if any are written to the correct place.
pub(super) fn codegen_with_call_return_arg<'tcx, T>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    ret_place: Option<CPlace<'tcx>>,
    f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> (Inst, T),
) -> (Inst, T) {
    // For indirect returns, compute the pointer the callee must write through
    // *before* emitting the call itself.
    let return_ptr = match ret_arg_abi.mode {
        PassMode::Ignore => None,
        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
            Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
            None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
        },
        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
            unreachable!("unsized return value")
        }
        PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => None,
    };

    // Let the caller emit the actual call instruction.
    let (call_inst, meta) = f(fx, return_ptr);

    // Copy register return values into `ret_place`; indirect returns were already
    // written by the callee through `return_ptr`.
    match ret_arg_abi.mode {
        PassMode::Ignore => {}
        PassMode::Direct(_) => {
            if let Some(ret_place) = ret_place {
                let ret_val = fx.bcx.inst_results(call_inst)[0];
                ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
            }
        }
        PassMode::Pair(_, _) => {
            if let Some(ret_place) = ret_place {
                let ret_val_a = fx.bcx.inst_results(call_inst)[0];
                let ret_val_b = fx.bcx.inst_results(call_inst)[1];
                ret_place.write_cvalue(
                    fx,
                    CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout),
                );
            }
        }
        PassMode::Cast(cast) => {
            if let Some(ret_place) = ret_place {
                // Copy the results out first: `inst_results` borrows `fx.bcx`, which
                // `from_casted_value` needs mutably.
                let results = fx
                    .bcx
                    .inst_results(call_inst)
                    .iter()
                    .copied()
                    .collect::<SmallVec<[Value; 2]>>();
                let result =
                    super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
                ret_place.write_cvalue(fx, result);
            }
        }
        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {}
        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
            unreachable!("unsized return value")
        }
    }

    (call_inst, meta)
}
+
+/// Codegen a return instruction with the right return value(s) if any.
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
+    match fx.fn_abi.as_ref().unwrap().ret.mode {
+        PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+            fx.bcx.ins().return_(&[]);
+        }
+        PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+            unreachable!("unsized return value")
+        }
+        PassMode::Direct(_) => {
+            let place = fx.get_local_place(RETURN_PLACE);
+            let ret_val = place.to_cvalue(fx).load_scalar(fx);
+            fx.bcx.ins().return_(&[ret_val]);
+        }
+        PassMode::Pair(_, _) => {
+            let place = fx.get_local_place(RETURN_PLACE);
+            let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+            fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+        }
+        PassMode::Cast(cast) => {
+            let place = fx.get_local_place(RETURN_PLACE);
+            let ret_val = place.to_cvalue(fx);
+            let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
+            fx.bcx.ins().return_(&ret_vals);
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
new file mode 100644
index 00000000000..d39486c2f10
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -0,0 +1,139 @@
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_span::symbol::sym;
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+    tcx: TyCtxt<'_>,
+    module: &mut impl Module,
+    unwind_context: &mut UnwindContext,
+) -> bool {
+    let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+        use rustc_middle::middle::dependency_format::Linkage;
+        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+    });
+    if any_dynamic_crate {
+        false
+    } else if let Some(kind) = tcx.allocator_kind(()) {
+        codegen_inner(module, unwind_context, kind);
+        true
+    } else {
+        false
+    }
+}
+
/// Emit one forwarding shim (`__rust_alloc` etc.) per allocator method plus the
/// `__rust_alloc_error_handler` shim, each calling the chosen allocator `kind`'s
/// implementation, and register all of them with the unwind context.
fn codegen_inner(
    module: &mut impl Module,
    unwind_context: &mut UnwindContext,
    kind: AllocatorKind,
) {
    // `usize` has the same width as a pointer on all supported targets.
    let usize_ty = module.target_config().pointer_type();

    for method in ALLOCATOR_METHODS {
        // Lower the abstract allocator signature to concrete scalar params.
        let mut arg_tys = Vec::with_capacity(method.inputs.len());
        for ty in method.inputs.iter() {
            match *ty {
                AllocatorTy::Layout => {
                    // A `Layout` is passed as two separate scalars.
                    arg_tys.push(usize_ty); // size
                    arg_tys.push(usize_ty); // align
                }
                AllocatorTy::Ptr => arg_tys.push(usize_ty),
                AllocatorTy::Usize => arg_tys.push(usize_ty),

                AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
            }
        }
        let output = match method.output {
            AllocatorTy::ResultPtr => Some(usize_ty),
            AllocatorTy::Unit => None,

            AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
                panic!("invalid allocator output")
            }
        };

        let sig = Signature {
            call_conv: CallConv::triple_default(module.isa().triple()),
            params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
            returns: output.into_iter().map(AbiParam::new).collect(),
        };

        // Shim `__rust_<name>` (exported) simply tail-forwards to the allocator's
        // own `<prefix>_<name>` symbol (imported); both share the same signature.
        let caller_name = format!("__rust_{}", method.name);
        let callee_name = kind.fn_name(method.name);
        //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);

        let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();

        let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();

        // Build the shim body: one block that calls the callee with the shim's own
        // params and returns the callee's results unchanged.
        let mut ctx = Context::new();
        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
        {
            let mut func_ctx = FunctionBuilderContext::new();
            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);

            let block = bcx.create_block();
            bcx.switch_to_block(block);
            let args = arg_tys
                .into_iter()
                .map(|ty| bcx.append_block_param(block, ty))
                .collect::<Vec<Value>>();

            let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
            let call_inst = bcx.ins().call(callee_func_ref, &args);
            let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error

            bcx.ins().return_(&results);
            bcx.seal_all_blocks();
            bcx.finalize();
        }
        module
            .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
            .unwrap();
        unwind_context.add_function(func_id, &ctx, module.isa());
    }

    // `__rust_alloc_error_handler(size: usize, align: usize) -> !` shim.
    let sig = Signature {
        call_conv: CallConv::triple_default(module.isa().triple()),
        params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
        returns: vec![],
    };

    let callee_name = kind.fn_name(sym::oom);
    //eprintln!("Codegen allocator shim {} -> {} ({:?} -> {:?})", caller_name, callee_name, sig.params, sig.returns);

    let func_id =
        module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();

    let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();

    let mut ctx = Context::new();
    ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
    {
        let mut func_ctx = FunctionBuilderContext::new();
        let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);

        let block = bcx.create_block();
        bcx.switch_to_block(block);
        let args = (&[usize_ty, usize_ty])
            .iter()
            .map(|&ty| bcx.append_block_param(block, ty))
            .collect::<Vec<Value>>();

        let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
        bcx.ins().call(callee_func_ref, &args);

        // The error handler never returns; terminate the block with a trap.
        bcx.ins().trap(TrapCode::UnreachableCodeReached);
        bcx.seal_all_blocks();
        bcx.finalize();
    }
    module
        .define_function(func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
        .unwrap();
    unwind_context.add_function(func_id, &ctx, module.isa());
}
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
new file mode 100644
index 00000000000..efead25552f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -0,0 +1,59 @@
+//! SSA analysis
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
+
/// Whether a MIR local may be kept in an ssa variable or must be spilled to a stack slot.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SsaKind {
    /// The local must live in memory (its address is taken, or its type doesn't fit
    /// in one or two Cranelift values, or a call can't return into an ssa var).
    NotSsa,
    /// The local can be represented as one or two Cranelift ssa values.
    Ssa,
}
+
+pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
+    let mut flag_map = fx
+        .mir
+        .local_decls
+        .iter()
+        .map(|local_decl| {
+            let ty = fx.monomorphize(local_decl.ty);
+            if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+                SsaKind::Ssa
+            } else {
+                SsaKind::NotSsa
+            }
+        })
+        .collect::<IndexVec<Local, SsaKind>>();
+
+    for bb in fx.mir.basic_blocks().iter() {
+        for stmt in bb.statements.iter() {
+            match &stmt.kind {
+                Assign(place_and_rval) => match &place_and_rval.1 {
+                    Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+                        not_ssa(&mut flag_map, place.local)
+                    }
+                    _ => {}
+                },
+                _ => {}
+            }
+        }
+
+        match &bb.terminator().kind {
+            TerminatorKind::Call { destination, func, args, .. } => {
+                if let Some((dest_place, _dest_bb)) = destination {
+                    if !crate::abi::can_return_to_ssa_var(fx, func, args) {
+                        not_ssa(&mut flag_map, dest_place.local)
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+
+    flag_map
+}
+
/// Demote `local` to [`SsaKind::NotSsa`] in the flag map.
fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
    flag_map[local] = SsaKind::NotSsa;
}
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
new file mode 100644
index 00000000000..22897c43e7e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -0,0 +1,291 @@
+//! Creation of ar archives like for the lib and staticlib crate type
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
+use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_session::Session;
+
+use object::{Object, ObjectSymbol, SymbolKind};
+
/// One member of the archive being built.
#[derive(Debug)]
enum ArchiveEntry {
    /// An entry copied out of an existing source archive
    /// (indices into `ArArchiveBuilder::src_archives` and its entry list).
    FromArchive { archive_index: usize, entry_index: usize },
    /// A file from disk that is added verbatim.
    File(PathBuf),
}
+
/// Builder for `ar` archives (rlib / staticlib outputs).
pub(crate) struct ArArchiveBuilder<'a> {
    sess: &'a Session,
    /// Path the finished archive is written to.
    dst: PathBuf,
    /// Directories searched by `add_native_library`.
    lib_search_paths: Vec<PathBuf>,
    /// Emit a GNU-style archive (with a GNU symbol table) instead of a BSD one.
    use_gnu_style_archive: bool,
    /// Skip the built-in symbol table and run the external `ranlib` instead.
    no_builtin_ranlib: bool,

    src_archives: Vec<(PathBuf, ar::Archive<File>)>,
    // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
    // the end of an archive for linkers to not get confused.
    entries: Vec<(String, ArchiveEntry)>,
}
+
impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
    /// Create a builder writing to `output`, optionally seeded with all entries of an
    /// existing archive at `input`.
    fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> Self {
        use rustc_codegen_ssa::back::link::archive_search_paths;

        let (src_archives, entries) = if let Some(input) = input {
            let mut archive = ar::Archive::new(File::open(input).unwrap());
            let mut entries = Vec::new();

            // Record every entry of the seed archive by its index. `archive_index` is 0
            // because the seed archive is the first (and only) one in `src_archives`.
            let mut i = 0;
            while let Some(entry) = archive.next_entry() {
                let entry = entry.unwrap();
                entries.push((
                    String::from_utf8(entry.header().identifier().to_vec()).unwrap(),
                    ArchiveEntry::FromArchive { archive_index: 0, entry_index: i },
                ));
                i += 1;
            }

            (vec![(input.to_owned(), archive)], entries)
        } else {
            (vec![], Vec::new())
        };

        ArArchiveBuilder {
            sess,
            dst: output.to_path_buf(),
            lib_search_paths: archive_search_paths(sess),
            use_gnu_style_archive: sess.target.archive_format == "gnu",
            // FIXME fix builtin ranlib on macOS
            no_builtin_ranlib: sess.target.is_like_osx,

            src_archives,
            entries,
        }
    }

    /// Names of all entries currently scheduled for the archive, in order.
    fn src_files(&mut self) -> Vec<String> {
        self.entries.iter().map(|(name, _)| name.clone()).collect()
    }

    /// Remove the entry called `name`. Panics if no such entry exists.
    fn remove_file(&mut self, name: &str) {
        let index = self
            .entries
            .iter()
            .position(|(entry_name, _)| entry_name == name)
            .expect("Tried to remove file not existing in src archive");
        self.entries.remove(index);
    }

    /// Schedule a file from disk for inclusion, keyed by its file name.
    fn add_file(&mut self, file: &Path) {
        self.entries.push((
            file.file_name().unwrap().to_str().unwrap().to_string(),
            ArchiveEntry::File(file.to_owned()),
        ));
    }

    /// Locate a native library on the search paths and merge its members in.
    fn add_native_library(&mut self, name: rustc_span::symbol::Symbol, verbatim: bool) {
        let location = find_library(name, verbatim, &self.lib_search_paths, self.sess);
        self.add_archive(location.clone(), |_| false).unwrap_or_else(|e| {
            panic!("failed to add native library {}: {}", location.to_string_lossy(), e);
        });
    }

    /// Merge the members of an rlib, filtering out metadata and (depending on `lto`
    /// and `skip_objects`) some object files.
    fn add_rlib(
        &mut self,
        rlib: &Path,
        name: &str,
        lto: bool,
        skip_objects: bool,
    ) -> std::io::Result<()> {
        let obj_start = name.to_owned();

        self.add_archive(rlib.to_owned(), move |fname: &str| {
            // Ignore metadata files, no matter the name.
            if fname == METADATA_FILENAME {
                return true;
            }

            // Don't include Rust objects if LTO is enabled
            if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") {
                return true;
            }

            // Otherwise if this is *not* a rust object and we're skipping
            // objects then skip this file
            if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
                return true;
            }

            // ok, don't skip this
            false
        })
    }

    // The symbol table is built in `build` instead; nothing to do here.
    fn update_symbols(&mut self) {}

    /// Read all scheduled entries into memory, build the symbol table (unless an
    /// external ranlib is used), write the archive to `self.dst` and, if requested,
    /// run `ranlib` on the result.
    fn build(mut self) {
        enum BuilderKind {
            Bsd(ar::Builder<File>),
            Gnu(ar::GnuBuilder<File>),
        }

        let sess = self.sess;

        // entry name (bytes) -> defined symbol names, fed to the ar builder below.
        let mut symbol_table = BTreeMap::new();

        let mut entries = Vec::new();

        for (entry_name, entry) in self.entries {
            // FIXME only read the symbol table of the object files to avoid having to keep all
            // object files in memory at once, or read them twice.
            let data = match entry {
                ArchiveEntry::FromArchive { archive_index, entry_index } => {
                    // FIXME read symbols from symtab
                    use std::io::Read;
                    let (ref _src_archive_path, ref mut src_archive) =
                        self.src_archives[archive_index];
                    let mut entry = src_archive.jump_to_entry(entry_index).unwrap();
                    let mut data = Vec::new();
                    entry.read_to_end(&mut data).unwrap();
                    data
                }
                ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
                    sess.fatal(&format!(
                        "error while reading object file during archive building: {}",
                        err
                    ));
                }),
            };

            if !self.no_builtin_ranlib {
                match object::File::parse(&*data) {
                    Ok(object) => {
                        // Collect the globally visible, defined data/text/tls symbols
                        // of this object for the archive symbol table.
                        symbol_table.insert(
                            entry_name.as_bytes().to_vec(),
                            object
                                .symbols()
                                .filter_map(|symbol| {
                                    if symbol.is_undefined()
                                        || symbol.is_local()
                                        || symbol.kind() != SymbolKind::Data
                                            && symbol.kind() != SymbolKind::Text
                                            && symbol.kind() != SymbolKind::Tls
                                    {
                                        None
                                    } else {
                                        symbol.name().map(|name| name.as_bytes().to_vec()).ok()
                                    }
                                })
                                .collect::<Vec<_>>(),
                        );
                    }
                    Err(err) => {
                        let err = err.to_string();
                        if err == "Unknown file magic" {
                            // Not an object file; skip it.
                        } else {
                            sess.fatal(&format!(
                                "error parsing `{}` during archive creation: {}",
                                entry_name, err
                            ));
                        }
                    }
                }
            }

            entries.push((entry_name, data));
        }

        let mut builder = if self.use_gnu_style_archive {
            BuilderKind::Gnu(
                ar::GnuBuilder::new(
                    File::create(&self.dst).unwrap_or_else(|err| {
                        sess.fatal(&format!(
                            "error opening destination during archive building: {}",
                            err
                        ));
                    }),
                    entries.iter().map(|(name, _)| name.as_bytes().to_vec()).collect(),
                    ar::GnuSymbolTableFormat::Size32,
                    symbol_table,
                )
                .unwrap(),
            )
        } else {
            BuilderKind::Bsd(
                ar::Builder::new(
                    File::create(&self.dst).unwrap_or_else(|err| {
                        sess.fatal(&format!(
                            "error opening destination during archive building: {}",
                            err
                        ));
                    }),
                    symbol_table,
                )
                .unwrap(),
            )
        };

        // Add all files
        for (entry_name, data) in entries.into_iter() {
            let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
            match builder {
                BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
                BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
            }
        }

        // Finalize archive
        std::mem::drop(builder);

        if self.no_builtin_ranlib {
            let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");

            // Run ranlib to be able to link the archive
            let status = std::process::Command::new(ranlib)
                .arg(self.dst)
                .status()
                .expect("Couldn't run ranlib");

            if !status.success() {
                self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
            }
        }
    }

    /// Raw-dylib import libraries are not supported by this backend.
    fn inject_dll_import_lib(
        &mut self,
        _lib_name: &str,
        _dll_imports: &[rustc_middle::middle::cstore::DllImport],
        _tmpdir: &rustc_data_structures::temp_dir::MaybeTempDir,
    ) {
        bug!("injecting dll imports is not supported");
    }
}
+
impl<'a> ArArchiveBuilder<'a> {
    /// Open the archive at `archive_path` and schedule every entry for which
    /// `skip(name)` returns `false`. The archive itself is kept open in
    /// `src_archives` so `build` can read the entry data later.
    fn add_archive<F>(&mut self, archive_path: PathBuf, mut skip: F) -> std::io::Result<()>
    where
        F: FnMut(&str) -> bool + 'static,
    {
        let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
        // Index this archive will occupy once pushed below.
        let archive_index = self.src_archives.len();

        // `i` tracks the entry index even for skipped entries, so stored indices
        // still match positions inside the source archive.
        let mut i = 0;
        while let Some(entry) = archive.next_entry() {
            let entry = entry?;
            let file_name = String::from_utf8(entry.header().identifier().to_vec())
                .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
            if !skip(&file_name) {
                self.entries
                    .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
            }
            i += 1;
        }

        self.src_archives.push((archive_path, archive));
        Ok(())
    }
}
diff --git a/compiler/rustc_codegen_cranelift/src/backend.rs b/compiler/rustc_codegen_cranelift/src/backend.rs
new file mode 100644
index 00000000000..05c06bac27d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/backend.rs
@@ -0,0 +1,152 @@
+//! Abstraction around the object writing crate
+
+use std::convert::{TryFrom, TryInto};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_session::Session;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_module::FuncId;
+use cranelift_object::{ObjectBuilder, ObjectModule, ObjectProduct};
+
+use object::write::*;
+use object::{RelocationEncoding, SectionKind, SymbolFlags};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
/// Object files that can carry the `.rustc` crate-metadata section.
pub(crate) trait WriteMetadata {
    /// Append `data` as a `.rustc` section, exposed under `symbol_name`.
    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>);
}
+
impl WriteMetadata for object::write::Object {
    fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>) {
        // Place the section in the standard data segment with 1-byte alignment.
        let segment = self.segment_name(object::write::StandardSegment::Data).to_vec();
        let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
        let offset = self.append_section_data(section_id, &data, 1);
        // For MachO and probably PE this is necessary to prevent the linker from throwing away the
        // .rustc section. For ELF this isn't necessary, but it also doesn't harm.
        self.add_symbol(object::write::Symbol {
            name: symbol_name.into_bytes(),
            value: offset,
            size: data.len() as u64,
            kind: object::SymbolKind::Data,
            scope: object::SymbolScope::Dynamic,
            weak: false,
            section: SymbolSection::Section(section_id),
            flags: SymbolFlags::None,
        });
    }
}
+
/// Object files that debug info (DWARF) sections and relocations can be written into.
pub(crate) trait WriteDebugInfo {
    /// Implementation-specific handle to an added debug section.
    type SectionId: Copy;

    /// Add the given DWARF section and return its handle.
    fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
    /// Record a relocation inside section `from`, resolving its target through
    /// `section_map` (for section-relative relocs) or the function symbols.
    fn add_debug_reloc(
        &mut self,
        section_map: &FxHashMap<SectionId, Self::SectionId>,
        from: &Self::SectionId,
        reloc: &DebugReloc,
    );
}
+
impl WriteDebugInfo for ObjectProduct {
    type SectionId = (object::write::SectionId, object::write::SymbolId);

    fn add_debug_section(
        &mut self,
        id: SectionId,
        data: Vec<u8>,
    ) -> (object::write::SectionId, object::write::SymbolId) {
        let name = if self.object.format() == object::BinaryFormat::MachO {
            id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
        } else {
            id.name().to_string()
        }
        .into_bytes();

        let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
        // FIXME use SHT_X86_64_UNWIND for .eh_frame
        let section_id = self.object.add_section(
            segment,
            name,
            // .eh_frame is read at runtime by the unwinder, so it can't be a pure debug section.
            if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
        );
        self.object
            .section_mut(section_id)
            .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
        // Also return the section symbol so relocations can target the section itself.
        let symbol_id = self.object.section_symbol(section_id);
        (section_id, symbol_id)
    }

    fn add_debug_reloc(
        &mut self,
        section_map: &FxHashMap<SectionId, Self::SectionId>,
        from: &Self::SectionId,
        reloc: &DebugReloc,
    ) {
        // Resolve the relocation target to a symbol plus an offset folded into the addend.
        let (symbol, symbol_offset) = match reloc.name {
            DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
            DebugRelocName::Symbol(id) => {
                let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
                self.object
                    .symbol_section_and_offset(symbol_id)
                    .expect("Debug reloc for undef sym???")
            }
        };
        self.object
            .add_relocation(
                from.0,
                Relocation {
                    offset: u64::from(reloc.offset),
                    symbol,
                    kind: reloc.kind,
                    encoding: RelocationEncoding::Generic,
                    size: reloc.size * 8, // `object` wants the size in bits, `reloc` stores bytes
                    addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
                },
            )
            .unwrap();
    }
}
+
+pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object)) -> Vec<u8> {
+    let triple = crate::target_triple(sess);
+
+    let binary_format = match triple.binary_format {
+        target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf,
+        target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff,
+        target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO,
+        binary_format => sess.fatal(&format!("binary format {} is unsupported", binary_format)),
+    };
+    let architecture = match triple.architecture {
+        target_lexicon::Architecture::X86_32(_) => object::Architecture::I386,
+        target_lexicon::Architecture::X86_64 => object::Architecture::X86_64,
+        target_lexicon::Architecture::Arm(_) => object::Architecture::Arm,
+        target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64,
+        architecture => {
+            sess.fatal(&format!("target architecture {:?} is unsupported", architecture,))
+        }
+    };
+    let endian = match triple.endianness().unwrap() {
+        target_lexicon::Endianness::Little => object::Endianness::Little,
+        target_lexicon::Endianness::Big => object::Endianness::Big,
+    };
+
+    let mut metadata_object = object::write::Object::new(binary_format, architecture, endian);
+    metadata_object.add_file_symbol(name.as_bytes().to_vec());
+    f(&mut metadata_object);
+    metadata_object.write().unwrap()
+}
+
+pub(crate) fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectModule {
+    let mut builder =
+        ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+    // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm binary size
+    // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+    // can easily double the amount of time necessary to perform linking.
+    builder.per_function_section(sess.opts.debugging_opts.function_sections.unwrap_or(false));
+    ObjectModule::new(builder)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
new file mode 100644
index 00000000000..3d78eed77b9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -0,0 +1,920 @@
+//! Codegen of a single function
+
+use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_target::abi::call::FnAbi;
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+pub(crate) fn codegen_fn<'tcx>(
+    cx: &mut crate::CodegenCx<'tcx>,
+    module: &mut dyn Module,
+    instance: Instance<'tcx>,
+) {
+    let tcx = cx.tcx;
+
+    let _inst_guard =
+        crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+    debug_assert!(!instance.substs.needs_infer());
+
+    let mir = tcx.instance_mir(instance.def);
+    let _mir_guard = crate::PrintOnPanic(|| {
+        let mut buf = Vec::new();
+        rustc_mir::util::write_mir_pretty(tcx, Some(instance.def_id()), &mut buf).unwrap();
+        String::from_utf8_lossy(&buf).into_owned()
+    });
+
+    // Declare function
+    let symbol_name = tcx.symbol_name(instance);
+    let sig = get_function_sig(tcx, module.isa().triple(), instance);
+    let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
+
+    cx.cached_context.clear();
+
+    // Make the FunctionBuilder
+    let mut func_ctx = FunctionBuilderContext::new();
+    let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+    func.name = ExternalName::user(0, func_id.as_u32());
+    func.signature = sig;
+    func.collect_debug_info();
+
+    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+    // Predefine blocks
+    let start_block = bcx.create_block();
+    let block_map: IndexVec<BasicBlock, Block> =
+        (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+
+    // Make FunctionCx
+    let pointer_type = module.target_config().pointer_type();
+    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+    let mut fx = FunctionCx {
+        cx,
+        module,
+        tcx,
+        pointer_type,
+        constants_cx: ConstantCx::new(),
+
+        instance,
+        symbol_name,
+        mir,
+        fn_abi: Some(FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
+
+        bcx,
+        block_map,
+        local_map: IndexVec::with_capacity(mir.local_decls.len()),
+        caller_location: None, // set by `codegen_fn_prelude`
+
+        clif_comments,
+        source_info_set: indexmap::IndexSet::new(),
+        next_ssa_var: 0,
+
+        inline_asm_index: 0,
+    };
+
+    let arg_uninhabited = fx
+        .mir
+        .args_iter()
+        .any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+
+    if !crate::constant::check_constants(&mut fx) {
+        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+        crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
+    } else if arg_uninhabited {
+        fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+        fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+        crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+    } else {
+        tcx.sess.time("codegen clif ir", || {
+            tcx.sess
+                .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+            codegen_fn_content(&mut fx);
+        });
+    }
+
+    // Recover all necessary data from fx, before accessing func will prevent future access to it.
+    let instance = fx.instance;
+    let mut clif_comments = fx.clif_comments;
+    let source_info_set = fx.source_info_set;
+    let local_map = fx.local_map;
+
+    fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
+    // Store function in context
+    let context = &mut cx.cached_context;
+    context.func = func;
+
+    crate::pretty_clif::write_clif_file(
+        tcx,
+        "unopt",
+        module.isa(),
+        instance,
+        &context,
+        &clif_comments,
+    );
+
+    // Verify function
+    verify_func(tcx, &clif_comments, &context.func);
+
+    // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+    // instruction, which doesn't have an encoding.
+    context.compute_cfg();
+    context.compute_domtree();
+    context.eliminate_unreachable_code(module.isa()).unwrap();
+    context.dce(module.isa()).unwrap();
+    // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+    // invalidate it when it would change.
+    context.domtree.clear();
+
+    // Perform rust specific optimizations
+    tcx.sess.time("optimize clif ir", || {
+        crate::optimize::optimize_function(
+            tcx,
+            module.isa(),
+            instance,
+            context,
+            &mut clif_comments,
+        );
+    });
+
+    // Define function
+    tcx.sess.time("define function", || {
+        context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
+        module
+            .define_function(func_id, context, &mut NullTrapSink {}, &mut NullStackMapSink {})
+            .unwrap()
+    });
+
+    // Write optimized function to file for debugging
+    crate::pretty_clif::write_clif_file(
+        tcx,
+        "opt",
+        module.isa(),
+        instance,
+        &context,
+        &clif_comments,
+    );
+
+    if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
+        crate::pretty_clif::write_ir_file(
+            tcx,
+            || format!("{}.vcode", tcx.symbol_name(instance).name),
+            |file| file.write_all(disasm.as_bytes()),
+        )
+    }
+
+    // Define debuginfo for function
+    let isa = module.isa();
+    let debug_context = &mut cx.debug_context;
+    let unwind_context = &mut cx.unwind_context;
+    tcx.sess.time("generate debug info", || {
+        if let Some(debug_context) = debug_context {
+            debug_context.define_function(
+                instance,
+                func_id,
+                symbol_name.name,
+                isa,
+                context,
+                &source_info_set,
+                local_map,
+            );
+        }
+        unwind_context.add_function(func_id, &context, isa);
+    });
+
+    // Clear context to make it usable for the next function
+    context.clear();
+}
+
+pub(crate) fn verify_func(
+    tcx: TyCtxt<'_>,
+    writer: &crate::pretty_clif::CommentWriter,
+    func: &Function,
+) {
+    tcx.sess.time("verify clif ir", || {
+        let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+        match cranelift_codegen::verify_function(&func, &flags) {
+            Ok(_) => {}
+            Err(err) => {
+                tcx.sess.err(&format!("{:?}", err));
+                let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+                    &func,
+                    None,
+                    Some(Box::new(writer)),
+                    err,
+                );
+                tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+            }
+        }
+    });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
+    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+        let block = fx.get_block(bb);
+        fx.bcx.switch_to_block(block);
+
+        if bb_data.is_cleanup {
+            // Unwinding after panicking is not supported
+            continue;
+
+            // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+            // so for cleanup blocks.
+        }
+
+        fx.bcx.ins().nop();
+        for stmt in &bb_data.statements {
+            fx.set_debug_loc(stmt.source_info);
+            codegen_stmt(fx, block, stmt);
+        }
+
+        if fx.clif_comments.enabled() {
+            let mut terminator_head = "\n".to_string();
+            bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+            let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+            fx.add_comment(inst, terminator_head);
+        }
+
+        fx.set_debug_loc(bb_data.terminator().source_info);
+
+        match &bb_data.terminator().kind {
+            TerminatorKind::Goto { target } => {
+                if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+                    let mut can_immediately_return = true;
+                    for stmt in &fx.mir[*target].statements {
+                        if let StatementKind::StorageDead(_) = stmt.kind {
+                        } else {
+                            // FIXME Can sometimes happen, see rust-lang/rust#70531
+                            can_immediately_return = false;
+                            break;
+                        }
+                    }
+
+                    if can_immediately_return {
+                        crate::abi::codegen_return(fx);
+                        continue;
+                    }
+                }
+
+                let block = fx.get_block(*target);
+                fx.bcx.ins().jump(block, &[]);
+            }
+            TerminatorKind::Return => {
+                crate::abi::codegen_return(fx);
+            }
+            TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+                if !fx.tcx.sess.overflow_checks() {
+                    if let mir::AssertKind::OverflowNeg(_) = *msg {
+                        let target = fx.get_block(*target);
+                        fx.bcx.ins().jump(target, &[]);
+                        continue;
+                    }
+                }
+                let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+                let target = fx.get_block(*target);
+                let failure = fx.bcx.create_block();
+                // FIXME Mark failure block as cold once Cranelift supports it
+
+                if *expected {
+                    fx.bcx.ins().brz(cond, failure, &[]);
+                } else {
+                    fx.bcx.ins().brnz(cond, failure, &[]);
+                };
+                fx.bcx.ins().jump(target, &[]);
+
+                fx.bcx.switch_to_block(failure);
+                fx.bcx.ins().nop();
+
+                match msg {
+                    AssertKind::BoundsCheck { ref len, ref index } => {
+                        let len = codegen_operand(fx, len).load_scalar(fx);
+                        let index = codegen_operand(fx, index).load_scalar(fx);
+                        let location = fx
+                            .get_caller_location(bb_data.terminator().source_info.span)
+                            .load_scalar(fx);
+
+                        codegen_panic_inner(
+                            fx,
+                            rustc_hir::LangItem::PanicBoundsCheck,
+                            &[index, len, location],
+                            bb_data.terminator().source_info.span,
+                        );
+                    }
+                    _ => {
+                        let msg_str = msg.description();
+                        codegen_panic(fx, msg_str, bb_data.terminator().source_info.span);
+                    }
+                }
+            }
+
+            TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+                let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+                let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+                    || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+                if use_bool_opt {
+                    assert_eq!(targets.iter().count(), 1);
+                    let (then_value, then_block) = targets.iter().next().unwrap();
+                    let then_block = fx.get_block(then_block);
+                    let else_block = fx.get_block(targets.otherwise());
+                    let test_zero = match then_value {
+                        0 => true,
+                        1 => false,
+                        _ => unreachable!("{:?}", targets),
+                    };
+
+                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+                    let (discr, is_inverted) =
+                        crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+                    let test_zero = if is_inverted { !test_zero } else { test_zero };
+                    let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+                    let discr =
+                        crate::optimize::peephole::make_branchable_value(&mut fx.bcx, discr);
+                    if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+                        &fx.bcx, discr, test_zero,
+                    ) {
+                        if taken {
+                            fx.bcx.ins().jump(then_block, &[]);
+                        } else {
+                            fx.bcx.ins().jump(else_block, &[]);
+                        }
+                    } else {
+                        if test_zero {
+                            fx.bcx.ins().brz(discr, then_block, &[]);
+                            fx.bcx.ins().jump(else_block, &[]);
+                        } else {
+                            fx.bcx.ins().brnz(discr, then_block, &[]);
+                            fx.bcx.ins().jump(else_block, &[]);
+                        }
+                    }
+                } else {
+                    let mut switch = ::cranelift_frontend::Switch::new();
+                    for (value, block) in targets.iter() {
+                        let block = fx.get_block(block);
+                        switch.set_entry(value, block);
+                    }
+                    let otherwise_block = fx.get_block(targets.otherwise());
+                    switch.emit(&mut fx.bcx, discr, otherwise_block);
+                }
+            }
+            TerminatorKind::Call {
+                func,
+                args,
+                destination,
+                fn_span,
+                cleanup: _,
+                from_hir_call: _,
+            } => {
+                fx.tcx.sess.time("codegen call", || {
+                    crate::abi::codegen_terminator_call(fx, *fn_span, func, args, *destination)
+                });
+            }
+            TerminatorKind::InlineAsm {
+                template,
+                operands,
+                options,
+                destination,
+                line_spans: _,
+            } => {
+                crate::inline_asm::codegen_inline_asm(
+                    fx,
+                    bb_data.terminator().source_info.span,
+                    template,
+                    operands,
+                    *options,
+                );
+
+                match *destination {
+                    Some(destination) => {
+                        let destination_block = fx.get_block(destination);
+                        fx.bcx.ins().jump(destination_block, &[]);
+                    }
+                    None => {
+                        crate::trap::trap_unreachable(
+                            fx,
+                            "[corruption] Returned from noreturn inline asm",
+                        );
+                    }
+                }
+            }
+            TerminatorKind::Resume | TerminatorKind::Abort => {
+                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+            }
+            TerminatorKind::Unreachable => {
+                trap_unreachable(fx, "[corruption] Hit unreachable code.");
+            }
+            TerminatorKind::Yield { .. }
+            | TerminatorKind::FalseEdge { .. }
+            | TerminatorKind::FalseUnwind { .. }
+            | TerminatorKind::DropAndReplace { .. }
+            | TerminatorKind::GeneratorDrop => {
+                bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+            }
+            TerminatorKind::Drop { place, target, unwind: _ } => {
+                let drop_place = codegen_place(fx, *place);
+                crate::abi::codegen_drop(fx, bb_data.terminator().source_info.span, drop_place);
+
+                let target_block = fx.get_block(*target);
+                fx.bcx.ins().jump(target_block, &[]);
+            }
+        };
+    }
+
+    fx.bcx.seal_all_blocks();
+    fx.bcx.finalize();
+}
+
+fn codegen_stmt<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    #[allow(unused_variables)] cur_block: Block,
+    stmt: &Statement<'tcx>,
+) {
+    let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+    fx.set_debug_loc(stmt.source_info);
+
+    #[cfg(disabled)]
+    match &stmt.kind {
+        StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+        _ => {
+            if fx.clif_comments.enabled() {
+                let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+                fx.add_comment(inst, format!("{:?}", stmt));
+            }
+        }
+    }
+
+    match &stmt.kind {
+        StatementKind::SetDiscriminant { place, variant_index } => {
+            let place = codegen_place(fx, **place);
+            crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+        }
+        StatementKind::Assign(to_place_and_rval) => {
+            let lval = codegen_place(fx, to_place_and_rval.0);
+            let dest_layout = lval.layout();
+            match to_place_and_rval.1 {
+                Rvalue::Use(ref operand) => {
+                    let val = codegen_operand(fx, operand);
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+                    let place = codegen_place(fx, place);
+                    let ref_ = place.place_ref(fx, lval.layout());
+                    lval.write_cvalue(fx, ref_);
+                }
+                Rvalue::ThreadLocalRef(def_id) => {
+                    let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+                    let lhs = codegen_operand(fx, &lhs_rhs.0);
+                    let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+                    let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+                    let lhs = codegen_operand(fx, &lhs_rhs.0);
+                    let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+                    let res = if !fx.tcx.sess.overflow_checks() {
+                        let val =
+                            crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+                        let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+                        CValue::by_val_pair(val, is_overflow, lval.layout())
+                    } else {
+                        crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+                    };
+
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::UnaryOp(un_op, ref operand) => {
+                    let operand = codegen_operand(fx, operand);
+                    let layout = operand.layout();
+                    let val = operand.load_scalar(fx);
+                    let res = match un_op {
+                        UnOp::Not => match layout.ty.kind() {
+                            ty::Bool => {
+                                let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+                                CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+                            }
+                            ty::Uint(_) | ty::Int(_) => {
+                                CValue::by_val(fx.bcx.ins().bnot(val), layout)
+                            }
+                            _ => unreachable!("un op Not for {:?}", layout.ty),
+                        },
+                        UnOp::Neg => match layout.ty.kind() {
+                            ty::Int(IntTy::I128) => {
+                                // FIXME remove this case once ineg.i128 works
+                                let zero =
+                                    CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+                                crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+                            }
+                            ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+                            ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+                            _ => unreachable!("un op Neg for {:?}", layout.ty),
+                        },
+                    };
+                    lval.write_cvalue(fx, res);
+                }
+                Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::ReifyFnPointer),
+                    ref operand,
+                    to_ty,
+                ) => {
+                    let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+                    match *from_ty.kind() {
+                        ty::FnDef(def_id, substs) => {
+                            let func_ref = fx.get_function_ref(
+                                Instance::resolve_for_fn_ptr(
+                                    fx.tcx,
+                                    ParamEnv::reveal_all(),
+                                    def_id,
+                                    substs,
+                                )
+                                .unwrap()
+                                .polymorphize(fx.tcx),
+                            );
+                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+                            lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+                        }
+                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+                    }
+                }
+                Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::UnsafeFnPointer),
+                    ref operand,
+                    to_ty,
+                )
+                | Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::MutToConstPointer),
+                    ref operand,
+                    to_ty,
+                )
+                | Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::ArrayToPointer),
+                    ref operand,
+                    to_ty,
+                ) => {
+                    let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+                    let operand = codegen_operand(fx, operand);
+                    lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+                }
+                Rvalue::Cast(CastKind::Misc, ref operand, to_ty) => {
+                    let operand = codegen_operand(fx, operand);
+                    let from_ty = operand.layout().ty;
+                    let to_ty = fx.monomorphize(to_ty);
+
+                    fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+                        ty.builtin_deref(true)
+                            .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+                                has_ptr_meta(fx.tcx, pointee_ty)
+                            })
+                            .unwrap_or(false)
+                    }
+
+                    if is_fat_ptr(fx, from_ty) {
+                        if is_fat_ptr(fx, to_ty) {
+                            // fat-ptr -> fat-ptr
+                            lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+                        } else {
+                            // fat-ptr -> thin-ptr
+                            let (ptr, _extra) = operand.load_scalar_pair(fx);
+                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+                        }
+                    } else if let ty::Adt(adt_def, _substs) = from_ty.kind() {
+                        // enum -> discriminant value
+                        assert!(adt_def.is_enum());
+                        match to_ty.kind() {
+                            ty::Uint(_) | ty::Int(_) => {}
+                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
+                        }
+                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
+
+                        let discriminant = crate::discriminant::codegen_get_discriminant(
+                            fx,
+                            operand,
+                            fx.layout_of(operand.layout().ty.discriminant_ty(fx.tcx)),
+                        )
+                        .load_scalar(fx);
+
+                        let res = crate::cast::clif_intcast(
+                            fx,
+                            discriminant,
+                            to_clif_ty,
+                            to_ty.is_signed(),
+                        );
+                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+                    } else {
+                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
+                        let from = operand.load_scalar(fx);
+
+                        let res = clif_int_or_float_cast(
+                            fx,
+                            from,
+                            type_sign(from_ty),
+                            to_clif_ty,
+                            type_sign(to_ty),
+                        );
+                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+                    }
+                }
+                Rvalue::Cast(
+                    CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+                    ref operand,
+                    _to_ty,
+                ) => {
+                    let operand = codegen_operand(fx, operand);
+                    match *operand.layout().ty.kind() {
+                        ty::Closure(def_id, substs) => {
+                            let instance = Instance::resolve_closure(
+                                fx.tcx,
+                                def_id,
+                                substs,
+                                ty::ClosureKind::FnOnce,
+                            )
+                            .polymorphize(fx.tcx);
+                            let func_ref = fx.get_function_ref(instance);
+                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+                        }
+                        _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+                    }
+                }
+                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+                    let operand = codegen_operand(fx, operand);
+                    operand.unsize_value(fx, lval);
+                }
+                Rvalue::Discriminant(place) => {
+                    let place = codegen_place(fx, place);
+                    let value = place.to_cvalue(fx);
+                    let discr =
+                        crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+                    lval.write_cvalue(fx, discr);
+                }
+                Rvalue::Repeat(ref operand, times) => {
+                    let operand = codegen_operand(fx, operand);
+                    let times = fx
+                        .monomorphize(times)
+                        .eval(fx.tcx, ParamEnv::reveal_all())
+                        .val
+                        .try_to_bits(fx.tcx.data_layout.pointer_size)
+                        .unwrap();
+                    if operand.layout().size.bytes() == 0 {
+                        // Do nothing for ZST's
+                    } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+                        let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+                        // FIXME use emit_small_memset where possible
+                        let addr = lval.to_ptr().get_addr(fx);
+                        let val = operand.load_scalar(fx);
+                        fx.bcx.call_memset(fx.module.target_config(), addr, val, times);
+                    } else {
+                        let loop_block = fx.bcx.create_block();
+                        let loop_block2 = fx.bcx.create_block();
+                        let done_block = fx.bcx.create_block();
+                        let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+                        let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+                        fx.bcx.ins().jump(loop_block, &[zero]);
+
+                        fx.bcx.switch_to_block(loop_block);
+                        let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+                        fx.bcx.ins().brnz(done, done_block, &[]);
+                        fx.bcx.ins().jump(loop_block2, &[]);
+
+                        fx.bcx.switch_to_block(loop_block2);
+                        let to = lval.place_index(fx, index);
+                        to.write_cvalue(fx, operand);
+                        let index = fx.bcx.ins().iadd_imm(index, 1);
+                        fx.bcx.ins().jump(loop_block, &[index]);
+
+                        fx.bcx.switch_to_block(done_block);
+                        fx.bcx.ins().nop();
+                    }
+                }
+                Rvalue::Len(place) => {
+                    let place = codegen_place(fx, place);
+                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+                    let len = codegen_array_len(fx, place);
+                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+                }
+                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
+                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
+                    let content_ty = fx.monomorphize(content_ty);
+                    let layout = fx.layout_of(content_ty);
+                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
+                    let llalign = fx.bcx.ins().iconst(usize_type, layout.align.abi.bytes() as i64);
+                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+
+                    // Allocate space:
+                    let def_id =
+                        match fx.tcx.lang_items().require(rustc_hir::LangItem::ExchangeMalloc) {
+                            Ok(id) => id,
+                            Err(s) => {
+                                fx.tcx
+                                    .sess
+                                    .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                            }
+                        };
+                    let instance = ty::Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+                    let func_ref = fx.get_function_ref(instance);
+                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
+                    let ptr = fx.bcx.inst_results(call)[0];
+                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
+                }
+                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
+                    assert!(
+                        lval.layout()
+                            .ty
+                            .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+                    );
+                    let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
+                    let val =
+                        CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
+                    lval.write_cvalue(fx, val);
+                }
+                Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+                    AggregateKind::Array(_ty) => {
+                        for (i, operand) in operands.iter().enumerate() {
+                            let operand = codegen_operand(fx, operand);
+                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+                            let to = lval.place_index(fx, index);
+                            to.write_cvalue(fx, operand);
+                        }
+                    }
+                    _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+                },
+            }
+        }
+        StatementKind::StorageLive(_)
+        | StatementKind::StorageDead(_)
+        | StatementKind::Nop
+        | StatementKind::FakeRead(..)
+        | StatementKind::Retag { .. }
+        | StatementKind::AscribeUserType(..) => {}
+
+        StatementKind::LlvmInlineAsm(asm) => {
+            match asm.asm.asm.as_str().trim() {
+                "" => {
+                    // Black box
+                }
+                _ => fx.tcx.sess.span_fatal(
+                    stmt.source_info.span,
+                    "Legacy `llvm_asm!` inline assembly is not supported. \
+                    Try using the new `asm!` instead.",
+                ),
+            }
+        }
+        StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+        StatementKind::CopyNonOverlapping(inner) => {
+            let dst = codegen_operand(fx, &inner.dst);
+            let pointee = dst
+                .layout()
+                .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+                .expect("Expected pointer");
+            let dst = dst.load_scalar(fx);
+            let src = codegen_operand(fx, &inner.src).load_scalar(fx);
+            let count = codegen_operand(fx, &inner.count).load_scalar(fx);
+            let elem_size: u64 = pointee.size.bytes();
+            let bytes =
+                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+            fx.bcx.call_memcpy(fx.module.target_config(), dst, src, bytes);
+        }
+    }
+}
+
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+    match *place.layout().ty.kind() {
+        ty::Array(_elem_ty, len) => {
+            let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+            fx.bcx.ins().iconst(fx.pointer_type, len)
+        }
+        ty::Slice(_elem_ty) => {
+            place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+        }
+        _ => bug!("Rvalue::Len({:?})", place),
+    }
+}
+
+pub(crate) fn codegen_place<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    place: Place<'tcx>,
+) -> CPlace<'tcx> {
+    let mut cplace = fx.get_local_place(place.local);
+
+    for elem in place.projection {
+        match elem {
+            PlaceElem::Deref => {
+                cplace = cplace.place_deref(fx);
+            }
+            PlaceElem::Field(field, _ty) => {
+                cplace = cplace.place_field(fx, field);
+            }
+            PlaceElem::Index(local) => {
+                let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+                cplace = cplace.place_index(fx, index);
+            }
+            PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+                let offset: u64 = offset;
+                let index = if !from_end {
+                    fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+                } else {
+                    let len = codegen_array_len(fx, cplace);
+                    fx.bcx.ins().iadd_imm(len, -(offset as i64))
+                };
+                cplace = cplace.place_index(fx, index);
+            }
+            PlaceElem::Subslice { from, to, from_end } => {
+                // These indices are generated by slice patterns.
+                // slice[from:-to] in Python terms.
+
+                let from: u64 = from;
+                let to: u64 = to;
+
+                match cplace.layout().ty.kind() {
+                    ty::Array(elem_ty, _len) => {
+                        assert!(!from_end, "array subslices are never `from_end`");
+                        let elem_layout = fx.layout_of(elem_ty);
+                        let ptr = cplace.to_ptr();
+                        cplace = CPlace::for_ptr(
+                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+                            fx.layout_of(fx.tcx.mk_array(elem_ty, to - from)),
+                        );
+                    }
+                    ty::Slice(elem_ty) => {
+                        assert!(from_end, "slice subslices should be `from_end`");
+                        let elem_layout = fx.layout_of(elem_ty);
+                        let (ptr, len) = cplace.to_ptr_maybe_unsized();
+                        let len = len.unwrap();
+                        cplace = CPlace::for_ptr_with_extra(
+                            ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+                            fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+                            cplace.layout(),
+                        );
+                    }
+                    _ => unreachable!(),
+                }
+            }
+            PlaceElem::Downcast(_adt_def, variant) => {
+                cplace = cplace.downcast_variant(fx, variant);
+            }
+        }
+    }
+
+    cplace
+}
+
+pub(crate) fn codegen_operand<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+    match operand {
+        Operand::Move(place) | Operand::Copy(place) => {
+            let cplace = codegen_place(fx, *place);
+            cplace.to_cvalue(fx)
+        }
+        Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+    }
+}
+
+pub(crate) fn codegen_panic<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, msg_str: &str, span: Span) {
+    let location = fx.get_caller_location(span).load_scalar(fx);
+
+    let msg_ptr = fx.anonymous_str(msg_str);
+    let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+    let args = [msg_ptr, msg_len, location];
+
+    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    lang_item: rustc_hir::LangItem,
+    args: &[Value],
+    span: Span,
+) {
+    let def_id =
+        fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+    let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+    let symbol_name = fx.tcx.symbol_name(instance).name;
+
+    fx.lib_call(
+        &*symbol_name,
+        vec![
+            AbiParam::new(fx.pointer_type),
+            AbiParam::new(fx.pointer_type),
+            AbiParam::new(fx.pointer_type),
+        ],
+        vec![],
+        args,
+    );
+
+    crate::trap::trap_unreachable(fx, "panic lang item returned");
+}
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
new file mode 100644
index 00000000000..a044b43b864
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif.rs
@@ -0,0 +1,87 @@
+#![feature(rustc_private, once_cell)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use std::lazy::SyncLazy;
+use std::panic;
+
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+const BUG_REPORT_URL: &str = "https://github.com/bjorn3/rustc_codegen_cranelift/issues/new";
+
+static DEFAULT_HOOK: SyncLazy<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
+    SyncLazy::new(|| {
+        let hook = panic::take_hook();
+        panic::set_hook(Box::new(|info| {
+            // Invoke the default handler, which prints the actual panic message and optionally a backtrace
+            (*DEFAULT_HOOK)(info);
+
+            // Separate the output with an empty line
+            eprintln!();
+
+            // Print the ICE message
+            rustc_driver::report_ice(info, BUG_REPORT_URL);
+        }));
+        hook
+    });
+
+#[derive(Default)]
+pub struct CraneliftPassesCallbacks {
+    time_passes: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+    fn config(&mut self, config: &mut interface::Config) {
+        // If a --prints=... option has been given, we don't print the "total"
+        // time because it will mess up the --prints output. See #64339.
+        self.time_passes = config.opts.prints.is_empty()
+            && (config.opts.debugging_opts.time_passes || config.opts.debugging_opts.time);
+
+        config.opts.cg.panic = Some(PanicStrategy::Abort);
+        config.opts.debugging_opts.panic_abort_tests = true;
+        config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
+            std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned()
+        }));
+    }
+}
+
+fn main() {
+    let start_time = std::time::Instant::now();
+    let start_rss = get_resident_set_size();
+    rustc_driver::init_rustc_env_logger();
+    let mut callbacks = CraneliftPassesCallbacks::default();
+    SyncLazy::force(&DEFAULT_HOOK); // Install ice hook
+    let exit_code = rustc_driver::catch_with_exit_code(|| {
+        let args = std::env::args_os()
+            .enumerate()
+            .map(|(i, arg)| {
+                arg.into_string().unwrap_or_else(|arg| {
+                    early_error(
+                        ErrorOutputType::default(),
+                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+                    )
+                })
+            })
+            .collect::<Vec<_>>();
+        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+        run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+            Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+        })));
+        run_compiler.run()
+    });
+
+    if callbacks.time_passes {
+        let end_rss = get_resident_set_size();
+        print_time_passes_entry("total", start_time.elapsed(), start_rss, end_rss);
+    }
+
+    std::process::exit(exit_code)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
new file mode 100644
index 00000000000..e7cd5edbbf6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/bin/cg_clif_build_sysroot.rs
@@ -0,0 +1,91 @@
+//! The only difference between this and cg_clif.rs is that this binary defaults to using cg_llvm
+//! instead of cg_clif and requires `--clif` to use cg_clif and that this binary doesn't have JIT
+//! support.
+//! This is necessary as with Cargo `RUSTC` applies to both target crates and host crates. The host
+//! crates must be built with cg_llvm as we are currently building a sysroot for cg_clif.
+//! `RUSTFLAGS` however is only applied to target crates, so `--clif` would only be passed to the
+//! target crates.
+
+#![feature(rustc_private)]
+
+extern crate rustc_data_structures;
+extern crate rustc_driver;
+extern crate rustc_interface;
+extern crate rustc_session;
+extern crate rustc_target;
+
+use std::path::PathBuf;
+
+use rustc_interface::interface;
+use rustc_session::config::ErrorOutputType;
+use rustc_session::early_error;
+use rustc_target::spec::PanicStrategy;
+
+fn find_sysroot() -> String {
+    // Taken from https://github.com/Manishearth/rust-clippy/pull/911.
+    let home = option_env!("RUSTUP_HOME").or(option_env!("MULTIRUST_HOME"));
+    let toolchain = option_env!("RUSTUP_TOOLCHAIN").or(option_env!("MULTIRUST_TOOLCHAIN"));
+    match (home, toolchain) {
+        (Some(home), Some(toolchain)) => format!("{}/toolchains/{}", home, toolchain),
+        _ => option_env!("RUST_SYSROOT")
+            .expect("need to specify RUST_SYSROOT env var or use rustup or multirust")
+            .to_owned(),
+    }
+}
+
+pub struct CraneliftPassesCallbacks {
+    use_clif: bool,
+}
+
+impl rustc_driver::Callbacks for CraneliftPassesCallbacks {
+    fn config(&mut self, config: &mut interface::Config) {
+        if !self.use_clif {
+            config.opts.maybe_sysroot = Some(PathBuf::from(find_sysroot()));
+            return;
+        }
+
+        config.opts.cg.panic = Some(PanicStrategy::Abort);
+        config.opts.debugging_opts.panic_abort_tests = true;
+        config.opts.maybe_sysroot =
+            Some(std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned());
+    }
+}
+
+fn main() {
+    rustc_driver::init_rustc_env_logger();
+    rustc_driver::install_ice_hook();
+    let exit_code = rustc_driver::catch_with_exit_code(|| {
+        let mut use_clif = false;
+
+        let args = std::env::args_os()
+            .enumerate()
+            .map(|(i, arg)| {
+                arg.into_string().unwrap_or_else(|arg| {
+                    early_error(
+                        ErrorOutputType::default(),
+                        &format!("Argument {} is not valid Unicode: {:?}", i, arg),
+                    )
+                })
+            })
+            .filter(|arg| {
+                if arg == "--clif" {
+                    use_clif = true;
+                    false
+                } else {
+                    true
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let mut callbacks = CraneliftPassesCallbacks { use_clif };
+
+        let mut run_compiler = rustc_driver::RunCompiler::new(&args, &mut callbacks);
+        if use_clif {
+            run_compiler.set_make_codegen_backend(Some(Box::new(move |_| {
+                Box::new(rustc_codegen_cranelift::CraneliftCodegenBackend { config: None })
+            })));
+        }
+        run_compiler.run()
+    });
+    std::process::exit(exit_code)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
new file mode 100644
index 00000000000..74c5e09f08d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -0,0 +1,179 @@
+//! Various number casting functions
+
+use crate::prelude::*;
+
+pub(crate) fn clif_intcast(
+    fx: &mut FunctionCx<'_, '_, '_>,
+    val: Value,
+    to: Type,
+    signed: bool,
+) -> Value {
+    let from = fx.bcx.func.dfg.value_type(val);
+    match (from, to) {
+        // equal
+        (_, _) if from == to => val,
+
+        // extend
+        (_, types::I128) => {
+            let lo = if from == types::I64 {
+                val
+            } else if signed {
+                fx.bcx.ins().sextend(types::I64, val)
+            } else {
+                fx.bcx.ins().uextend(types::I64, val)
+            };
+            let hi = if signed {
+                fx.bcx.ins().sshr_imm(lo, 63)
+            } else {
+                fx.bcx.ins().iconst(types::I64, 0)
+            };
+            fx.bcx.ins().iconcat(lo, hi)
+        }
+        (_, _) if to.wider_or_equal(from) => {
+            if signed {
+                fx.bcx.ins().sextend(to, val)
+            } else {
+                fx.bcx.ins().uextend(to, val)
+            }
+        }
+
+        // reduce
+        (types::I128, _) => {
+            let (lsb, _msb) = fx.bcx.ins().isplit(val);
+            if to == types::I64 { lsb } else { fx.bcx.ins().ireduce(to, lsb) }
+        }
+        (_, _) => fx.bcx.ins().ireduce(to, val),
+    }
+}
+
+pub(crate) fn clif_int_or_float_cast(
+    fx: &mut FunctionCx<'_, '_, '_>,
+    from: Value,
+    from_signed: bool,
+    to_ty: Type,
+    to_signed: bool,
+) -> Value {
+    let from_ty = fx.bcx.func.dfg.value_type(from);
+
+    if from_ty.is_int() && to_ty.is_int() {
+        // int-like -> int-like
+        clif_intcast(
+            fx,
+            from,
+            to_ty,
+            // This is correct as either from_signed == to_signed (=> this is trivially correct)
+            // Or from_clif_ty == to_clif_ty, which means this is a no-op.
+            from_signed,
+        )
+    } else if from_ty.is_int() && to_ty.is_float() {
+        if from_ty == types::I128 {
+            // _______ss__f_
+            // __float  tisf: i128 -> f32
+            // __float  tidf: i128 -> f64
+            // __floatuntisf: u128 -> f32
+            // __floatuntidf: u128 -> f64
+
+            let name = format!(
+                "__float{sign}ti{flt}f",
+                sign = if from_signed { "" } else { "un" },
+                flt = match to_ty {
+                    types::F32 => "s",
+                    types::F64 => "d",
+                    _ => unreachable!("{:?}", to_ty),
+                },
+            );
+
+            let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+            let to_rust_ty = match to_ty {
+                types::F32 => fx.tcx.types.f32,
+                types::F64 => fx.tcx.types.f64,
+                _ => unreachable!(),
+            };
+
+            return fx
+                .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+                .load_scalar(fx);
+        }
+
+        // int-like -> float
+        if from_signed {
+            fx.bcx.ins().fcvt_from_sint(to_ty, from)
+        } else {
+            fx.bcx.ins().fcvt_from_uint(to_ty, from)
+        }
+    } else if from_ty.is_float() && to_ty.is_int() {
+        if to_ty == types::I128 {
+            // _____sssf___
+            // __fix   sfti: f32 -> i128
+            // __fix   dfti: f64 -> i128
+            // __fixunssfti: f32 -> u128
+            // __fixunsdfti: f64 -> u128
+
+            let name = format!(
+                "__fix{sign}{flt}fti",
+                sign = if to_signed { "" } else { "uns" },
+                flt = match from_ty {
+                    types::F32 => "s",
+                    types::F64 => "d",
+                    _ => unreachable!("{:?}", to_ty),
+                },
+            );
+
+            let from_rust_ty = match from_ty {
+                types::F32 => fx.tcx.types.f32,
+                types::F64 => fx.tcx.types.f64,
+                _ => unreachable!(),
+            };
+
+            let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+            return fx
+                .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+                .load_scalar(fx);
+        }
+
+        // float -> int-like
+        if to_ty == types::I8 || to_ty == types::I16 {
+            // FIXME implement fcvt_to_*int_sat.i8/i16
+            let val = if to_signed {
+                fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
+            } else {
+                fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
+            };
+            let (min, max) = match (to_ty, to_signed) {
+                (types::I8, false) => (0, i64::from(u8::MAX)),
+                (types::I16, false) => (0, i64::from(u16::MAX)),
+                (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
+                (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
+                _ => unreachable!(),
+            };
+            let min_val = fx.bcx.ins().iconst(types::I32, min);
+            let max_val = fx.bcx.ins().iconst(types::I32, max);
+
+            let val = if to_signed {
+                let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
+                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
+                let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
+                fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
+            } else {
+                let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
+                fx.bcx.ins().select(has_overflow, max_val, val)
+            };
+            fx.bcx.ins().ireduce(to_ty, val)
+        } else if to_signed {
+            fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
+        } else {
+            fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
+        }
+    } else if from_ty.is_float() && to_ty.is_float() {
+        // float -> float
+        match (from_ty, to_ty) {
+            (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
+            (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
+            _ => from,
+        }
+    } else {
+        unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
new file mode 100644
index 00000000000..ffe1922ab90
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -0,0 +1,167 @@
+//! Replaces 128-bit operators with lang item calls where necessary
+
+use cranelift_codegen::ir::ArgumentPurpose;
+
+use crate::prelude::*;
+
+pub(crate) fn maybe_codegen<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    checked: bool,
+    lhs: CValue<'tcx>,
+    rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
+    if lhs.layout().ty != fx.tcx.types.u128
+        && lhs.layout().ty != fx.tcx.types.i128
+        && rhs.layout().ty != fx.tcx.types.u128
+        && rhs.layout().ty != fx.tcx.types.i128
+    {
+        return None;
+    }
+
+    let lhs_val = lhs.load_scalar(fx);
+    let rhs_val = rhs.load_scalar(fx);
+
+    let is_signed = type_sign(lhs.layout().ty);
+
+    match bin_op {
+        BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
+            assert!(!checked);
+            None
+        }
+        BinOp::Add | BinOp::Sub if !checked => None,
+        BinOp::Mul if !checked => {
+            let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+            if fx.tcx.sess.target.is_like_windows {
+                let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+                let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+                let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+                assert!(lhs_extra.is_none());
+                assert!(rhs_extra.is_none());
+                let args =
+                    [ret_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
+                fx.lib_call(
+                    "__multi3",
+                    vec![
+                        AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
+                        AbiParam::new(pointer_ty(fx.tcx)),
+                        AbiParam::new(pointer_ty(fx.tcx)),
+                    ],
+                    vec![],
+                    &args,
+                );
+                Some(ret_place.to_cvalue(fx))
+            } else {
+                Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+            }
+        }
+        BinOp::Add | BinOp::Sub | BinOp::Mul => {
+            assert!(checked);
+            let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+            let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
+            let (param_types, args) = if fx.tcx.sess.target.is_like_windows {
+                let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+                let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+                assert!(lhs_extra.is_none());
+                assert!(rhs_extra.is_none());
+                (
+                    vec![
+                        AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
+                        AbiParam::new(pointer_ty(fx.tcx)),
+                        AbiParam::new(pointer_ty(fx.tcx)),
+                    ],
+                    [out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
+                )
+            } else {
+                (
+                    vec![
+                        AbiParam::special(pointer_ty(fx.tcx), ArgumentPurpose::StructReturn),
+                        AbiParam::new(types::I128),
+                        AbiParam::new(types::I128),
+                    ],
+                    [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)],
+                )
+            };
+            let name = match (bin_op, is_signed) {
+                (BinOp::Add, false) => "__rust_u128_addo",
+                (BinOp::Add, true) => "__rust_i128_addo",
+                (BinOp::Sub, false) => "__rust_u128_subo",
+                (BinOp::Sub, true) => "__rust_i128_subo",
+                (BinOp::Mul, false) => "__rust_u128_mulo",
+                (BinOp::Mul, true) => "__rust_i128_mulo",
+                _ => unreachable!(),
+            };
+            fx.lib_call(name, param_types, vec![], &args);
+            Some(out_place.to_cvalue(fx))
+        }
+        BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+        BinOp::Div | BinOp::Rem => {
+            assert!(!checked);
+            let name = match (bin_op, is_signed) {
+                (BinOp::Div, false) => "__udivti3",
+                (BinOp::Div, true) => "__divti3",
+                (BinOp::Rem, false) => "__umodti3",
+                (BinOp::Rem, true) => "__modti3",
+                _ => unreachable!(),
+            };
+            if fx.tcx.sess.target.is_like_windows {
+                let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+                let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+                assert!(lhs_extra.is_none());
+                assert!(rhs_extra.is_none());
+                let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
+                let ret = fx.lib_call(
+                    name,
+                    vec![AbiParam::new(pointer_ty(fx.tcx)), AbiParam::new(pointer_ty(fx.tcx))],
+                    vec![AbiParam::new(types::I64X2)],
+                    &args,
+                )[0];
+                // FIXME use bitcast instead of store to get from i64x2 to i128
+                let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+                ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
+                Some(ret_place.to_cvalue(fx))
+            } else {
+                Some(fx.easy_call(name, &[lhs, rhs], lhs.layout().ty))
+            }
+        }
+        BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
+            assert!(!checked);
+            None
+        }
+        BinOp::Shl | BinOp::Shr => {
+            let is_overflow = if checked {
+                // rhs >= 128
+
+                // FIXME support non 128bit rhs
+                /*let (rhs_lsb, rhs_msb) = fx.bcx.ins().isplit(rhs_val);
+                let rhs_msb_gt_0 = fx.bcx.ins().icmp_imm(IntCC::NotEqual, rhs_msb, 0);
+                let rhs_lsb_ge_128 = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, rhs_lsb, 127);
+                let is_overflow = fx.bcx.ins().bor(rhs_msb_gt_0, rhs_lsb_ge_128);*/
+                let is_overflow = fx.bcx.ins().bconst(types::B1, false);
+
+                Some(fx.bcx.ins().bint(types::I8, is_overflow))
+            } else {
+                None
+            };
+
+            let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
+            let val = match bin_op {
+                BinOp::Shl => fx.bcx.ins().ishl(lhs_val, truncated_rhs),
+                BinOp::Shr => {
+                    if is_signed {
+                        fx.bcx.ins().sshr(lhs_val, truncated_rhs)
+                    } else {
+                        fx.bcx.ins().ushr(lhs_val, truncated_rhs)
+                    }
+                }
+                _ => unreachable!(),
+            };
+            if let Some(is_overflow) = is_overflow {
+                let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+                Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
+            } else {
+                Some(CValue::by_val(val, lhs.layout()))
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
new file mode 100644
index 00000000000..892ccf27f6d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -0,0 +1,405 @@
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::SymbolName;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+/// Returns the Cranelift integer type matching the target's pointer width.
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+    match tcx.data_layout.pointer_size.bits() {
+        16 => types::I16,
+        32 => types::I32,
+        64 => types::I64,
+        bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+    }
+}
+
+/// Maps a rustc layout `Scalar` to the Cranelift type used to represent it.
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+    match scalar.value {
+        // Signedness is irrelevant here: Cranelift integer types carry no sign.
+        Primitive::Int(int, _sign) => match int {
+            Integer::I8 => types::I8,
+            Integer::I16 => types::I16,
+            Integer::I32 => types::I32,
+            Integer::I64 => types::I64,
+            Integer::I128 => types::I128,
+        },
+        Primitive::F32 => types::F32,
+        Primitive::F64 => types::F64,
+        Primitive::Pointer => pointer_ty(tcx),
+    }
+}
+
+/// Maps a Rust type to the single Cranelift scalar type it is lowered to,
+/// or `None` if the type is not represented as a single scalar value
+/// (e.g. fat pointers, aggregates, unsupported vector types).
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+    Some(match ty.kind() {
+        ty::Bool => types::I8,
+        ty::Uint(size) => match size {
+            UintTy::U8 => types::I8,
+            UintTy::U16 => types::I16,
+            UintTy::U32 => types::I32,
+            UintTy::U64 => types::I64,
+            UintTy::U128 => types::I128,
+            UintTy::Usize => pointer_ty(tcx),
+        },
+        ty::Int(size) => match size {
+            IntTy::I8 => types::I8,
+            IntTy::I16 => types::I16,
+            IntTy::I32 => types::I32,
+            IntTy::I64 => types::I64,
+            IntTy::I128 => types::I128,
+            IntTy::Isize => pointer_ty(tcx),
+        },
+        ty::Char => types::I32,
+        ty::Float(size) => match size {
+            FloatTy::F32 => types::F32,
+            FloatTy::F64 => types::F64,
+        },
+        ty::FnPtr(_) => pointer_ty(tcx),
+        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+            // Fat pointers are a (data, meta) pair, not a single scalar.
+            if has_ptr_meta(tcx, pointee_ty) {
+                return None;
+            } else {
+                pointer_ty(tcx)
+            }
+        }
+        ty::Adt(adt_def, _) if adt_def.repr.simd() => {
+            let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+            {
+                Abi::Vector { element, count } => (element.clone(), *count),
+                _ => unreachable!(),
+            };
+
+            match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+                // Cranelift currently only implements icmp for 128bit vectors.
+                Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+                _ => return None,
+            }
+        }
+        ty::Param(_) => bug!("ty param {:?}", ty),
+        _ => return None,
+    })
+}
+
+/// Maps a Rust type to the pair of Cranelift scalar types it is lowered to,
+/// or `None` if the type is not represented as a pair of scalar values.
+/// Covers two-element tuples of scalars and fat pointers/references.
+fn clif_pair_type_from_ty<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+    Some(match ty.kind() {
+        ty::Tuple(substs) if substs.len() == 2 => {
+            let mut types = substs.types();
+            let a = clif_type_from_ty(tcx, types.next().unwrap())?;
+            let b = clif_type_from_ty(tcx, types.next().unwrap())?;
+            // Vector components are not supported as pair halves.
+            if a.is_vector() || b.is_vector() {
+                return None;
+            }
+            (a, b)
+        }
+        ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+            // Only fat pointers are pairs; thin pointers are a single scalar.
+            if has_ptr_meta(tcx, pointee_ty) {
+                (pointer_ty(tcx), pointer_ty(tcx))
+            } else {
+                return None;
+            }
+        }
+        _ => return None,
+    })
+}
+
+/// Is a pointer to this type a fat ptr?
+///
+/// Determined by asking the layout code whether `*const ty` is a scalar
+/// (thin pointer) or a scalar pair (pointer plus metadata).
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+    let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
+    match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+        Abi::Scalar(_) => false,
+        Abi::ScalarPair(_, _) => true,
+        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+    }
+}
+
+/// Emits an integer comparison of `lhs` against the immediate `rhs`.
+///
+/// For 128 bit operands the value is split into two 64 bit halves and the result is
+/// composed from comparisons of the halves, because Cranelift doesn't legalize
+/// `icmp_imm.i128` (see the FIXME below).
+pub(crate) fn codegen_icmp_imm(
+    fx: &mut FunctionCx<'_, '_, '_>,
+    intcc: IntCC,
+    lhs: Value,
+    rhs: i128,
+) -> Value {
+    let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+    if lhs_ty == types::I128 {
+        // FIXME legalize `icmp_imm.i128` in Cranelift
+
+        let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+        let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
+        match intcc {
+            IntCC::Equal => {
+                // Equal iff both halves are equal.
+                let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+                fx.bcx.ins().band(lsb_eq, msb_eq)
+            }
+            IntCC::NotEqual => {
+                // Not equal iff either half differs.
+                let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+                let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+                fx.bcx.ins().bor(lsb_ne, msb_ne)
+            }
+            _ => {
+                // if msb_eq {
+                //     lsb_cc
+                // } else {
+                //     msb_cc
+                // }
+
+                let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+                let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+                let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+                fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+            }
+        }
+    } else {
+        let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
+        fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+    }
+}
+
+/// Returns `(min, max)` constants for the integer type `ty`, interpreted as
+/// signed or unsigned depending on `signed`.
+///
+/// 128 bit constants are built by concatenating two 64 bit halves, since
+/// `iconst` only takes an `i64` immediate.
+pub(crate) fn type_min_max_value(
+    bcx: &mut FunctionBuilder<'_>,
+    ty: Type,
+    signed: bool,
+) -> (Value, Value) {
+    assert!(ty.is_int());
+
+    if ty == types::I128 {
+        if signed {
+            let min = i128::MIN as u128;
+            let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+            let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+            let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+            let max = i128::MAX as u128;
+            let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+            let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+            let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+            return (min, max);
+        } else {
+            // u128::MIN is all zero bits, u128::MAX is all one bits.
+            let min_half = bcx.ins().iconst(types::I64, 0);
+            let min = bcx.ins().iconcat(min_half, min_half);
+
+            let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+            let max = bcx.ins().iconcat(max_half, max_half);
+
+            return (min, max);
+        }
+    }
+
+    let min = match (ty, signed) {
+        (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+            0i64
+        }
+        (types::I8, true) => i64::from(i8::MIN),
+        (types::I16, true) => i64::from(i16::MIN),
+        (types::I32, true) => i64::from(i32::MIN),
+        (types::I64, true) => i64::MIN,
+        _ => unreachable!(),
+    };
+
+    let max = match (ty, signed) {
+        (types::I8, false) => i64::from(u8::MAX),
+        (types::I16, false) => i64::from(u16::MAX),
+        (types::I32, false) => i64::from(u32::MAX),
+        (types::I64, false) => u64::MAX as i64,
+        (types::I8, true) => i64::from(i8::MAX),
+        (types::I16, true) => i64::from(i16::MAX),
+        (types::I32, true) => i64::from(i32::MAX),
+        (types::I64, true) => i64::MAX,
+        _ => unreachable!(),
+    };
+
+    let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+    (min, max)
+}
+
+/// Whether values of the Rust type `ty` should be treated as signed in integer
+/// instructions. Panics on types that have no integer representation here.
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+    match ty.kind() {
+        ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+        ty::Int(..) => true,
+        ty::Float(..) => false, // `signed` is unused for floats
+        _ => panic!("{}", ty),
+    }
+}
+
+/// The per-function state used while lowering the MIR of a single function
+/// instance to Cranelift IR.
+pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
+    pub(crate) cx: &'clif mut crate::CodegenCx<'tcx>,
+    pub(crate) module: &'m mut dyn Module,
+    pub(crate) tcx: TyCtxt<'tcx>,
+    pub(crate) pointer_type: Type, // Cached from module
+    pub(crate) constants_cx: ConstantCx,
+
+    // The monomorphized instance being compiled, its symbol name and its MIR body.
+    pub(crate) instance: Instance<'tcx>,
+    pub(crate) symbol_name: SymbolName<'tcx>,
+    pub(crate) mir: &'tcx Body<'tcx>,
+    pub(crate) fn_abi: Option<FnAbi<'tcx, Ty<'tcx>>>,
+
+    // Cranelift function builder plus the MIR basic block -> clif block and
+    // MIR local -> `CPlace` mappings.
+    pub(crate) bcx: FunctionBuilder<'clif>,
+    pub(crate) block_map: IndexVec<BasicBlock, Block>,
+    pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+    /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+    pub(crate) caller_location: Option<CValue<'tcx>>,
+
+    // Support for commented clif IR dumps and source location tracking.
+    pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+    pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+    /// This should only be accessed by `CPlace::new_var`.
+    pub(crate) next_ssa_var: u32,
+
+    // Counter used to give inline asm blocks distinct indices.
+    pub(crate) inline_asm_index: u32,
+}
+
+// The following impls make `FunctionCx` usable as a rustc layout/target context.
+// Layouts are always computed with `ParamEnv::reveal_all()` via `RevealAllLayoutCx`.
+impl<'tcx> LayoutOf for FunctionCx<'_, '_, 'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
+        RevealAllLayoutCx(self.tcx).layout_of(ty)
+    }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
+    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        ParamEnv::reveal_all()
+    }
+}
+
+impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+    /// Substitutes the generic arguments of the current instance into `value`
+    /// and normalizes it in `ParamEnv::reveal_all()`.
+    pub(crate) fn monomorphize<T>(&self, value: T) -> T
+    where
+        T: TypeFoldable<'tcx> + Copy,
+    {
+        self.instance.subst_mir_and_normalize_erasing_regions(
+            self.tcx,
+            ty::ParamEnv::reveal_all(),
+            value,
+        )
+    }
+
+    /// The Cranelift scalar type `ty` is lowered to, if any.
+    pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+        clif_type_from_ty(self.tcx, ty)
+    }
+
+    /// The pair of Cranelift types `ty` is lowered to, if it is a scalar pair.
+    pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+        clif_pair_type_from_ty(self.tcx, ty)
+    }
+
+    /// The Cranelift block corresponding to the MIR basic block `bb`.
+    pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+        *self.block_map.get(bb).unwrap()
+    }
+
+    /// The `CPlace` backing the MIR local `local`. Panics if the local has no place.
+    pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+        *self.local_map.get(local).unwrap_or_else(|| {
+            panic!("Local {:?} doesn't exist", local);
+        })
+    }
+
+    /// Interns `source_info` and sets it as the source location for subsequently
+    /// emitted instructions.
+    pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+        let (index, _) = self.source_info_set.insert_full(source_info);
+        self.bcx.set_srcloc(SourceLoc::new(index as u32));
+    }
+
+    /// Returns the caller location value for `#[track_caller]`: the implicit
+    /// caller location if one was passed in, otherwise a constant `Location`
+    /// built from `span`.
+    pub(crate) fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
+        if let Some(loc) = self.caller_location {
+            // `#[track_caller]` is used; return caller location instead of current location.
+            return loc;
+        }
+
+        // Walk to the macro expansion root so the location points at user code.
+        let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+        let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+        let const_loc = self.tcx.const_caller_location((
+            rustc_span::symbol::Symbol::intern(
+                &caller.file.name.prefer_remapped().to_string_lossy(),
+            ),
+            caller.line as u32,
+            caller.col_display as u32 + 1,
+        ));
+        crate::constant::codegen_const_value(self, const_loc, self.tcx.caller_location_ty())
+    }
+
+    /// The target triple of the module being compiled.
+    pub(crate) fn triple(&self) -> &target_lexicon::Triple {
+        self.module.isa().triple()
+    }
+
+    /// Defines `msg` as an anonymous read-only data object and returns a
+    /// pointer to it.
+    pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
+        let mut data_ctx = DataContext::new();
+        data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+        let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
+
+        // Ignore DuplicateDefinition error, as the data will be the same
+        let _ = self.module.define_data(msg_id, &data_ctx);
+
+        let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
+        if self.clif_comments.enabled() {
+            self.add_comment(local_msg_id, msg);
+        }
+        self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+    }
+}
+
+/// A layout context usable without a `FunctionCx`, computing layouts in
+/// `ParamEnv::reveal_all()`.
+pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
+
+impl<'tcx> LayoutOf for RevealAllLayoutCx<'tcx> {
+    type Ty = Ty<'tcx>;
+    type TyAndLayout = TyAndLayout<'tcx>;
+
+    fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
+        assert!(!ty.still_further_specializable());
+        self.0.layout_of(ParamEnv::reveal_all().and(&ty)).unwrap_or_else(|e| {
+            // A size overflow is a user facing error; anything else is a compiler bug.
+            if let layout::LayoutError::SizeOverflow(_) = e {
+                self.0.sess.fatal(&e.to_string())
+            } else {
+                bug!("failed to get layout for `{}`: {}", ty, e)
+            }
+        })
+    }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+        self.0
+    }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
+    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+        &self.0.data_layout
+    }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
+    fn param_env(&self) -> ParamEnv<'tcx> {
+        ParamEnv::reveal_all()
+    }
+}
+
+impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
+    fn target_spec(&self) -> &Target {
+        &self.0.sess.target
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
new file mode 100644
index 00000000000..100c3b43160
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
@@ -0,0 +1,41 @@
+/// Declares `extern "C"` bindings for the listed functions and generates a
+/// `$register` function (only when the "jit" feature is enabled) that registers
+/// each of them by name as a resolvable symbol with the JIT builder.
+macro builtin_functions($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) {
+    #[cfg(feature = "jit")]
+    #[allow(improper_ctypes)]
+    extern "C" {
+        $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
+    }
+
+    #[cfg(feature = "jit")]
+    pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
+        for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
+            builder.symbol(name, val);
+        }
+    }
+}
+
+// Register the 128 bit integer and float<->128 bit integer conversion helpers
+// so the JIT can resolve calls to these compiler-builtins symbols.
+builtin_functions! {
+    register_functions_for_jit;
+
+    // integers
+    fn __multi3(a: i128, b: i128) -> i128;
+    fn __udivti3(n: u128, d: u128) -> u128;
+    fn __divti3(n: i128, d: i128) -> i128;
+    fn __umodti3(n: u128, d: u128) -> u128;
+    fn __modti3(n: i128, d: i128) -> i128;
+    fn __rust_u128_addo(a: u128, b: u128) -> (u128, bool);
+    fn __rust_i128_addo(a: i128, b: i128) -> (i128, bool);
+    fn __rust_u128_subo(a: u128, b: u128) -> (u128, bool);
+    fn __rust_i128_subo(a: i128, b: i128) -> (i128, bool);
+    fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool);
+    fn __rust_i128_mulo(a: i128, b: i128) -> (i128, bool);
+
+    // floats
+    fn __floattisf(i: i128) -> f32;
+    fn __floattidf(i: i128) -> f64;
+    fn __floatuntisf(i: u128) -> f32;
+    fn __floatuntidf(i: u128) -> f64;
+    fn __fixsfti(f: f32) -> i128;
+    fn __fixdfti(f: f64) -> i128;
+    fn __fixunssfti(f: f32) -> u128;
+    fn __fixunsdfti(f: f64) -> u128;
+}
diff --git a/compiler/rustc_codegen_cranelift/src/config.rs b/compiler/rustc_codegen_cranelift/src/config.rs
new file mode 100644
index 00000000000..eef3c8c8d6e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/config.rs
@@ -0,0 +1,116 @@
+use std::env;
+use std::str::FromStr;
+
+/// Returns true if the environment variable `key` is set to exactly "1".
+fn bool_env_var(key: &str) -> bool {
+    env::var(key).as_ref().map(|val| &**val) == Ok("1")
+}
+
+/// The mode to use for compilation.
+#[derive(Copy, Clone, Debug)]
+pub enum CodegenMode {
+    /// AOT compile the crate. This is the default.
+    Aot,
+    /// JIT compile and execute the crate.
+    Jit,
+    /// JIT compile and execute the crate, but only compile functions the first time they are used.
+    JitLazy,
+}
+
+impl FromStr for CodegenMode {
+    type Err = String;
+
+    /// Parses the textual value of `-Cllvm-args=mode=...`.
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "aot" => Ok(CodegenMode::Aot),
+            "jit" => Ok(CodegenMode::Jit),
+            "jit-lazy" => Ok(CodegenMode::JitLazy),
+            _ => Err(format!("Unknown codegen mode `{}`", s)),
+        }
+    }
+}
+
+/// Configuration of cg_clif as passed in through `-Cllvm-args` and various env vars.
+#[derive(Clone, Debug)]
+pub struct BackendConfig {
+    /// Should the crate be AOT compiled or JIT executed.
+    ///
+    /// Defaults to AOT compilation. Can be set using `-Cllvm-args=mode=...`.
+    pub codegen_mode: CodegenMode,
+
+    /// When JIT mode is enabled pass these arguments to the program.
+    ///
+    /// Defaults to the value of `CG_CLIF_JIT_ARGS`.
+    pub jit_args: Vec<String>,
+
+    /// Display the time it took to perform codegen for a crate.
+    ///
+    /// Defaults to true when the `CG_CLIF_DISPLAY_CG_TIME` env var is set to 1 or false otherwise.
+    /// Can be set using `-Cllvm-args=display_cg_time=...`.
+    pub display_cg_time: bool,
+
+    /// The register allocator to use.
+    ///
+    /// Defaults to the value of `CG_CLIF_REGALLOC` or `backtracking` otherwise. Can be set using
+    /// `-Cllvm-args=regalloc=...`.
+    pub regalloc: String,
+
+    /// Enable the Cranelift ir verifier for all compilation passes. If not set it will only run
+    /// once before passing the clif ir to Cranelift for compilation.
+    ///
+    /// Defaults to true when the `CG_CLIF_ENABLE_VERIFIER` env var is set to 1 or when cg_clif is
+    /// compiled with debug assertions enabled or false otherwise. Can be set using
+    /// `-Cllvm-args=enable_verifier=...`.
+    pub enable_verifier: bool,
+
+    /// Don't cache object files in the incremental cache. Useful during development of cg_clif
+    /// to make it possible to use incremental mode for all analyses performed by rustc without
+    /// caching object files when their content should have been changed by a change to cg_clif.
+    ///
+    /// Defaults to true when the `CG_CLIF_DISABLE_INCR_CACHE` env var is set to 1 or false
+    /// otherwise. Can be set using `-Cllvm-args=disable_incr_cache=...`.
+    pub disable_incr_cache: bool,
+}
+
+impl Default for BackendConfig {
+    /// Builds the default configuration, reading the `CG_CLIF_*` env vars.
+    fn default() -> Self {
+        BackendConfig {
+            codegen_mode: CodegenMode::Aot,
+            jit_args: {
+                // Space-separated argument list for the JITed program.
+                let args = std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+                args.split(' ').map(|arg| arg.to_string()).collect()
+            },
+            display_cg_time: bool_env_var("CG_CLIF_DISPLAY_CG_TIME"),
+            regalloc: std::env::var("CG_CLIF_REGALLOC")
+                .unwrap_or_else(|_| "backtracking".to_string()),
+            enable_verifier: cfg!(debug_assertions) || bool_env_var("CG_CLIF_ENABLE_VERIFIER"),
+            disable_incr_cache: bool_env_var("CG_CLIF_DISABLE_INCR_CACHE"),
+        }
+    }
+}
+
+impl BackendConfig {
+    /// Parse the configuration passed in using `-Cllvm-args`.
+    ///
+    /// Each option must have the form `name=value`; unknown names and malformed
+    /// options produce an error. Unspecified options keep their default values.
+    pub fn from_opts(opts: &[String]) -> Result<Self, String> {
+        fn parse_bool(name: &str, value: &str) -> Result<bool, String> {
+            value.parse().map_err(|_| format!("failed to parse value `{}` for {}", value, name))
+        }
+
+        let mut config = BackendConfig::default();
+        for opt in opts {
+            if let Some((name, value)) = opt.split_once('=') {
+                match name {
+                    "mode" => config.codegen_mode = value.parse()?,
+                    "display_cg_time" => config.display_cg_time = parse_bool(name, value)?,
+                    "regalloc" => config.regalloc = value.to_string(),
+                    "enable_verifier" => config.enable_verifier = parse_bool(name, value)?,
+                    "disable_incr_cache" => config.disable_incr_cache = parse_bool(name, value)?,
+                    _ => return Err(format!("Unknown option `{}`", name)),
+                }
+            } else {
+                return Err(format!("Invalid option `{}`", opt));
+            }
+        }
+
+        Ok(config)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
new file mode 100644
index 00000000000..2a2573aad29
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -0,0 +1,549 @@
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::ErrorReported;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+    read_target_uint, AllocId, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
+};
+use rustc_middle::ty::ConstKind;
+use rustc_span::DUMMY_SP;
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+/// State for interning constant allocations and statics as data objects.
+pub(crate) struct ConstantCx {
+    // Allocations and statics queued for definition in `finalize`.
+    todo: Vec<TodoItem>,
+    // Data objects already defined — TODO confirm against `define_all_allocs`,
+    // which is not visible here.
+    done: FxHashSet<DataId>,
+    // Maps anonymous allocation ids to their declared data objects.
+    anon_allocs: FxHashMap<AllocId, DataId>,
+}
+
+/// A queued allocation or static that still needs its data object defined.
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+    Alloc(AllocId),
+    Static(DefId),
+}
+
+impl ConstantCx {
+    pub(crate) fn new() -> Self {
+        ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
+    }
+
+    /// Defines all queued allocations and statics in `module`.
+    pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
+        //println!("todo {:?}", self.todo);
+        define_all_allocs(tcx, module, &mut self);
+        //println!("done {:?}", self.done);
+        self.done.clear();
+    }
+}
+
+/// Evaluates all constants required by the function's MIR, reporting an error
+/// for each erroneous one. Returns false if at least one constant failed to
+/// evaluate.
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
+    let mut all_constants_ok = true;
+    for constant in &fx.mir.required_consts {
+        let const_ = match fx.monomorphize(constant.literal) {
+            ConstantKind::Ty(ct) => ct,
+            // Already evaluated values can't fail.
+            ConstantKind::Val(..) => continue,
+        };
+        match const_.val {
+            ConstKind::Value(_) => {}
+            ConstKind::Unevaluated(unevaluated) => {
+                if let Err(err) =
+                    fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
+                {
+                    all_constants_ok = false;
+                    match err {
+                        ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
+                            fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+                        }
+                        ErrorHandled::TooGeneric => {
+                            // Monomorphization should have removed all generic parameters.
+                            span_bug!(
+                                constant.span,
+                                "codgen encountered polymorphic constant: {:?}",
+                                err
+                            );
+                        }
+                    }
+                }
+            }
+            ConstKind::Param(_)
+            | ConstKind::Infer(_)
+            | ConstKind::Bound(_, _)
+            | ConstKind::Placeholder(_)
+            | ConstKind::Error(_) => unreachable!("{:?}", const_),
+        }
+    }
+    all_constants_ok
+}
+
+/// Defines the static `def_id` (and any allocations it references transitively)
+/// as data objects in `module`.
+pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
+    let mut constants_cx = ConstantCx::new();
+    constants_cx.todo.push(TodoItem::Static(def_id));
+    constants_cx.finalize(tcx, module);
+}
+
+/// Returns a `CValue` holding a pointer to the thread local static `def_id`.
+pub(crate) fn codegen_tls_ref<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    def_id: DefId,
+    layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+    }
+    // `tls_value` resolves the per-thread address of the data object.
+    let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+    CValue::by_val(tls_ptr, layout)
+}
+
+/// Returns a `CPlace` referring to the (non thread local) static `def_id`.
+fn codegen_static_ref<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    def_id: DefId,
+    layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
+    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(local_data_id, format!("{:?}", def_id));
+    }
+    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+    assert!(!layout.is_unsized(), "unsized statics aren't supported");
+    // TLS statics must go through `codegen_tls_ref` instead.
+    assert!(
+        matches!(
+            fx.bcx.func.global_values[local_data_id],
+            GlobalValueData::Symbol { tls: false, .. }
+        ),
+        "tls static referenced without Rvalue::ThreadLocalRef"
+    );
+    CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
+/// Lowers the MIR constant `constant` to a `CValue`, evaluating it first if it
+/// is still unevaluated.
+pub(crate) fn codegen_constant<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+    let const_ = match fx.monomorphize(constant.literal) {
+        ConstantKind::Ty(ct) => ct,
+        ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
+    };
+    let const_val = match const_.val {
+        ConstKind::Value(const_val) => const_val,
+        // Statics are lowered as references to their data objects rather than
+        // being evaluated by value.
+        ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+            if fx.tcx.is_static(def.did) =>
+        {
+            assert!(substs.is_empty());
+            assert!(promoted.is_none());
+
+            return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty)).to_cvalue(fx);
+        }
+        ConstKind::Unevaluated(unevaluated) => {
+            match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
+                Ok(const_val) => const_val,
+                Err(_) => {
+                    // `check_constants` should have reported this error already.
+                    span_bug!(constant.span, "erroneous constant not captured by required_consts");
+                }
+            }
+        }
+        ConstKind::Param(_)
+        | ConstKind::Infer(_)
+        | ConstKind::Bound(_, _)
+        | ConstKind::Placeholder(_)
+        | ConstKind::Error(_) => unreachable!("{:?}", const_),
+    };
+
+    codegen_const_value(fx, const_val, const_.ty)
+}
+
+/// Lowers an already evaluated constant value of type `ty` to a `CValue`.
+pub(crate) fn codegen_const_value<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    const_val: ConstValue<'tcx>,
+    ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+    let layout = fx.layout_of(ty);
+    assert!(!layout.is_unsized(), "sized const value");
+
+    // ZSTs carry no data; a dangling but well-aligned pointer suffices.
+    if layout.is_zst() {
+        return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+    }
+
+    match const_val {
+        ConstValue::Scalar(x) => match x {
+            Scalar::Int(int) => {
+                if fx.clif_type(layout.ty).is_some() {
+                    return CValue::const_val(fx, layout, int);
+                } else {
+                    // No single Cranelift scalar type represents this value;
+                    // materialize the raw bits through a stack slot instead.
+                    let raw_val = int.to_bits(int.size()).unwrap();
+                    let val = match int.size().bytes() {
+                        1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
+                        2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
+                        4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
+                        8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
+                        16 => {
+                            let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
+                            let msb =
+                                fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
+                            fx.bcx.ins().iconcat(lsb, msb)
+                        }
+                        _ => unreachable!(),
+                    };
+
+                    let place = CPlace::new_stack_slot(fx, layout);
+                    place.to_ptr().store(fx, val, MemFlags::trusted());
+                    place.to_cvalue(fx)
+                }
+            }
+            Scalar::Ptr(ptr) => {
+                // A pointer into a global allocation: resolve the base address of the
+                // memory, function or static it points into, then apply the offset.
+                let alloc_kind = fx.tcx.get_global_alloc(ptr.alloc_id);
+                let base_addr = match alloc_kind {
+                    Some(GlobalAlloc::Memory(alloc)) => {
+                        let data_id = data_id_for_alloc_id(
+                            &mut fx.constants_cx,
+                            fx.module,
+                            ptr.alloc_id,
+                            alloc.mutability,
+                        );
+                        let local_data_id =
+                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                        if fx.clif_comments.enabled() {
+                            fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
+                        }
+                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                    }
+                    Some(GlobalAlloc::Function(instance)) => {
+                        let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
+                        let local_func_id =
+                            fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+                        fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+                    }
+                    Some(GlobalAlloc::Static(def_id)) => {
+                        assert!(fx.tcx.is_static(def_id));
+                        let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+                        let local_data_id =
+                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                        if fx.clif_comments.enabled() {
+                            fx.add_comment(local_data_id, format!("{:?}", def_id));
+                        }
+                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                    }
+                    None => bug!("missing allocation {:?}", ptr.alloc_id),
+                };
+                let val = if ptr.offset.bytes() != 0 {
+                    fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap())
+                } else {
+                    base_addr
+                };
+                CValue::by_val(val, layout)
+            }
+        },
+        ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+            pointer_for_allocation(fx, alloc)
+                .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+            layout,
+        ),
+        // A slice constant becomes a (pointer, length) pair.
+        ConstValue::Slice { data, start, end } => {
+            let ptr = pointer_for_allocation(fx, data)
+                .offset_i64(fx, i64::try_from(start).unwrap())
+                .get_addr(fx);
+            let len = fx
+                .bcx
+                .ins()
+                .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+            CValue::by_val_pair(ptr, len, layout)
+        }
+    }
+}
+
+/// Returns a pointer to the start of the constant allocation `alloc`, interning
+/// it as an anonymous data object in the current module.
+pub(crate) fn pointer_for_allocation<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    alloc: &'tcx Allocation,
+) -> crate::pointer::Pointer {
+    let alloc_id = fx.tcx.create_memory_alloc(alloc);
+    let data_id =
+        data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, alloc.mutability);
+
+    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+    }
+    let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+    crate::pointer::Pointer::new(global_ptr)
+}
+
+/// Get the `DataId` of the anonymous data object backing `alloc_id`, declaring
+/// it if it hasn't been declared yet.
+///
+/// The alloc id is always pushed onto `cx.todo` so its contents get defined by
+/// `define_all_allocs`; duplicates there are harmless as `define_all_allocs`
+/// skips already-defined data ids.
+pub(crate) fn data_id_for_alloc_id(
+    cx: &mut ConstantCx,
+    module: &mut dyn Module,
+    alloc_id: AllocId,
+    mutability: rustc_hir::Mutability,
+) -> DataId {
+    cx.todo.push(TodoItem::Alloc(alloc_id));
+    *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+        module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
+    })
+}
+
+/// Get the `DataId` for the data object corresponding to a static.
+///
+/// `definition` is `true` when the static is being defined in the current
+/// codegen unit and `false` when it is merely referenced; this decides the
+/// linkage used for the declaration. For referenced statics with weak/extern
+/// linkage an indirection global is returned instead (see below).
+fn data_id_for_static(
+    tcx: TyCtxt<'_>,
+    module: &mut dyn Module,
+    def_id: DefId,
+    definition: bool,
+) -> DataId {
+    let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+    let linkage = if definition {
+        crate::linkage::get_static_linkage(tcx, def_id)
+    } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+        || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+    {
+        Linkage::Preemptible
+    } else {
+        Linkage::Import
+    };
+
+    let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+    let symbol_name = tcx.symbol_name(instance).name;
+    let ty = instance.ty(tcx, ParamEnv::reveal_all());
+    // The data object must be writable if the static is `static mut` or if its
+    // type has interior mutability (is not `Freeze`).
+    let is_mutable = if tcx.is_mutable_static(def_id) {
+        true
+    } else {
+        !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+    };
+    let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
+
+    let attrs = tcx.codegen_fn_attrs(def_id);
+
+    let data_id = module
+        .declare_data(
+            &*symbol_name,
+            linkage,
+            is_mutable,
+            attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+        )
+        .unwrap();
+
+    if rlinkage.is_some() {
+        // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+        // Declare an internal global `extern_with_linkage_foo` which
+        // is initialized with the address of `foo`.  If `foo` is
+        // discarded during linking (for example, if `foo` has weak
+        // linkage and there are no definitions), then
+        // `extern_with_linkage_foo` will instead be initialized to
+        // zero.
+
+        let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+        let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
+        let mut data_ctx = DataContext::new();
+        data_ctx.set_align(align);
+        let data = module.declare_data_in_data(data_id, &mut data_ctx);
+        // The indirection global is a single pointer-sized slot holding the
+        // (possibly zero after linking) address of the real static.
+        data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+        data_ctx.write_data_addr(0, data, 0);
+        match module.define_data(ref_data_id, &data_ctx) {
+            // Every time the static is referenced there will be another definition of this global,
+            // so duplicate definitions are expected and allowed.
+            Err(ModuleError::DuplicateDefinition(_)) => {}
+            res => res.unwrap(),
+        }
+        ref_data_id
+    } else {
+        data_id
+    }
+}
+
+/// Define the contents of every allocation and static queued in `cx.todo`,
+/// including any further allocations discovered while processing relocations
+/// (those are pushed back onto the queue by `data_id_for_alloc_id`).
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
+    while let Some(todo_item) = cx.todo.pop() {
+        let (data_id, alloc, section_name) = match todo_item {
+            TodoItem::Alloc(alloc_id) => {
+                //println!("alloc_id {}", alloc_id);
+                let alloc = match tcx.get_global_alloc(alloc_id).unwrap() {
+                    GlobalAlloc::Memory(alloc) => alloc,
+                    GlobalAlloc::Function(_) | GlobalAlloc::Static(_) => unreachable!(),
+                };
+                let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+                    module
+                        .declare_anonymous_data(
+                            alloc.mutability == rustc_hir::Mutability::Mut,
+                            false,
+                        )
+                        .unwrap()
+                });
+                (data_id, alloc, None)
+            }
+            TodoItem::Static(def_id) => {
+                //println!("static {:?}", def_id);
+
+                let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
+
+                let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+                let data_id = data_id_for_static(tcx, module, def_id, true);
+                (data_id, alloc, section_name)
+            }
+        };
+
+        //println!("data_id {}", data_id);
+        // A data object may be queued more than once; only define it once.
+        if cx.done.contains(&data_id) {
+            continue;
+        }
+
+        let mut data_ctx = DataContext::new();
+        data_ctx.set_align(alloc.align.bytes());
+
+        if let Some(section_name) = section_name {
+            // On macOS a link section is "segment,section"; elsewhere it is
+            // just the section name with an empty segment.
+            let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
+                if let Some(names) = section_name.split_once(',') {
+                    names
+                } else {
+                    tcx.sess.fatal(&format!(
+                        "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
+                        section_name
+                    ));
+                }
+            } else {
+                ("", &*section_name)
+            };
+            data_ctx.set_segment_section(segment_name, section_name);
+        }
+
+        // Write the raw bytes of the allocation first; relocations are patched
+        // in below on top of them.
+        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+        data_ctx.define(bytes.into_boxed_slice());
+
+        for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
+            // The addend is the pointer-sized integer stored at the relocation
+            // offset, read with the target's endianness.
+            let addend = {
+                let endianness = tcx.data_layout.endian;
+                let offset = offset.bytes() as usize;
+                let ptr_size = tcx.data_layout.pointer_size;
+                let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+                    offset..offset + ptr_size.bytes() as usize,
+                );
+                read_target_uint(endianness, bytes).unwrap()
+            };
+
+            let reloc_target_alloc = tcx.get_global_alloc(reloc).unwrap();
+            let data_id = match reloc_target_alloc {
+                GlobalAlloc::Function(instance) => {
+                    assert_eq!(addend, 0);
+                    let func_id = crate::abi::import_function(tcx, module, instance);
+                    let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+                    data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+                    continue;
+                }
+                GlobalAlloc::Memory(target_alloc) => {
+                    data_id_for_alloc_id(cx, module, reloc, target_alloc.mutability)
+                }
+                GlobalAlloc::Static(def_id) => {
+                    if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+                    {
+                        tcx.sess.fatal(&format!(
+                            "Allocation {:?} contains reference to TLS value {:?}",
+                            alloc, def_id
+                        ));
+                    }
+
+                    // Don't push a `TodoItem::Static` here, as it will cause statics used by
+                    // multiple crates to be duplicated between them. It isn't necessary anyway,
+                    // as it will get pushed by `codegen_static` when necessary.
+                    data_id_for_static(tcx, module, def_id, false)
+                }
+            };
+
+            let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+            data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+        }
+
+        module.define_data(data_id, &data_ctx).unwrap();
+        cx.done.insert(data_id);
+    }
+
+    assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
+/// Try to statically determine the constant value of the given MIR operand.
+///
+/// Returns `None` when the operand cannot be shown to be a single constant,
+/// e.g. when the referenced local is assigned more than once or may be
+/// clobbered by a call, inline asm, or a discriminant write.
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+    fx: &FunctionCx<'_, '_, 'tcx>,
+    operand: &Operand<'tcx>,
+) -> Option<ConstValue<'tcx>> {
+    match operand {
+        Operand::Constant(const_) => match const_.literal {
+            ConstantKind::Ty(const_) => {
+                fx.monomorphize(const_).eval(fx.tcx, ParamEnv::reveal_all()).val.try_to_value()
+            }
+            ConstantKind::Val(val, _) => Some(val),
+        },
+        // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
+        // inside a temporary before being passed to the intrinsic requiring the const argument.
+        // This code tries to find a single constant defining definition of the referenced local.
+        Operand::Copy(place) | Operand::Move(place) => {
+            if !place.projection.is_empty() {
+                return None;
+            }
+            let mut computed_const_val = None;
+            for bb_data in fx.mir.basic_blocks() {
+                for stmt in &bb_data.statements {
+                    match &stmt.kind {
+                        StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
+                            match &local_and_rvalue.1 {
+                                Rvalue::Cast(CastKind::Misc, operand, ty) => {
+                                    if computed_const_val.is_some() {
+                                        return None; // local assigned twice
+                                    }
+                                    if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
+                                        return None;
+                                    }
+                                    let const_val = mir_operand_get_const_val(fx, operand)?;
+                                    // Only accept casts that don't change the scalar's size.
+                                    if fx.layout_of(ty).size
+                                        != const_val.try_to_scalar_int()?.size()
+                                    {
+                                        return None;
+                                    }
+                                    computed_const_val = Some(const_val);
+                                }
+                                Rvalue::Use(operand) => {
+                                    computed_const_val = mir_operand_get_const_val(fx, operand)
+                                }
+                                _ => return None,
+                            }
+                        }
+                        StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
+                            if &**stmt_place == place =>
+                        {
+                            return None;
+                        }
+                        StatementKind::LlvmInlineAsm(_) | StatementKind::CopyNonOverlapping(_) => {
+                            return None;
+                        } // conservative handling
+                        StatementKind::Assign(_)
+                        | StatementKind::FakeRead(_)
+                        | StatementKind::SetDiscriminant { .. }
+                        | StatementKind::StorageLive(_)
+                        | StatementKind::StorageDead(_)
+                        | StatementKind::Retag(_, _)
+                        | StatementKind::AscribeUserType(_, _)
+                        | StatementKind::Coverage(_)
+                        | StatementKind::Nop => {}
+                    }
+                }
+                // Terminators that could write to the local make the result
+                // unknown; terminators that can't are explicitly listed.
+                match &bb_data.terminator().kind {
+                    TerminatorKind::Goto { .. }
+                    | TerminatorKind::SwitchInt { .. }
+                    | TerminatorKind::Resume
+                    | TerminatorKind::Abort
+                    | TerminatorKind::Return
+                    | TerminatorKind::Unreachable
+                    | TerminatorKind::Drop { .. }
+                    | TerminatorKind::Assert { .. } => {}
+                    TerminatorKind::DropAndReplace { .. }
+                    | TerminatorKind::Yield { .. }
+                    | TerminatorKind::GeneratorDrop
+                    | TerminatorKind::FalseEdge { .. }
+                    | TerminatorKind::FalseUnwind { .. } => unreachable!(),
+                    TerminatorKind::InlineAsm { .. } => return None,
+                    TerminatorKind::Call { destination: Some((call_place, _)), .. }
+                        if call_place == place =>
+                    {
+                        return None;
+                    }
+                    TerminatorKind::Call { .. } => {}
+                }
+            }
+            computed_const_val
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
new file mode 100644
index 00000000000..6018eefcd42
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -0,0 +1,192 @@
+//! Write the debuginfo into an object file.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use crate::backend::WriteDebugInfo;
+
+use super::DebugContext;
+
+impl DebugContext<'_> {
+    /// Write all collected DWARF sections into `product`, then register the
+    /// relocations between them.
+    pub(crate) fn emit<P: WriteDebugInfo>(&mut self, product: &mut P) {
+        // Attach the accumulated per-function ranges to the root DIE.
+        let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+        let root = self.dwarf.unit.root();
+        let root = self.dwarf.unit.get_mut(root);
+        root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
+
+        let mut sections = Sections::new(WriterRelocate::new(self.endian));
+        self.dwarf.write(&mut sections).unwrap();
+
+        // First pass: add every non-empty section to the product and remember
+        // its id so relocations can refer to it.
+        let mut section_map = FxHashMap::default();
+        let _: Result<()> = sections.for_each_mut(|id, section| {
+            if !section.writer.slice().is_empty() {
+                let section_id = product.add_debug_section(id, section.writer.take());
+                section_map.insert(id, section_id);
+            }
+            Ok(())
+        });
+
+        // Second pass: emit the relocations collected by `WriterRelocate`.
+        let _: Result<()> = sections.for_each(|id, section| {
+            if let Some(section_id) = section_map.get(&id) {
+                for reloc in &section.relocs {
+                    product.add_debug_reloc(&section_map, section_id, reloc);
+                }
+            }
+            Ok(())
+        });
+    }
+}
+
+/// A relocation that must be applied to a debug section once all sections have
+/// been written.
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+    /// Offset inside the containing section where the value is patched in.
+    pub(crate) offset: u32,
+    /// Size in bytes of the patched value.
+    pub(crate) size: u8,
+    /// The section or symbol the relocation points at.
+    pub(crate) name: DebugRelocName,
+    /// Constant added to the resolved address.
+    pub(crate) addend: i64,
+    /// Relocation kind (absolute or relative) for the object writer.
+    pub(crate) kind: object::RelocationKind,
+}
+
+/// Target of a [`DebugReloc`]: either another debug section or a symbol index.
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+    Section(SectionId),
+    Symbol(usize),
+}
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+    /// Relocations recorded while writing; applied or emitted later.
+    pub(super) relocs: Vec<DebugReloc>,
+    /// The underlying byte buffer being written to.
+    pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+    /// Create an empty writer for the given target endianness.
+    pub(super) fn new(endian: RunTimeEndian) -> Self {
+        WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
+    }
+
+    /// Perform the collected relocations to be usable for JIT usage.
+    #[cfg(feature = "jit")]
+    pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
+        use std::convert::TryInto;
+
+        for reloc in self.relocs.drain(..) {
+            match reloc.name {
+                // Section-relative relocations are not expected in the JIT path.
+                super::DebugRelocName::Section(_) => unreachable!(),
+                super::DebugRelocName::Symbol(sym) => {
+                    // Symbols are function ids; resolve to the finalized address.
+                    let addr = jit_module.get_finalized_function(
+                        cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
+                    );
+                    let val = (addr as u64 as i64 + reloc.addend) as u64;
+                    self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
+                }
+            }
+        }
+        self.writer.into_vec()
+    }
+}
+
+impl Writer for WriterRelocate {
+    type Endian = RunTimeEndian;
+
+    fn endian(&self) -> Self::Endian {
+        self.writer.endian()
+    }
+
+    fn len(&self) -> usize {
+        self.writer.len()
+    }
+
+    fn write(&mut self, bytes: &[u8]) -> Result<()> {
+        self.writer.write(bytes)
+    }
+
+    fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+        self.writer.write_at(offset, bytes)
+    }
+
+    /// Write an address, recording a relocation when it refers to a symbol.
+    /// A zero placeholder is written in place of the yet-unknown address.
+    fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+        match address {
+            Address::Constant(val) => self.write_udata(val, size),
+            Address::Symbol { symbol, addend } => {
+                let offset = self.len() as u64;
+                self.relocs.push(DebugReloc {
+                    offset: offset as u32,
+                    size,
+                    name: DebugRelocName::Symbol(symbol),
+                    addend: addend as i64,
+                    kind: object::RelocationKind::Absolute,
+                });
+                self.write_udata(0, size)
+            }
+        }
+    }
+
+    /// Write a section-relative offset as a relocation plus zero placeholder.
+    fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+        let offset = self.len() as u32;
+        self.relocs.push(DebugReloc {
+            offset,
+            size,
+            name: DebugRelocName::Section(section),
+            addend: val as i64,
+            kind: object::RelocationKind::Absolute,
+        });
+        self.write_udata(0, size)
+    }
+
+    /// Like `write_offset`, but patching at a previously written position.
+    fn write_offset_at(
+        &mut self,
+        offset: usize,
+        val: usize,
+        section: SectionId,
+        size: u8,
+    ) -> Result<()> {
+        self.relocs.push(DebugReloc {
+            offset: offset as u32,
+            size,
+            name: DebugRelocName::Section(section),
+            addend: val as i64,
+            kind: object::RelocationKind::Absolute,
+        });
+        self.write_udata_at(offset, 0, size)
+    }
+
+    /// Write an exception-handling pointer with the given encoding. Only
+    /// absolute and pc-relative encodings are supported.
+    fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+        match address {
+            // Address::Constant arm copied from gimli
+            Address::Constant(val) => {
+                // Indirect doesn't matter here.
+                let val = match eh_pe.application() {
+                    gimli::DW_EH_PE_absptr => val,
+                    gimli::DW_EH_PE_pcrel => {
+                        // TODO: better handling of sign
+                        let offset = self.len() as u64;
+                        offset.wrapping_sub(val)
+                    }
+                    _ => {
+                        return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+                    }
+                };
+                self.write_eh_pointer_data(val, eh_pe.format(), size)
+            }
+            Address::Symbol { symbol, addend } => match eh_pe.application() {
+                gimli::DW_EH_PE_pcrel => {
+                    let size = match eh_pe.format() {
+                        gimli::DW_EH_PE_sdata4 => 4,
+                        _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+                    };
+                    self.relocs.push(DebugReloc {
+                        offset: self.len() as u32,
+                        size,
+                        name: DebugRelocName::Symbol(symbol),
+                        addend,
+                        kind: object::RelocationKind::Relative,
+                    });
+                    self.write_udata(0, size)
+                }
+                _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+            },
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
new file mode 100644
index 00000000000..c7e15f81e03
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -0,0 +1,219 @@
+//! Line info generation (`.debug_line`)
+
+use std::ffi::OsStr;
+use std::path::{Component, Path};
+
+use crate::prelude::*;
+
+use rustc_span::{
+    FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::MachSrcLoc;
+
+use gimli::write::{
+    Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+    UnitEntryId,
+};
+
+// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
+/// Split a path into its parent directory and its final file name component.
+///
+/// Panics when the last component is not a normal file name (e.g. `..` or a
+/// root component).
+fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
+    let mut iter = path.components();
+    let file_name = match iter.next_back() {
+        Some(Component::Normal(p)) => p,
+        component => {
+            panic!(
+                "Path component {:?} of path {} is an invalid filename",
+                component,
+                path.display()
+            );
+        }
+    };
+    let parent = iter.as_path();
+    (parent, file_name)
+}
+
+// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
+/// View an `OsStr` as raw bytes. On non-unix targets this panics if the
+/// string is not valid UTF-8.
+fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
+    #[cfg(unix)]
+    {
+        use std::os::unix::ffi::OsStrExt;
+        path.as_bytes()
+    }
+    #[cfg(not(unix))]
+    {
+        path.to_str().unwrap().as_bytes()
+    }
+}
+
+/// Byte length of an MD5 digest.
+pub(crate) const MD5_LEN: usize = 16;
+
+/// Build gimli `FileInfo` from a source file hash.
+///
+/// Returns `Some` only for MD5 hashes, the only algorithm representable in the
+/// DWARF line program's file table here; timestamp and size are left as 0.
+pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+    if hash.kind == SourceFileHashAlgorithm::Md5 {
+        let mut buf = [0u8; MD5_LEN];
+        buf.copy_from_slice(hash.hash_bytes());
+        Some(FileInfo { timestamp: 0, size: 0, md5: buf })
+    } else {
+        None
+    }
+}
+
+/// Register a source file in the line program's file table and return its id.
+///
+/// Real paths are split into directory and file name entries; synthetic file
+/// names (macros, command line input, ...) get a dummy entry without hash info.
+fn line_program_add_file(
+    line_program: &mut LineProgram,
+    line_strings: &mut LineStringTable,
+    file: &SourceFile,
+) -> FileId {
+    match &file.name {
+        FileName::Real(path) => {
+            let (dir_path, file_name) = split_path_dir_and_file(path.remapped_path_if_available());
+            let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+            let file_name = osstr_as_utf8_bytes(file_name);
+
+            let dir_id = if !dir_name.is_empty() {
+                let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+                line_program.add_directory(dir_name)
+            } else {
+                line_program.default_directory()
+            };
+            let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+            let info = make_file_info(file.src_hash);
+
+            // `file_has_md5` must only stay true if *every* file has an MD5 hash.
+            line_program.file_has_md5 &= info.is_some();
+            line_program.add_file(file_name, dir_id, info)
+        }
+        // FIXME give more appropriate file names
+        filename => {
+            let dir_id = line_program.default_directory();
+            let dummy_file_name = LineString::new(
+                filename.prefer_remapped().to_string().into_bytes(),
+                line_program.encoding(),
+                line_strings,
+            );
+            line_program.add_file(dummy_file_name, dir_id, None)
+        }
+    }
+}
+
+impl<'tcx> DebugContext<'tcx> {
+    /// Set `DW_AT_decl_file`/`line`/`column` on the given DIE from a span.
+    pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
+        let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
+
+        let file_id = line_program_add_file(
+            &mut self.dwarf.unit.line_program,
+            &mut self.dwarf.line_strings,
+            &loc.file,
+        );
+
+        let entry = self.dwarf.unit.get_mut(entry_id);
+
+        entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
+        entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
+        // FIXME: probably omit this
+        entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
+    }
+
+    /// Emit the `.debug_line` rows for one compiled function and set its
+    /// `DW_AT_low_pc`/`DW_AT_high_pc`. Returns the total code size of the
+    /// function as reported by the machine-code buffer.
+    pub(super) fn create_debug_lines(
+        &mut self,
+        symbol: usize,
+        entry_id: UnitEntryId,
+        context: &Context,
+        function_span: Span,
+        source_info_set: &indexmap::IndexSet<SourceInfo>,
+    ) -> CodeOffset {
+        let tcx = self.tcx;
+        let line_program = &mut self.dwarf.unit.line_program;
+
+        let line_strings = &mut self.dwarf.line_strings;
+        let mut last_span = None;
+        let mut last_file = None;
+        let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
+            // Same span as last row: reuse file/line/column, just emit a row.
+            if let Some(last_span) = last_span {
+                if span == last_span {
+                    line_program.generate_row();
+                    return;
+                }
+            }
+            last_span = Some(span);
+
+            // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+            // In order to have a good line stepping behavior in debugger, we overwrite debug
+            // locations of macro expansions with that of the outermost expansion site
+            // (unless the crate is being compiled with `-Z debug-macros`).
+            let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
+                span
+            } else {
+                // Walk up the macro expansion chain until we reach a non-expanded span.
+                // We also stop at the function body level because no line stepping can occur
+                // at the level above that.
+                rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+            };
+
+            let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
+                Ok(SourceFileAndLine { sf: file, line }) => {
+                    let line_pos = file.line_begin_pos(span.lo());
+
+                    // DWARF lines and columns are 1-based.
+                    (
+                        file,
+                        u64::try_from(line).unwrap() + 1,
+                        u64::from((span.lo() - line_pos).to_u32()) + 1,
+                    )
+                }
+                Err(file) => (file, 0, 0),
+            };
+
+            // line_program_add_file is very slow.
+            // Optimize for the common case of the current file not being changed.
+            let current_file_changed = if let Some(last_file) = &last_file {
+                // If the allocations are not equal, then the files may still be equal, but that
+                // is not a problem, as this is just an optimization.
+                !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
+            } else {
+                true
+            };
+            if current_file_changed {
+                let file_id = line_program_add_file(line_program, line_strings, &file);
+                line_program.row().file = file_id;
+                last_file = Some(file);
+            }
+
+            line_program.row().line = line;
+            line_program.row().column = col;
+            line_program.generate_row();
+        };
+
+        line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+
+        let mut func_end = 0;
+
+        let mcr = context.mach_compile_result.as_ref().unwrap();
+        for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+            line_program.row().address_offset = u64::from(start);
+            if !loc.is_default() {
+                let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
+                create_row_for_span(line_program, source_info.span);
+            } else {
+                // No source location recorded: fall back to the function span.
+                create_row_for_span(line_program, function_span);
+            }
+            func_end = end;
+        }
+
+        // End the line sequence at the last srcloc end ...
+        line_program.end_sequence(u64::from(func_end));
+
+        // ... but use the full machine-code size for high_pc and the return value.
+        let func_end = mcr.buffer.total_size();
+
+        assert_ne!(func_end, 0);
+
+        let entry = self.dwarf.unit.get_mut(entry_id);
+        entry.set(
+            gimli::DW_AT_low_pc,
+            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+        );
+        entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
+
+        self.emit_location(entry_id, function_span);
+
+        func_end
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
new file mode 100644
index 00000000000..c67336eb3f2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -0,0 +1,384 @@
+//! Handling of everything related to debuginfo.
+
+mod emit;
+mod line_info;
+mod unwind;
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::{LabelValueLoc, StackSlots, ValueLabel, ValueLoc};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::ValueLocRange;
+
+use gimli::write::{
+    Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+    LocationList, Range, RangeList, UnitEntryId,
+};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+
+pub(crate) use emit::{DebugReloc, DebugRelocName};
+pub(crate) use unwind::UnwindContext;
+
+fn target_endian(tcx: TyCtxt<'_>) -> RunTimeEndian {
+    use rustc_target::abi::Endian;
+
+    match tcx.data_layout.endian {
+        Endian::Big => RunTimeEndian::Big,
+        Endian::Little => RunTimeEndian::Little,
+    }
+}
+
+pub(crate) struct DebugContext<'tcx> {
+    tcx: TyCtxt<'tcx>,
+
+    endian: RunTimeEndian,
+
+    dwarf: DwarfUnit,
+    unit_range_list: RangeList,
+
+    types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+}
+
+impl<'tcx> DebugContext<'tcx> {
+    pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+        let encoding = Encoding {
+            format: Format::Dwarf32,
+            // TODO: this should be configurable
+            // macOS doesn't seem to support DWARF > 3
+            // version 5 is required for the md5 file hash
+            version: if tcx.sess.target.is_like_osx {
+                3
+            } else {
+                // FIXME change to version 5 once the gdb and lldb shipping with the latest debian
+                // support it.
+                4
+            },
+            address_size: isa.frontend_config().pointer_bytes(),
+        };
+
+        let mut dwarf = DwarfUnit::new(encoding);
+
+        let producer = format!(
+            "cg_clif (rustc {}, cranelift {})",
+            rustc_interface::util::version_str().unwrap_or("unknown version"),
+            cranelift_codegen::VERSION,
+        );
+        let comp_dir = tcx.sess.working_dir.to_string_lossy(false).into_owned();
+        let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
+            Some(path) => {
+                let name = path.to_string_lossy().into_owned();
+                (name, None)
+            }
+            None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
+        };
+
+        let mut line_program = LineProgram::new(
+            encoding,
+            LineEncoding::default(),
+            LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
+            LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
+            file_info,
+        );
+        line_program.file_has_md5 = file_info.is_some();
+
+        dwarf.unit.line_program = line_program;
+
+        {
+            let name = dwarf.strings.add(name);
+            let comp_dir = dwarf.strings.add(comp_dir);
+
+            let root = dwarf.unit.root();
+            let root = dwarf.unit.get_mut(root);
+            root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
+            root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
+            root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+            root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
+            root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
+        }
+
+        DebugContext {
+            tcx,
+
+            endian: target_endian(tcx),
+
+            dwarf,
+            unit_range_list: RangeList(Vec::new()),
+
+            types: FxHashMap::default(),
+        }
+    }
+
+    fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
+        if let Some(type_id) = self.types.get(ty) {
+            return *type_id;
+        }
+
+        let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+        let primitive = |dwarf: &mut DwarfUnit, ate| {
+            let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+            let type_entry = dwarf.unit.get_mut(type_id);
+            type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+            type_id
+        };
+
+        let name = format!("{}", ty);
+        let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+
+        let type_id = match ty.kind() {
+            ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
+            ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
+            ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
+            ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
+            ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
+            ty::Ref(_, pointee_ty, _mutbl)
+            | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
+                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
+
+                // Ensure that type is inserted before recursing to avoid duplicates
+                self.types.insert(ty, type_id);
+
+                let pointee = self.dwarf_ty(pointee_ty);
+
+                let type_entry = self.dwarf.unit.get_mut(type_id);
+
+                //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
+                type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
+
+                type_id
+            }
+            ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
+                let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
+
+                // Ensure that type is inserted before recursing to avoid duplicates
+                self.types.insert(ty, type_id);
+
+                let variant = adt_def.non_enum_variant();
+
+                for (field_idx, field_def) in variant.fields.iter().enumerate() {
+                    let field_offset = layout.fields.offset(field_idx);
+                    let field_layout = layout
+                        .field(
+                            &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
+                            field_idx,
+                        )
+                        .unwrap();
+
+                    let field_type = self.dwarf_ty(field_layout.ty);
+
+                    let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
+                    let field_entry = self.dwarf.unit.get_mut(field_id);
+
+                    field_entry.set(
+                        gimli::DW_AT_name,
+                        AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
+                    );
+                    field_entry.set(
+                        gimli::DW_AT_data_member_location,
+                        AttributeValue::Udata(field_offset.bytes()),
+                    );
+                    field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
+                }
+
+                type_id
+            }
+            _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
+        };
+
+        let type_entry = self.dwarf.unit.get_mut(type_id);
+
+        type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+        type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
+
+        self.types.insert(ty, type_id);
+
+        type_id
+    }
+
+    fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
+        let dw_ty = self.dwarf_ty(ty);
+
+        let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
+        let var_entry = self.dwarf.unit.get_mut(var_id);
+
+        var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+        var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+
+        var_id
+    }
+
+    pub(crate) fn define_function(
+        &mut self,
+        instance: Instance<'tcx>,
+        func_id: FuncId,
+        name: &str,
+        isa: &dyn TargetIsa,
+        context: &Context,
+        source_info_set: &indexmap::IndexSet<SourceInfo>,
+        local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+    ) {
+        let symbol = func_id.as_u32() as usize;
+        let mir = self.tcx.instance_mir(instance.def);
+
+        // FIXME: add to appropriate scope instead of root
+        let scope = self.dwarf.unit.root();
+
+        let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
+        let entry = self.dwarf.unit.get_mut(entry_id);
+        let name_id = self.dwarf.strings.add(name);
+        // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
+        entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
+        entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
+
+        let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
+
+        self.unit_range_list.0.push(Range::StartLength {
+            begin: Address::Symbol { symbol, addend: 0 },
+            length: u64::from(end),
+        });
+
+        let func_entry = self.dwarf.unit.get_mut(entry_id);
+        // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
+        func_entry.set(
+            gimli::DW_AT_low_pc,
+            AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+        );
+        // Using Udata for DW_AT_high_pc requires at least DWARF4
+        func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
+
+        // FIXME make it more reliable and implement scopes before re-enabling this.
+        if false {
+            let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
+
+            for (local, _local_decl) in mir.local_decls.iter_enumerated() {
+                let ty = self.tcx.subst_and_normalize_erasing_regions(
+                    instance.substs,
+                    ty::ParamEnv::reveal_all(),
+                    mir.local_decls[local].ty,
+                );
+                let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
+
+                let location = place_location(
+                    self,
+                    isa,
+                    symbol,
+                    context,
+                    &local_map,
+                    &value_labels_ranges,
+                    Place { local, projection: ty::List::empty() },
+                );
+
+                let var_entry = self.dwarf.unit.get_mut(var_id);
+                var_entry.set(gimli::DW_AT_location, location);
+            }
+        }
+
+        // FIXME create locals for all entries in mir.var_debug_info
+    }
+}
+
+fn place_location<'tcx>(
+    debug_context: &mut DebugContext<'tcx>,
+    isa: &dyn TargetIsa,
+    symbol: usize,
+    context: &Context,
+    local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
+    #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+        ValueLabel,
+        Vec<ValueLocRange>,
+    >,
+    place: Place<'tcx>,
+) -> AttributeValue {
+    assert!(place.projection.is_empty()); // FIXME implement them
+
+    match local_map[place.local].inner() {
+        CPlaceInner::Var(_local, var) => {
+            let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
+            if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
+                let loc_list = LocationList(
+                    value_loc_ranges
+                        .iter()
+                        .map(|value_loc_range| Location::StartEnd {
+                            begin: Address::Symbol {
+                                symbol,
+                                addend: i64::from(value_loc_range.start),
+                            },
+                            end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
+                            data: translate_loc(
+                                isa,
+                                value_loc_range.loc,
+                                &context.func.stack_slots,
+                            )
+                            .unwrap(),
+                        })
+                        .collect(),
+                );
+                let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
+
+                AttributeValue::LocationListRef(loc_list_id)
+            } else {
+                // FIXME set value labels for unused locals
+
+                AttributeValue::Exprloc(Expression::new())
+            }
+        }
+        CPlaceInner::VarPair(_, _, _) => {
+            // FIXME implement this
+
+            AttributeValue::Exprloc(Expression::new())
+        }
+        CPlaceInner::VarLane(_, _, _) => {
+            // FIXME implement this
+
+            AttributeValue::Exprloc(Expression::new())
+        }
+        CPlaceInner::Addr(_, _) => {
+            // FIXME implement this (used by arguments and returns)
+
+            AttributeValue::Exprloc(Expression::new())
+
+            // For PointerBase::Stack:
+            //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot), &context.func.stack_slots).unwrap())
+        }
+    }
+}
+
+// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
+fn translate_loc(
+    isa: &dyn TargetIsa,
+    loc: LabelValueLoc,
+    stack_slots: &StackSlots,
+) -> Option<Expression> {
+    match loc {
+        LabelValueLoc::ValueLoc(ValueLoc::Reg(reg)) => {
+            let machine_reg = isa.map_dwarf_register(reg).unwrap();
+            let mut expr = Expression::new();
+            expr.op_reg(gimli::Register(machine_reg));
+            Some(expr)
+        }
+        LabelValueLoc::ValueLoc(ValueLoc::Stack(ss)) => {
+            if let Some(ss_offset) = stack_slots[ss].offset {
+                let mut expr = Expression::new();
+                expr.op_breg(X86_64::RBP, i64::from(ss_offset) + 16);
+                Some(expr)
+            } else {
+                None
+            }
+        }
+        LabelValueLoc::ValueLoc(ValueLoc::Unassigned) => unreachable!(),
+        LabelValueLoc::Reg(reg) => {
+            let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
+            let mut expr = Expression::new();
+            expr.op_reg(gimli::Register(machine_reg));
+            Some(expr)
+        }
+        LabelValueLoc::SPOffset(offset) => {
+            let mut expr = Expression::new();
+            expr.op_breg(X86_64::RSP, offset);
+            Some(expr)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
new file mode 100644
index 00000000000..d1251e749f3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
@@ -0,0 +1,130 @@
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+use gimli::RunTimeEndian;
+
+use crate::backend::WriteDebugInfo;
+
+pub(crate) struct UnwindContext {
+    endian: RunTimeEndian,
+    frame_table: FrameTable,
+    cie_id: Option<CieId>,
+}
+
+impl UnwindContext {
+    pub(crate) fn new(tcx: TyCtxt<'_>, isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
+        let endian = super::target_endian(tcx);
+        let mut frame_table = FrameTable::default();
+
+        let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
+            if pic_eh_frame {
+                cie.fde_address_encoding =
+                    gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+            }
+            Some(frame_table.add_cie(cie))
+        } else {
+            None
+        };
+
+        UnwindContext { endian, frame_table, cie_id }
+    }
+
+    pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+        let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+            unwind_info
+        } else {
+            return;
+        };
+
+        match unwind_info {
+            UnwindInfo::SystemV(unwind_info) => {
+                self.frame_table.add_fde(
+                    self.cie_id.unwrap(),
+                    unwind_info
+                        .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
+                );
+            }
+            UnwindInfo::WindowsX64(_) => {
+                // FIXME implement this
+            }
+            unwind_info => unimplemented!("{:?}", unwind_info),
+        }
+    }
+
+    pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
+        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+        if !eh_frame.0.writer.slice().is_empty() {
+            let id = eh_frame.id();
+            let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+            let mut section_map = FxHashMap::default();
+            section_map.insert(id, section_id);
+
+            for reloc in &eh_frame.0.relocs {
+                product.add_debug_reloc(&section_map, &section_id, reloc);
+            }
+        }
+    }
+
+    #[cfg(all(feature = "jit", windows))]
+    pub(crate) unsafe fn register_jit(self, _jit_module: &cranelift_jit::JITModule) {}
+
+    #[cfg(all(feature = "jit", not(windows)))]
+    pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) {
+        let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+        self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+        if eh_frame.0.writer.slice().is_empty() {
+            return;
+        }
+
+        let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
+
+        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+        eh_frame.extend(&[0, 0, 0, 0]);
+
+        // FIXME support unregistering unwind tables once cranelift-jit supports deallocating
+        // individual functions
+        #[allow(unused_variables)]
+        let (eh_frame, eh_frame_len, _) = Vec::into_raw_parts(eh_frame);
+
+        // =======================================================================
+        // Everything after this line up to the end of the file is loosely based on
+        // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+        #[cfg(target_os = "macos")]
+        {
+            // On macOS, `__register_frame` takes a pointer to a single FDE
+            let start = eh_frame;
+            let end = start.add(eh_frame_len);
+            let mut current = start;
+
+            // Walk all of the entries in the frame table and register them
+            while current < end {
+                let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+                // Skip over the CIE
+                if current != start {
+                    __register_frame(current);
+                }
+
+                // Move to the next table entry (+4 because the length itself is not inclusive)
+                current = current.add(len + 4);
+            }
+        }
+        #[cfg(not(target_os = "macos"))]
+        {
+            // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+            __register_frame(eh_frame);
+        }
+    }
+}
+
+extern "C" {
+    // libunwind import
+    fn __register_frame(fde: *const u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
new file mode 100644
index 00000000000..3326f87f000
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -0,0 +1,169 @@
+//! Handling of enum discriminants
+//!
+//! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
+
+use rustc_target::abi::{Int, TagEncoding, Variants};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_set_discriminant<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    place: CPlace<'tcx>,
+    variant_index: VariantIdx,
+) {
+    let layout = place.layout();
+    if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+        return;
+    }
+    match layout.variants {
+        Variants::Single { index } => {
+            assert_eq!(index, variant_index);
+        }
+        Variants::Multiple {
+            tag: _,
+            tag_field,
+            tag_encoding: TagEncoding::Direct,
+            variants: _,
+        } => {
+            let ptr = place.place_field(fx, mir::Field::new(tag_field));
+            let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
+            let to = if ptr.layout().abi.is_signed() {
+                ty::ScalarInt::try_from_int(
+                    ptr.layout().size.sign_extend(to) as i128,
+                    ptr.layout().size,
+                )
+                .unwrap()
+            } else {
+                ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
+            };
+            let discr = CValue::const_val(fx, ptr.layout(), to);
+            ptr.write_cvalue(fx, discr);
+        }
+        Variants::Multiple {
+            tag: _,
+            tag_field,
+            tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+            variants: _,
+        } => {
+            if variant_index != dataful_variant {
+                let niche = place.place_field(fx, mir::Field::new(tag_field));
+                let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+                let niche_value = ty::ScalarInt::try_from_uint(
+                    u128::from(niche_value).wrapping_add(niche_start),
+                    niche.layout().size,
+                )
+                .unwrap();
+                let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
+                niche.write_cvalue(fx, niche_llval);
+            }
+        }
+    }
+}
+
+pub(crate) fn codegen_get_discriminant<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    value: CValue<'tcx>,
+    dest_layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+    let layout = value.layout();
+
+    if layout.abi == Abi::Uninhabited {
+        return trap_unreachable_ret_value(
+            fx,
+            dest_layout,
+            "[panic] Tried to get discriminant for uninhabited type.",
+        );
+    }
+
+    let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
+        Variants::Single { index } => {
+            let discr_val = layout
+                .ty
+                .discriminant_for_variant(fx.tcx, *index)
+                .map_or(u128::from(index.as_u32()), |discr| discr.val);
+            let discr_val = if dest_layout.abi.is_signed() {
+                ty::ScalarInt::try_from_int(
+                    dest_layout.size.sign_extend(discr_val) as i128,
+                    dest_layout.size,
+                )
+                .unwrap()
+            } else {
+                ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
+            };
+            return CValue::const_val(fx, dest_layout, discr_val);
+        }
+        Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
+            (tag, *tag_field, tag_encoding)
+        }
+    };
+
+    let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+    // Read the tag/niche-encoded discriminant from memory.
+    let tag = value.value_field(fx, mir::Field::new(tag_field));
+    let tag = tag.load_scalar(fx);
+
+    // Decode the discriminant (specifically if it's niche-encoded).
+    match *tag_encoding {
+        TagEncoding::Direct => {
+            let signed = match tag_scalar.value {
+                Int(_, signed) => signed,
+                _ => false,
+            };
+            let val = clif_intcast(fx, tag, cast_to, signed);
+            CValue::by_val(val, dest_layout)
+        }
+        TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+            // Rebase from niche values to discriminants, and check
+            // whether the result is in range for the niche variants.
+
+            // We first compute the "relative discriminant" (wrt `niche_variants`),
+            // that is, if `n = niche_variants.end() - niche_variants.start()`,
+            // we remap `niche_start..=niche_start + n` (which may wrap around)
+            // to (non-wrap-around) `0..=n`, to be able to check whether the
+            // discriminant corresponds to a niche variant with one comparison.
+            // We also can't go directly to the (variant index) discriminant
+            // and check that it is in the range `niche_variants`, because
+            // that might not fit in the same type, on top of needing an extra
+            // comparison (see also the comment on `let niche_discr`).
+            let relative_discr = if niche_start == 0 {
+                tag
+            } else {
+                // FIXME handle niche_start > i64::MAX
+                fx.bcx.ins().iadd_imm(tag, -i64::try_from(niche_start).unwrap())
+            };
+            let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+            let is_niche = {
+                codegen_icmp_imm(
+                    fx,
+                    IntCC::UnsignedLessThanOrEqual,
+                    relative_discr,
+                    i128::from(relative_max),
+                )
+            };
+
+            // NOTE(eddyb) this addition needs to be performed on the final
+            // type, in case the niche itself can't represent all variant
+            // indices (e.g. `u8` niche with more than `256` variants,
+            // but enough uninhabited variants so that the remaining variants
+            // fit in the niche).
+            // In other words, `niche_variants.end - niche_variants.start`
+            // is representable in the niche, but `niche_variants.end`
+            // might not be, in extreme cases.
+            let niche_discr = {
+                let relative_discr = if relative_max == 0 {
+                    // HACK(eddyb) since we have only one niche, we know which
+                    // one it is, and we can avoid having a dynamic value here.
+                    fx.bcx.ins().iconst(cast_to, 0)
+                } else {
+                    clif_intcast(fx, relative_discr, cast_to, false)
+                };
+                fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+            };
+
+            let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
+            let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
+            CValue::by_val(discr, dest_layout)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
new file mode 100644
index 00000000000..a8b802f4494
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -0,0 +1,420 @@
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+
+use cranelift_object::ObjectModule;
+
+use crate::{prelude::*, BackendConfig};
+
+/// Result of codegenning one codegen unit: the emitted object file description plus,
+/// when incremental caching is enabled, the work product recorded for reuse in a
+/// later compilation session.
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+    fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+        // do nothing — this type deliberately contributes nothing to the
+        // dep-graph fingerprint of the task that produced it.
+    }
+}
+
+/// Finish the given [`ObjectModule`], append debug info and unwind tables to the
+/// object product, write it to a temporary object file, and — unless incremental
+/// caching is disabled — copy it into the incremental compilation cache directory
+/// as a work product.
+fn emit_module(
+    tcx: TyCtxt<'_>,
+    backend_config: &BackendConfig,
+    name: String,
+    kind: ModuleKind,
+    module: ObjectModule,
+    debug: Option<DebugContext<'_>>,
+    unwind_context: UnwindContext,
+) -> ModuleCodegenResult {
+    let mut product = module.finish();
+
+    if let Some(mut debug) = debug {
+        debug.emit(&mut product);
+    }
+
+    unwind_context.emit(&mut product);
+
+    let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
+    let obj = product.object.write().unwrap();
+    if let Err(err) = std::fs::write(&tmp_file, obj) {
+        tcx.sess.fatal(&format!("error writing object file: {}", err));
+    }
+
+    // `None` here means no work product is recorded, so this module cannot be
+    // reused by a future incremental session.
+    let work_product = if backend_config.disable_incr_cache {
+        None
+    } else {
+        rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+            tcx.sess,
+            &name,
+            &Some(tmp_file.clone()),
+        )
+    };
+
+    ModuleCodegenResult(
+        CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
+        work_product,
+    )
+}
+
+/// Reuse the object file cached for `cgu` in a previous incremental session
+/// instead of recompiling it: link or copy the saved file from the incremental
+/// cache directory to the expected temp-object path, and re-register the work
+/// product so it is kept for the next session as well.
+fn reuse_workproduct_for_cgu(
+    tcx: TyCtxt<'_>,
+    cgu: &CodegenUnit<'_>,
+    work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+    let incr_comp_session_dir = tcx.sess.incr_comp_session_dir();
+    let mut object = None;
+    let work_product = cgu.work_product(tcx);
+    if let Some(saved_file) = &work_product.saved_file {
+        let obj_out =
+            tcx.output_filenames(()).temp_path(OutputType::Object, Some(&cgu.name().as_str()));
+        object = Some(obj_out.clone());
+        let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
+        // Hard-link when possible, fall back to a copy; an error here is reported
+        // but does not abort the session immediately.
+        if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+            tcx.sess.err(&format!(
+                "unable to copy {} to {}: {}",
+                source_file.display(),
+                obj_out.display(),
+                err
+            ));
+        }
+    }
+
+    work_products.insert(cgu.work_product_id(), work_product);
+
+    CompiledModule {
+        name: cgu.name().to_string(),
+        kind: ModuleKind::Regular,
+        object,
+        dwarf_object: None,
+        bytecode: None,
+    }
+}
+
+/// Compile a single codegen unit to an object file.
+///
+/// This is the task body executed through `tcx.dep_graph.with_task` in
+/// [`run_aot`]: it codegens every mono item of the CGU (functions, statics and
+/// `global_asm!` blocks), emits the entry-point wrapper if this is the primary
+/// CGU of an executable, writes the object file, and finally assembles any
+/// collected global asm into it.
+fn module_codegen(
+    tcx: TyCtxt<'_>,
+    (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
+) -> ModuleCodegenResult {
+    let cgu = tcx.codegen_unit(cgu_name);
+    let mono_items = cgu.items_in_deterministic_order(tcx);
+
+    let isa = crate::build_isa(tcx.sess, &backend_config);
+    let mut module = crate::backend::make_module(tcx.sess, isa, cgu_name.as_str().to_string());
+
+    let mut cx = crate::CodegenCx::new(
+        tcx,
+        backend_config.clone(),
+        module.isa(),
+        tcx.sess.opts.debuginfo != DebugInfo::None,
+    );
+    // Declare all functions up front so that codegen of one item can reference
+    // any other item of this CGU regardless of order.
+    super::predefine_mono_items(tcx, &mut module, &mono_items);
+    for (mono_item, _) in mono_items {
+        match mono_item {
+            MonoItem::Fn(inst) => {
+                cx.tcx
+                    .sess
+                    .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
+            }
+            MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
+            MonoItem::GlobalAsm(item_id) => {
+                // Global asm is not compiled by cranelift; the template strings are
+                // accumulated in `cx.global_asm` and later run through the system
+                // assembler by `codegen_global_asm`.
+                let item = cx.tcx.hir().item(item_id);
+                if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
+                    if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+                        cx.global_asm.push_str("\n.intel_syntax noprefix\n");
+                    } else {
+                        cx.global_asm.push_str("\n.att_syntax\n");
+                    }
+                    for piece in asm.template {
+                        match *piece {
+                            InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
+                            InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
+                        }
+                    }
+                    // Always switch back to AT&T syntax for whatever follows.
+                    cx.global_asm.push_str("\n.att_syntax\n\n");
+                } else {
+                    bug!("Expected GlobalAsm found {:?}", item);
+                }
+            }
+        }
+    }
+    crate::main_shim::maybe_create_entry_wrapper(
+        tcx,
+        &mut module,
+        &mut cx.unwind_context,
+        false,
+        cgu.is_primary(),
+    );
+
+    let debug_context = cx.debug_context;
+    let unwind_context = cx.unwind_context;
+    let codegen_result = tcx.sess.time("write object file", || {
+        emit_module(
+            tcx,
+            &backend_config,
+            cgu.name().as_str().to_string(),
+            ModuleKind::Regular,
+            module,
+            debug_context,
+            unwind_context,
+        )
+    });
+
+    // Assemble the collected global asm (if any) and merge it into the object
+    // file just written.
+    codegen_global_asm(tcx, &cgu.name().as_str(), &cx.global_asm);
+
+    codegen_result
+}
+
+/// Drive ahead-of-time compilation of the whole crate: codegen every codegen
+/// unit (reusing cached object files where the dep graph allows), emit the
+/// allocator shim and the metadata module, and collect everything into the
+/// [`CodegenResults`] handed back to rustc for linking.
+pub(crate) fn run_aot(
+    tcx: TyCtxt<'_>,
+    backend_config: BackendConfig,
+    metadata: EncodedMetadata,
+    need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+    let mut work_products = FxHashMap::default();
+
+    let cgus = if tcx.sess.opts.output_types.should_codegen() {
+        tcx.collect_and_partition_mono_items(()).1
+    } else {
+        // If only `--emit metadata` is used, we shouldn't perform any codegen.
+        // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+        &[]
+    };
+
+    if tcx.dep_graph.is_fully_enabled() {
+        // Force the codegen_unit queries to run so their results are available
+        // to the dep-graph tasks below.
+        for cgu in &*cgus {
+            tcx.ensure().codegen_unit(cgu.name());
+        }
+    }
+
+    let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+        cgus.iter()
+            .map(|cgu| {
+                let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+                tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+                match cgu_reuse {
+                    // When the incremental cache is disabled, always recompile,
+                    // whatever reuse the dep graph would have permitted.
+                    _ if backend_config.disable_incr_cache => {}
+                    CguReuse::No => {}
+                    CguReuse::PreLto => {
+                        return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+                    }
+                    // This backend performs no LTO, so post-LTO reuse can never
+                    // be reported by `determine_cgu_reuse`.
+                    CguReuse::PostLto => unreachable!(),
+                }
+
+                let dep_node = cgu.codegen_dep_node(tcx);
+                let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+                    dep_node,
+                    tcx,
+                    (backend_config.clone(), cgu.name()),
+                    module_codegen,
+                    rustc_middle::dep_graph::hash_result,
+                );
+
+                if let Some((id, product)) = work_product {
+                    work_products.insert(id, product);
+                }
+
+                module
+            })
+            .collect::<Vec<_>>()
+    });
+
+    tcx.sess.abort_if_errors();
+
+    let isa = crate::build_isa(tcx.sess, &backend_config);
+    let mut allocator_module =
+        crate::backend::make_module(tcx.sess, isa, "allocator_shim".to_string());
+    assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
+    let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa(), true);
+    // Only emitted when this crate actually needs to provide the `__rust_alloc`
+    // family of symbols; `codegen` reports whether anything was generated.
+    let created_alloc_shim =
+        crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+    let allocator_module = if created_alloc_shim {
+        let ModuleCodegenResult(module, work_product) = emit_module(
+            tcx,
+            &backend_config,
+            "allocator_shim".to_string(),
+            ModuleKind::Allocator,
+            allocator_module,
+            None,
+            allocator_unwind_context,
+        );
+        if let Some((id, product)) = work_product {
+            work_products.insert(id, product);
+        }
+        Some(module)
+    } else {
+        None
+    };
+
+    let metadata_module = if need_metadata_module {
+        let _timer = tcx.prof.generic_activity("codegen crate metadata");
+        let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+            use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+            let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+            let metadata_cgu_name = cgu_name_builder
+                .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+                .as_str()
+                .to_string();
+
+            let tmp_file =
+                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+            // The crate metadata is stored in a dedicated object file built
+            // without going through cranelift.
+            let obj = crate::backend::with_object(tcx.sess, &metadata_cgu_name, |object| {
+                crate::metadata::write_metadata(tcx, object);
+            });
+
+            if let Err(err) = std::fs::write(&tmp_file, obj) {
+                tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+            }
+
+            (metadata_cgu_name, tmp_file)
+        });
+
+        Some(CompiledModule {
+            name: metadata_cgu_name,
+            kind: ModuleKind::Metadata,
+            object: Some(tmp_file),
+            dwarf_object: None,
+            bytecode: None,
+        })
+    } else {
+        None
+    };
+
+    // FIXME handle `-Ctarget-cpu=native`
+    let target_cpu =
+        tcx.sess.opts.cg.target_cpu.as_ref().unwrap_or(&tcx.sess.target.cpu).to_owned();
+    Box::new((
+        CodegenResults {
+            modules,
+            allocator_module,
+            metadata_module,
+            metadata,
+            crate_info: CrateInfo::new(tcx, target_cpu),
+        },
+        work_products,
+    ))
+}
+
+/// Assemble the accumulated `global_asm!` text with the system assembler (`as`)
+/// and merge the resulting object file into the CGU's main object file via
+/// `ld -r`, so the final object contains both the cranelift-generated code and
+/// the hand-written assembly.
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+    use std::io::Write;
+    use std::process::{Command, Stdio};
+
+    if global_asm.is_empty() {
+        return;
+    }
+
+    if cfg!(not(feature = "inline_asm"))
+        || tcx.sess.target.is_like_osx
+        || tcx.sess.target.is_like_windows
+    {
+        // `__rust_probestack` asm is silently dropped rather than rejected,
+        // since aborting on it would make these targets unusable.
+        if global_asm.contains("__rust_probestack") {
+            return;
+        }
+
+        // FIXME fix linker error on macOS
+        if cfg!(not(feature = "inline_asm")) {
+            tcx.sess.fatal(
+                "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+            );
+        } else {
+            tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+        }
+    }
+
+    let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+    let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+    // Remove all LLVM style comments
+    let global_asm = global_asm
+        .lines()
+        .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+        .collect::<Vec<_>>()
+        .join("\n");
+
+    let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
+
+    // Assemble `global_asm`, feeding the asm text through the child's stdin.
+    let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+    let mut child = Command::new(assembler)
+        .arg("-o")
+        .arg(&global_asm_object_file)
+        .stdin(Stdio::piped())
+        .spawn()
+        .expect("Failed to spawn `as`.");
+    child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+    let status = child.wait().expect("Failed to wait for `as`.");
+    if !status.success() {
+        tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+    }
+
+    // Link the global asm and main object file together. The original object
+    // file is first renamed to `<stem>.main.o` so the merged output can take
+    // its place under the original name.
+    let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+    std::fs::rename(&output_object_file, &main_object_file).unwrap();
+    let status = Command::new(linker)
+        .arg("-r") // Create a new object file
+        .arg("-o")
+        .arg(output_object_file)
+        .arg(&main_object_file)
+        .arg(&global_asm_object_file)
+        .status()
+        .unwrap();
+    if !status.success() {
+        tcx.sess.fatal(&format!(
+            "Failed to link `{}` and `{}` together",
+            main_object_file.display(),
+            global_asm_object_file.display(),
+        ));
+    }
+
+    // Clean up the intermediate object files; only the merged one is kept.
+    std::fs::remove_file(global_asm_object_file).unwrap();
+    std::fs::remove_file(main_object_file).unwrap();
+}
+
+/// Insert `postfix` between the file stem and the extension of `path`,
+/// e.g. `foo/bar.o` with postfix `".asm"` becomes `foo/bar.asm.o`; a path
+/// without an extension simply gets the postfix appended to the stem.
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+    let mut new_filename = path.file_stem().unwrap().to_owned();
+    new_filename.push(postfix);
+    if let Some(extension) = path.extension() {
+        new_filename.push(".");
+        new_filename.push(extension);
+    }
+    path.set_file_name(new_filename);
+    path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+    if !tcx.dep_graph.is_fully_enabled() {
+        return CguReuse::No;
+    }
+
+    let work_product_id = &cgu.work_product_id();
+    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+        // We don't have anything cached for this CGU. This can happen
+        // if the CGU did not exist in the previous session.
+        return CguReuse::No;
+    }
+
+    // Try to mark the CGU as green. If it we can do so, it means that nothing
+    // affecting the LLVM module has changed and we can re-use a cached version.
+    // If we compile with any kind of LTO, this means we can re-use the bitcode
+    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+    // know that later). If we are not doing LTO, there is only one optimized
+    // version of each module, so we re-use that.
+    let dep_node = cgu.codegen_dep_node(tcx);
+    assert!(
+        !tcx.dep_graph.dep_node_exists(&dep_node),
+        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+        cgu.name()
+    );
+
+    if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
new file mode 100644
index 00000000000..76fbc9ad51e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -0,0 +1,380 @@
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::lazy::{Lazy, SyncOnceCell};
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
+use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+/// Mutable JIT state owned by the rustc thread: the live [`JITModule`] plus the
+/// backend configuration needed to lazily compile further functions.
+struct JitState {
+    backend_config: BackendConfig,
+    jit_module: JITModule,
+}
+
+thread_local! {
+    // Populated exactly once by `run_jit` on the rustc thread; accessed again
+    // from `jit_fn` when a lazy compilation request arrives.
+    static LAZY_JIT_STATE: RefCell<Option<JitState>> = RefCell::new(None);
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: SyncOnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> =
+    SyncOnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+    /// Request that the specified `Instance` be lazily jitted.
+    ///
+    /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+    /// this message is sent.
+    JitFn {
+        instance_ptr: *const Instance<'static>,
+        trampoline_ptr: *const u8,
+        /// Channel on which the rustc thread sends back the pointer to the
+        /// freshly compiled function.
+        tx: mpsc::Sender<*const u8>,
+    },
+}
+// SAFETY: raw pointers are not `Send`; per the type docs, senders must uphold
+// the documented aliasing contract themselves.
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+    /// Send the message.
+    fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+        thread_local! {
+            /// The Sender owned by the local thread, lazily cloned from the
+            /// global sender on first use.
+            static LOCAL_MESSAGE_SENDER: Lazy<mpsc::Sender<UnsafeMessage>> = Lazy::new(||
+                GLOBAL_MESSAGE_SENDER
+                    .get().unwrap()
+                    .lock().unwrap()
+                    .clone()
+            );
+        }
+        LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+    }
+}
+
+/// Build the [`JITModule`] and matching [`CodegenCx`] used for JIT execution:
+/// register the symbols of all dylib dependencies and the compiler-builtins
+/// helpers with the JIT, then codegen the allocator shim and the entry-point
+/// wrapper. `hotswap` enables function redefinition, which lazy JIT mode needs.
+fn create_jit_module<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    backend_config: &BackendConfig,
+    hotswap: bool,
+) -> (JITModule, CodegenCx<'tcx>) {
+    let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
+    let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
+
+    let isa = crate::build_isa(tcx.sess, backend_config);
+    let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+    jit_builder.hotswap(hotswap);
+    crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
+    jit_builder.symbols(imported_symbols);
+    let mut jit_module = JITModule::new(jit_builder);
+
+    let mut cx = crate::CodegenCx::new(tcx, backend_config.clone(), jit_module.isa(), false);
+
+    crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+    crate::main_shim::maybe_create_entry_wrapper(
+        tcx,
+        &mut jit_module,
+        &mut cx.unwind_context,
+        true,
+        true,
+    );
+
+    (jit_module, cx)
+}
+
+/// Codegen the crate into a [`JITModule`] and execute its `main` in a spawned
+/// thread, while this (rustc) thread stays behind to service lazy-JIT
+/// compilation requests. Never returns: it either loops handling messages or
+/// the spawned thread terminates the process via `std::process::exit`.
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+    if !tcx.sess.opts.output_types.should_codegen() {
+        tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+    }
+
+    if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+        tcx.sess.fatal("can't jit non-executable crate");
+    }
+
+    // Hotswap (function redefinition) is only needed when functions are
+    // compiled lazily and their shims later replaced with real code.
+    let (mut jit_module, mut cx) = create_jit_module(
+        tcx,
+        &backend_config,
+        matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+    );
+
+    // Flatten all CGUs into one item list; collecting through a map first
+    // deduplicates mono items that appear in more than one CGU.
+    let (_, cgus) = tcx.collect_and_partition_mono_items(());
+    let mono_items = cgus
+        .iter()
+        .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
+        .flatten()
+        .collect::<FxHashMap<_, (_, _)>>()
+        .into_iter()
+        .collect::<Vec<(_, (_, _))>>();
+
+    super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+        super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+        for (mono_item, _) in mono_items {
+            match mono_item {
+                MonoItem::Fn(inst) => match backend_config.codegen_mode {
+                    CodegenMode::Aot => unreachable!(),
+                    CodegenMode::Jit => {
+                        cx.tcx.sess.time("codegen fn", || {
+                            crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
+                        });
+                    }
+                    // Lazy mode only emits a trampoline now; the real body is
+                    // compiled on first call via `__clif_jit_fn`.
+                    CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
+                },
+                MonoItem::Static(def_id) => {
+                    crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+                }
+                MonoItem::GlobalAsm(item_id) => {
+                    let item = tcx.hir().item(item_id);
+                    tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+                }
+            }
+        }
+    });
+
+    if !cx.global_asm.is_empty() {
+        tcx.sess.fatal("Inline asm is not supported in JIT mode");
+    }
+
+    tcx.sess.abort_if_errors();
+
+    jit_module.finalize_definitions();
+    unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+    println!(
+        "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+    );
+
+    // argv[0] is the crate name, followed by any extra args passed via the
+    // backend config.
+    let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+        .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+        .map(|arg| CString::new(arg).unwrap())
+        .collect::<Vec<_>>();
+
+    let start_sig = Signature {
+        params: vec![
+            AbiParam::new(jit_module.target_config().pointer_type()),
+            AbiParam::new(jit_module.target_config().pointer_type()),
+        ],
+        returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+        call_conv: jit_module.target_config().default_call_conv,
+    };
+    let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+    let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+    // Park the module in thread-local state so `jit_fn` can reach it when
+    // handling lazy compilation requests on this same thread.
+    LAZY_JIT_STATE.with(|lazy_jit_state| {
+        let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+        assert!(lazy_jit_state.is_none());
+        *lazy_jit_state = Some(JitState { backend_config, jit_module });
+    });
+
+    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+        unsafe { ::std::mem::transmute(finalized_start) };
+
+    let (tx, rx) = mpsc::channel();
+    GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+    // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+    // (eg to lazily JIT further functions as required)
+    std::thread::spawn(move || {
+        let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+        // Push a null pointer as a terminating argument. This is required by POSIX and
+        // useful as some dynamic linkers use it as a marker to jump over.
+        argv.push(std::ptr::null());
+
+        let ret = f(args.len() as c_int, argv.as_ptr());
+        std::process::exit(ret);
+    });
+
+    // Handle messages
+    loop {
+        match rx.recv().unwrap() {
+            // lazy JIT compilation request - compile requested instance and return pointer to result
+            UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+                tx.send(jit_fn(instance_ptr, trampoline_ptr))
+                    .expect("jitted runtime hung up before response to lazy JIT request was sent");
+            }
+        }
+    }
+}
+
+/// Entry point invoked from jitted code (via the trampoline emitted by
+/// `codegen_shim`) to request lazy compilation of `instance_ptr`'s function.
+/// Blocks until the rustc thread replies with the pointer to the compiled code.
+#[no_mangle]
+extern "C" fn __clif_jit_fn(
+    instance_ptr: *const Instance<'static>,
+    trampoline_ptr: *const u8,
+) -> *const u8 {
+    // send the JIT request to the rustc thread, with a channel for the response
+    let (tx, rx) = mpsc::channel();
+    UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+        .send()
+        .expect("rustc thread hung up before lazy JIT request was sent");
+
+    // block on JIT compilation result
+    rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+/// Handle a lazy-JIT request on the rustc thread: compile the function behind
+/// `instance_ptr` into the thread-local [`JITModule`] and return the pointer
+/// to the finalized machine code. If the function's GOT entry no longer points
+/// at the shim trampoline (`trampoline_ptr`), it was already compiled and the
+/// existing pointer is returned instead.
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+    rustc_middle::ty::tls::with(|tcx| {
+        // lift is used to ensure the correct lifetime for instance.
+        let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+        LAZY_JIT_STATE.with(|lazy_jit_state| {
+            let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+            let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+            let jit_module = &mut lazy_jit_state.jit_module;
+            let backend_config = lazy_jit_state.backend_config.clone();
+
+            let name = tcx.symbol_name(instance).name;
+            let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+            let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+            let current_ptr = jit_module.read_got_entry(func_id);
+
+            // If the function's GOT entry has already been updated to point at something other
+            // than the shim trampoline, don't re-jit but just return the new pointer instead.
+            // This does not need synchronization as this code is executed only by a sole rustc
+            // thread.
+            if current_ptr != trampoline_ptr {
+                return current_ptr;
+            }
+
+            jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+            let mut cx = crate::CodegenCx::new(tcx, backend_config, jit_module.isa(), false);
+            tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
+
+            // JIT mode rejects global asm earlier, so none may appear here.
+            assert!(cx.global_asm.is_empty());
+            jit_module.finalize_definitions();
+            unsafe { cx.unwind_context.register_jit(&jit_module) };
+            jit_module.get_finalized_function(func_id)
+        })
+    })
+}
+
+/// Open every dylib dependency of the executable with `libloading` and resolve
+/// all of their exported global symbols, returning `(symbol name, address)`
+/// pairs to be registered with the JIT. Static dependencies are reported as
+/// errors since the JIT can only load dylibs.
+fn load_imported_symbols_for_jit(
+    sess: &Session,
+    crate_info: CrateInfo,
+) -> Vec<(String, *const u8)> {
+    use rustc_middle::middle::dependency_format::Linkage;
+
+    let mut dylib_paths = Vec::new();
+
+    // Use the dependency formats computed for the executable crate type to
+    // decide how each upstream crate is linked.
+    let data = &crate_info
+        .dependency_formats
+        .iter()
+        .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+        .unwrap()
+        .1;
+    for &cnum in &crate_info.used_crates {
+        let src = &crate_info.used_crate_source[&cnum];
+        match data[cnum.as_usize() - 1] {
+            Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+            Linkage::Static => {
+                let name = &crate_info.crate_name[&cnum];
+                let mut err = sess.struct_err(&format!("Can't load static lib {}", name.as_str()));
+                err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+                err.emit();
+            }
+            Linkage::Dynamic => {
+                dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+            }
+        }
+    }
+
+    let mut imported_symbols = Vec::new();
+    for path in dylib_paths {
+        use object::{Object, ObjectSymbol};
+        let lib = libloading::Library::new(&path).unwrap();
+        // Parse the same file with `object` to enumerate its dynamic symbols,
+        // then resolve each one through the loaded library.
+        let obj = std::fs::read(path).unwrap();
+        let obj = object::File::parse(&*obj).unwrap();
+        imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+            let name = symbol.name().unwrap().to_string();
+            if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+                return None;
+            }
+            if name.starts_with("rust_metadata_") {
+                // The metadata is part of a section that is not loaded by the dynamic linker in
+                // case of cg_llvm.
+                return None;
+            }
+            let dlsym_name = if cfg!(target_os = "macos") {
+                // On macOS `dlsym` expects the name without leading `_`.
+                assert!(name.starts_with('_'), "{:?}", name);
+                &name[1..]
+            } else {
+                &name
+            };
+            let symbol: libloading::Symbol<'_, *const u8> =
+                unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+            Some((name, *symbol))
+        }));
+        // Intentionally leak the library handle so the resolved addresses stay
+        // valid for the rest of the process's lifetime.
+        std::mem::forget(lib)
+    }
+
+    sess.abort_if_errors();
+
+    imported_symbols
+}
+
+/// Emit a trampoline for `inst` instead of its real body (lazy JIT mode).
+/// The trampoline calls `__clif_jit_fn` with the boxed `Instance` pointer and
+/// its own address, then forwards all arguments to the function pointer that
+/// call returns and returns its results unchanged.
+fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
+    let tcx = cx.tcx;
+
+    let pointer_type = module.target_config().pointer_type();
+
+    let name = tcx.symbol_name(inst).name;
+    let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
+    let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+    // Leak the instance so the raw pointer baked into the trampoline stays
+    // valid until the process exits.
+    let instance_ptr = Box::into_raw(Box::new(inst));
+
+    let jit_fn = module
+        .declare_function(
+            "__clif_jit_fn",
+            Linkage::Import,
+            &Signature {
+                call_conv: module.target_config().default_call_conv,
+                params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+                returns: vec![AbiParam::new(pointer_type)],
+            },
+        )
+        .unwrap();
+
+    cx.cached_context.clear();
+    let trampoline = &mut cx.cached_context.func;
+    trampoline.signature = sig.clone();
+
+    let mut builder_ctx = FunctionBuilderContext::new();
+    let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+    let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+    let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+    let sig_ref = trampoline_builder.func.import_signature(sig);
+
+    let entry_block = trampoline_builder.create_block();
+    trampoline_builder.append_block_params_for_function_params(entry_block);
+    let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
+    // Body: ptr = __clif_jit_fn(instance_ptr, &trampoline); return ptr(args...)
+    trampoline_builder.switch_to_block(entry_block);
+    let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+    let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+    let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+    let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+    let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+    let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+    trampoline_builder.ins().return_(&ret_vals);
+
+    module
+        .define_function(
+            func_id,
+            &mut cx.cached_context,
+            &mut NullTrapSink {},
+            &mut NullStackMapSink {},
+        )
+        .unwrap();
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
new file mode 100644
index 00000000000..8f5714ecb41
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -0,0 +1,53 @@
+//! Drivers are responsible for calling [`codegen_fn`] or [`codegen_static`] for each mono item and
+//! performing any further actions like JIT executing or writing object files.
+//!
+//! [`codegen_fn`]: crate::base::codegen_fn
+//! [`codegen_static`]: crate::constant::codegen_static
+
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) mod aot;
+#[cfg(feature = "jit")]
+pub(crate) mod jit;
+
+/// Declare every function mono item in `module` before any of them is
+/// codegenned, so that calls between items work regardless of the order in
+/// which their bodies are compiled. Statics and global asm need no
+/// predeclaration here.
+fn predefine_mono_items<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    module: &mut dyn Module,
+    mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
+    tcx.sess.time("predefine functions", || {
+        let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
+        for &(mono_item, (linkage, visibility)) in mono_items {
+            match mono_item {
+                MonoItem::Fn(instance) => {
+                    let name = tcx.symbol_name(instance).name;
+                    let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
+                    let sig = get_function_sig(tcx, module.isa().triple(), instance);
+                    // Map rustc's linkage/visibility pair to a cranelift linkage.
+                    let linkage = crate::linkage::get_clif_linkage(
+                        mono_item,
+                        linkage,
+                        visibility,
+                        is_compiler_builtins,
+                    );
+                    module.declare_function(name, linkage, &sig).unwrap();
+                }
+                MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+            }
+        }
+    });
+}
+
+/// Run `f` under the session's self-profiling timer; when `display` is true,
+/// additionally print start/end markers with the wall-clock duration to stdout
+/// (used for the `display_cg_time` backend option).
+fn time<R>(tcx: TyCtxt<'_>, display: bool, name: &'static str, f: impl FnOnce() -> R) -> R {
+    if display {
+        println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+        let before = std::time::Instant::now();
+        let res = tcx.sess.time(name, f);
+        let after = std::time::Instant::now();
+        println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
+        res
+    } else {
+        tcx.sess.time(name, f)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
new file mode 100644
index 00000000000..09c5e6031c7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -0,0 +1,336 @@
+//! Codegen of [`asm!`] invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    _span: Span,
+    template: &[InlineAsmTemplatePiece],
+    operands: &[InlineAsmOperand<'tcx>],
+    options: InlineAsmOptions,
+) {
+    // FIXME add .eh_frame unwind info directives
+
+    if template.is_empty() {
+        // Black box
+        return;
+    } else if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
+        let true_ = fx.bcx.ins().iconst(types::I32, 1);
+        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
+        return;
+    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+        && matches!(
+            template[1],
+            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
+        )
+        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
+        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
+        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
+        && matches!(
+            template[6],
+            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
+        )
+    {
+        assert_eq!(operands.len(), 4);
+        let (leaf, eax_place) = match operands[1] {
+            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+                let reg = expect_reg(reg);
+                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::ax));
+                (
+                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+                    crate::base::codegen_place(fx, out_place.unwrap()),
+                )
+            }
+            _ => unreachable!(),
+        };
+        let ebx_place = match operands[0] {
+            InlineAsmOperand::Out { reg, late: true, place } => {
+                assert_eq!(
+                    reg,
+                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
+                        X86InlineAsmRegClass::reg
+                    ))
+                );
+                crate::base::codegen_place(fx, place.unwrap())
+            }
+            _ => unreachable!(),
+        };
+        let (sub_leaf, ecx_place) = match operands[2] {
+            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+                let reg = expect_reg(reg);
+                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::cx));
+                (
+                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+                    crate::base::codegen_place(fx, out_place.unwrap()),
+                )
+            }
+            _ => unreachable!(),
+        };
+        let edx_place = match operands[3] {
+            InlineAsmOperand::Out { reg, late: true, place } => {
+                let reg = expect_reg(reg);
+                assert_eq!(reg, InlineAsmReg::X86(X86InlineAsmReg::dx));
+                crate::base::codegen_place(fx, place.unwrap())
+            }
+            _ => unreachable!(),
+        };
+
+        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
+
+        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+        return;
+    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
+        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+    }
+
+    let mut slot_size = Size::from_bytes(0);
+    let mut clobbered_regs = Vec::new();
+    let mut inputs = Vec::new();
+    let mut outputs = Vec::new();
+
+    let mut new_slot = |reg_class: InlineAsmRegClass| {
+        let reg_size = reg_class
+            .supported_types(InlineAsmArch::X86_64)
+            .iter()
+            .map(|(ty, _)| ty.size())
+            .max()
+            .unwrap();
+        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+        slot_size = slot_size.align_to(align);
+        let offset = slot_size;
+        slot_size += reg_size;
+        offset
+    };
+
+    // FIXME overlap input and output slots to save stack space
+    for operand in operands {
+        match *operand {
+            InlineAsmOperand::In { reg, ref value } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                inputs.push((
+                    reg,
+                    new_slot(reg.reg_class()),
+                    crate::base::codegen_operand(fx, value).load_scalar(fx),
+                ));
+            }
+            InlineAsmOperand::Out { reg, late: _, place } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                if let Some(place) = place {
+                    outputs.push((
+                        reg,
+                        new_slot(reg.reg_class()),
+                        crate::base::codegen_place(fx, place),
+                    ));
+                }
+            }
+            InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
+                let reg = expect_reg(reg);
+                clobbered_regs.push((reg, new_slot(reg.reg_class())));
+                inputs.push((
+                    reg,
+                    new_slot(reg.reg_class()),
+                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+                ));
+                if let Some(out_place) = out_place {
+                    outputs.push((
+                        reg,
+                        new_slot(reg.reg_class()),
+                        crate::base::codegen_place(fx, out_place),
+                    ));
+                }
+            }
+            InlineAsmOperand::Const { value: _ } => todo!(),
+            InlineAsmOperand::SymFn { value: _ } => todo!(),
+            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+        }
+    }
+
+    let inline_asm_index = fx.inline_asm_index;
+    fx.inline_asm_index += 1;
+    let asm_name = format!("{}__inline_asm_{}", fx.symbol_name, inline_asm_index);
+
+    let generated_asm = generate_asm_wrapper(
+        &asm_name,
+        InlineAsmArch::X86_64,
+        options,
+        template,
+        clobbered_regs,
+        &inputs,
+        &outputs,
+    );
+    fx.cx.global_asm.push_str(&generated_asm);
+
+    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
+}
+
+fn generate_asm_wrapper(
+    asm_name: &str,
+    arch: InlineAsmArch,
+    options: InlineAsmOptions,
+    template: &[InlineAsmTemplatePiece],
+    clobbered_regs: Vec<(InlineAsmReg, Size)>,
+    inputs: &[(InlineAsmReg, Size, Value)],
+    outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
+) -> String {
+    let mut generated_asm = String::new();
+    writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+    writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+    writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+    writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+    generated_asm.push_str(".intel_syntax noprefix\n");
+    generated_asm.push_str("    push rbp\n");
+    generated_asm.push_str("    mov rbp,rdi\n");
+
+    // Save clobbered registers
+    if !options.contains(InlineAsmOptions::NORETURN) {
+        // FIXME skip registers saved by the calling convention
+        for &(reg, offset) in &clobbered_regs {
+            save_register(&mut generated_asm, arch, reg, offset);
+        }
+    }
+
+    // Write input registers
+    for &(reg, offset, _value) in inputs {
+        restore_register(&mut generated_asm, arch, reg, offset);
+    }
+
+    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+        generated_asm.push_str(".att_syntax\n");
+    }
+
+    // The actual inline asm
+    for piece in template {
+        match piece {
+            InlineAsmTemplatePiece::String(s) => {
+                generated_asm.push_str(s);
+            }
+            InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
+        }
+    }
+    generated_asm.push('\n');
+
+    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
+        generated_asm.push_str(".intel_syntax noprefix\n");
+    }
+
+    if !options.contains(InlineAsmOptions::NORETURN) {
+        // Read output registers
+        for &(reg, offset, _place) in outputs {
+            save_register(&mut generated_asm, arch, reg, offset);
+        }
+
+        // Restore clobbered registers
+        for &(reg, offset) in clobbered_regs.iter().rev() {
+            restore_register(&mut generated_asm, arch, reg, offset);
+        }
+
+        generated_asm.push_str("    pop rbp\n");
+        generated_asm.push_str("    ret\n");
+    } else {
+        generated_asm.push_str("    ud2\n");
+    }
+
+    generated_asm.push_str(".att_syntax\n");
+    writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+    generated_asm.push_str(".text\n");
+    generated_asm.push_str("\n\n");
+
+    generated_asm
+}
+
+fn call_inline_asm<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    asm_name: &str,
+    slot_size: Size,
+    inputs: Vec<(InlineAsmReg, Size, Value)>,
+    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
+) {
+    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+        kind: StackSlotKind::ExplicitSlot,
+        offset: None,
+        size: u32::try_from(slot_size.bytes()).unwrap(),
+    });
+    if fx.clif_comments.enabled() {
+        fx.add_comment(stack_slot, "inline asm scratch slot");
+    }
+
+    let inline_asm_func = fx
+        .module
+        .declare_function(
+            asm_name,
+            Linkage::Import,
+            &Signature {
+                call_conv: CallConv::SystemV,
+                params: vec![AbiParam::new(fx.pointer_type)],
+                returns: vec![],
+            },
+        )
+        .unwrap();
+    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(inline_asm_func, asm_name);
+    }
+
+    for (_reg, offset, value) in inputs {
+        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+    }
+
+    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+    for (_reg, offset, place) in outputs {
+        let ty = fx.clif_type(place.layout().ty).unwrap();
+        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+    }
+}
+
+fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
+    match reg_or_class {
+        InlineAsmRegOrRegClass::Reg(reg) => reg,
+        InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
+    }
+}
+
+fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
+    match arch {
+        InlineAsmArch::X86_64 => {
+            write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+            generated_asm.push('\n');
+        }
+        _ => unimplemented!("save_register for {:?}", arch),
+    }
+}
+
+fn restore_register(
+    generated_asm: &mut String,
+    arch: InlineAsmArch,
+    reg: InlineAsmReg,
+    offset: Size,
+) {
+    match arch {
+        InlineAsmArch::X86_64 => {
+            generated_asm.push_str("    mov ");
+            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+        }
+        _ => unimplemented!("restore_register for {:?}", arch),
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
new file mode 100644
index 00000000000..d02dfd93c3e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -0,0 +1,74 @@
+//! Emulation of a subset of the cpuid x86 instruction.
+
+use crate::prelude::*;
+
+/// Emulates a subset of the cpuid x86 instruction.
+///
+/// This emulates an Intel CPU with SSE and SSE2 support, but which doesn't support anything else.
+pub(crate) fn codegen_cpuid_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    leaf: Value,
+    _sub_leaf: Value,
+) -> (Value, Value, Value, Value) {
+    let leaf_0 = fx.bcx.create_block();
+    let leaf_1 = fx.bcx.create_block();
+    let leaf_7 = fx.bcx.create_block();
+    let leaf_8000_0000 = fx.bcx.create_block();
+    let leaf_8000_0001 = fx.bcx.create_block();
+    let unsupported_leaf = fx.bcx.create_block();
+
+    let dest = fx.bcx.create_block();
+    let eax = fx.bcx.append_block_param(dest, types::I32);
+    let ebx = fx.bcx.append_block_param(dest, types::I32);
+    let ecx = fx.bcx.append_block_param(dest, types::I32);
+    let edx = fx.bcx.append_block_param(dest, types::I32);
+
+    let mut switch = cranelift_frontend::Switch::new();
+    switch.set_entry(0, leaf_0);
+    switch.set_entry(1, leaf_1);
+    switch.set_entry(7, leaf_7);
+    switch.set_entry(0x8000_0000, leaf_8000_0000);
+    switch.set_entry(0x8000_0001, leaf_8000_0001);
+    switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
+
+    fx.bcx.switch_to_block(leaf_0);
+    let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
+    let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+    let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+    let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+    fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+
+    fx.bcx.switch_to_block(leaf_1);
+    let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
+    let additional_information = fx.bcx.ins().iconst(types::I32, 0);
+    let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
+    let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+    fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
+
+    fx.bcx.switch_to_block(leaf_7);
+    // This leaf technically has subleaves, but we just return zero for all subleaves.
+    let zero = fx.bcx.ins().iconst(types::I32, 0);
+    fx.bcx.ins().jump(dest, &[zero, zero, zero, zero]);
+
+    fx.bcx.switch_to_block(leaf_8000_0000);
+    let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
+    let zero = fx.bcx.ins().iconst(types::I32, 0);
+    fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+
+    fx.bcx.switch_to_block(leaf_8000_0001);
+    let zero = fx.bcx.ins().iconst(types::I32, 0);
+    let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
+    let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
+    fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+
+    fx.bcx.switch_to_block(unsupported_leaf);
+    crate::trap::trap_unreachable(
+        fx,
+        "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+    );
+
+    fx.bcx.switch_to_block(dest);
+    fx.bcx.ins().nop();
+
+    (eax, ebx, ecx, edx)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
new file mode 100644
index 00000000000..be3704ca276
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -0,0 +1,181 @@
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: &str,
+    substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+) {
+    let ret = destination.unwrap().0;
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+        };
+
+        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd", (c a) {
+            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+            let lane_ty = fx.clif_type(lane_ty).unwrap();
+            assert!(lane_count <= 32);
+
+            let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+            for lane in (0..lane_count).rev() {
+                let a_lane = a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+                // cast float to int
+                let a_lane = match lane_ty {
+                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+                    _ => a_lane,
+                };
+
+                // extract sign bit of an int
+                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+                // shift sign bit into result
+                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+                res = fx.bcx.ins().ishl_imm(res, 1);
+                res = fx.bcx.ins().bor(res, a_lane_sign);
+            }
+
+            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+            ret.write_cvalue(fx, res);
+        };
+        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd", (c x, c y, o kind) {
+            let kind_const = crate::constant::mir_operand_get_const_val(fx, kind).expect("llvm.x86.sse2.cmp.* kind not const");
+            let flt_cc = match kind_const.try_to_bits(Size::from_bytes(1)).unwrap_or_else(|| panic!("kind not scalar: {:?}", kind_const)) {
+                0 => FloatCC::Equal,
+                1 => FloatCC::LessThan,
+                2 => FloatCC::LessThanOrEqual,
+                7 => {
+                    unimplemented!("Compares corresponding elements in `a` and `b` to see if neither is `NaN`.");
+                }
+                3 => {
+                    unimplemented!("Compares corresponding elements in `a` and `b` to see if either is `NaN`.");
+                }
+                4 => FloatCC::NotEqual,
+                5 => {
+                    unimplemented!("not less than");
+                }
+                6 => {
+                    unimplemented!("not less than or equal");
+                }
+                kind => unreachable!("kind {:?}", kind),
+            };
+
+            simd_pair_for_each_lane(fx, x, y, ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
+                let res_lane = match lane_layout.ty.kind() {
+                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_layout.ty),
+                };
+                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
+            });
+        };
+        "llvm.x86.sse2.psrli.d", (c a, o imm8) {
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.psrli.d imm8 not const");
+            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                    imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                    _ => fx.bcx.ins().iconst(types::I32, 0),
+                };
+                CValue::by_val(res_lane, res_lane_layout)
+            });
+        };
+        "llvm.x86.sse2.pslli.d", (c a, o imm8) {
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8).expect("llvm.x86.sse2.pslli.d imm8 not const");
+            simd_for_each_lane(fx, a, ret, |fx, _lane_layout, res_lane_layout, lane| {
+                let res_lane = match imm8.try_to_bits(Size::from_bytes(4)).unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8)) {
+                    imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                    _ => fx.bcx.ins().iconst(types::I32, 0),
+                };
+                CValue::by_val(res_lane, res_lane_layout)
+            });
+        };
+        "llvm.x86.sse2.storeu.dq", (v mem_addr, c a) {
+            // FIXME correctly handle the misalignment
+            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+            dest.write_cvalue(fx, a);
+        };
+        "llvm.x86.addcarry.64", (v c_in, c a, c b) {
+            llvm_add_sub(
+                fx,
+                BinOp::Add,
+                ret,
+                c_in,
+                a,
+                b
+            );
+        };
+        "llvm.x86.subborrow.64", (v b_in, c a, c b) {
+            llvm_add_sub(
+                fx,
+                BinOp::Sub,
+                ret,
+                b_in,
+                a,
+                b
+            );
+        };
+    }
+
+    if let Some((_, dest)) = destination {
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+    }
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+fn llvm_add_sub<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    ret: CPlace<'tcx>,
+    cb_in: Value,
+    a: CValue<'tcx>,
+    b: CValue<'tcx>,
+) {
+    assert_eq!(
+        a.layout().ty,
+        fx.tcx.types.u64,
+        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+    );
+    assert_eq!(
+        b.layout().ty,
+        fx.tcx.types.u64,
+        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+    );
+
+    // c + carry -> c + first intermediate carry or borrow respectively
+    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+    let c = int0.value_field(fx, mir::Field::new(0));
+    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+    // c + carry -> c + second intermediate carry or borrow respectively
+    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+    let (c, cb1) = int1.load_scalar_pair(fx);
+
+    // carry0 | carry1 -> carry or borrow respectively
+    let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+    let val = CValue::by_val_pair(cb_out, c, layout);
+    ret.write_cvalue(fx, val);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
new file mode 100644
index 00000000000..52896fc7127
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -0,0 +1,1126 @@
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_span::symbol::{kw, sym};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+macro intrinsic_pat {
+    (_) => {
+        _
+    },
+    ($name:ident) => {
+        sym::$name
+    },
+    (kw.$name:ident) => {
+        kw::$name
+    },
+    ($name:literal) => {
+        $name
+    },
+}
+
+macro intrinsic_arg {
+    (o $fx:expr, $arg:ident) => {
+        $arg
+    },
+    (c $fx:expr, $arg:ident) => {
+        codegen_operand($fx, $arg)
+    },
+    (v $fx:expr, $arg:ident) => {
+        codegen_operand($fx, $arg).load_scalar($fx)
+    }
+}
+
+macro intrinsic_substs {
+    ($substs:expr, $index:expr,) => {},
+    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
+        let $first = $substs.type_at($index);
+        intrinsic_substs!($substs, $index+1, $($rest),*);
+    }
+}
+
+macro intrinsic_match {
+    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
+    _ => $unknown:block;
+    $(
+        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
+    )*) => {
+        let _ = $substs; // Silence warning when substs is unused.
+        match $intrinsic {
+            $(
+                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
+                    #[allow(unused_parens, non_snake_case)]
+                    {
+                        $(
+                            intrinsic_substs!($substs, 0, $($subst),*);
+                        )?
+                        if let [$($arg),*] = $args {
+                            let ($($arg,)*) = (
+                                $(intrinsic_arg!($a $fx, $arg),)*
+                            );
+                            #[warn(unused_parens, non_snake_case)]
+                            {
+                                $content
+                            }
+                        } else {
+                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+                        }
+                    }
+                }
+            )*
+            _ => $unknown,
+        }
+    }
+}
+
+macro call_intrinsic_match {
+    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
+        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
+    )*) => {
+        match $intrinsic {
+            $(
+                sym::$name => {
+                    assert!($substs.is_noop());
+                    if let [$(ref $arg),*] = *$args {
+                        let ($($arg,)*) = (
+                            $(codegen_operand($fx, $arg),)*
+                        );
+                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
+                        $ret.write_cvalue($fx, res);
+
+                        if let Some((_, dest)) = $destination {
+                            let ret_block = $fx.get_block(dest);
+                            $fx.bcx.ins().jump(ret_block, &[]);
+                            return;
+                        } else {
+                            unreachable!();
+                        }
+                    } else {
+                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
+                    }
+                }
+            )*
+            _ => {}
+        }
+    }
+}
+
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    match $ty.kind() {
+        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+        _ => {
+            $fx.tcx.sess.span_err(
+                $span,
+                &format!(
+                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+                    $intrinsic, $ty
+                ),
+            );
+            // Prevent verifier error
+            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+            return;
+        }
+    }
+}
+
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    if !$ty.is_simd() {
+        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+        // Prevent verifier error
+        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+        return;
+    }
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+    let (element, count) = match &layout.abi {
+        Abi::Vector { element, count } => (element.clone(), *count),
+        _ => unreachable!(),
+    };
+
+    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+        // Cranelift currently only implements icmp for 128bit vectors.
+        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+        _ => None,
+    }
+}
+
+fn simd_for_each_lane<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(
+        &mut FunctionCx<'_, '_, 'tcx>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
+        Value,
+    ) -> CValue<'tcx>,
+) {
+    let layout = val.layout();
+
+    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+    let ret_lane_layout = fx.layout_of(ret_lane_ty);
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane_idx in 0..lane_count {
+        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
+        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
+
+        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
+
+        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
+    }
+}
+
+fn simd_pair_for_each_lane<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    x: CValue<'tcx>,
+    y: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(
+        &mut FunctionCx<'_, '_, 'tcx>,
+        TyAndLayout<'tcx>,
+        TyAndLayout<'tcx>,
+        Value,
+        Value,
+    ) -> CValue<'tcx>,
+) {
+    assert_eq!(x.layout(), y.layout());
+    let layout = x.layout();
+
+    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+    let ret_lane_layout = fx.layout_of(ret_lane_ty);
+    assert_eq!(lane_count, ret_lane_count);
+
+    for lane in 0..lane_count {
+        let lane = mir::Field::new(lane.try_into().unwrap());
+        let x_lane = x.value_field(fx, lane).load_scalar(fx);
+        let y_lane = y.value_field(fx, lane).load_scalar(fx);
+
+        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
+
+        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+    }
+}
+
+fn simd_reduce<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
+) {
+    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+    let lane_layout = fx.layout_of(lane_ty);
+    assert_eq!(lane_layout, ret.layout());
+
+    let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+    for lane_idx in 1..lane_count {
+        let lane =
+            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+        res_val = f(fx, lane_layout, res_val, lane);
+    }
+    let res = CValue::by_val(res_val, lane_layout);
+    ret.write_cvalue(fx, res);
+}
+
+fn simd_reduce_bool<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    val: CValue<'tcx>,
+    ret: CPlace<'tcx>,
+    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+    assert!(ret.layout().ty.is_bool());
+
+    let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
+    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+    for lane_idx in 1..lane_count {
+        let lane =
+            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
+        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+        res_val = f(fx, res_val, lane);
+    }
+    let res = CValue::by_val(res_val, ret.layout());
+    ret.write_cvalue(fx, res);
+}
+
+fn bool_to_zero_or_max_uint<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    val: Value,
+) -> CValue<'tcx> {
+    let ty = fx.clif_type(layout.ty).unwrap();
+
+    let int_ty = match ty {
+        types::F32 => types::I32,
+        types::F64 => types::I64,
+        ty => ty,
+    };
+
+    let val = fx.bcx.ins().bint(int_ty, val);
+    let mut res = fx.bcx.ins().ineg(val);
+
+    if ty.is_float() {
+        res = fx.bcx.ins().bitcast(ty, res);
+    }
+
+    CValue::by_val(res, layout)
+}
+
/// Lowers a SIMD lane-wise comparison, writing 0 / all-ones mask lanes to `$ret`.
///
/// First arm (`$cc|$cc_f`): a single integer condition code shared by signed
/// and unsigned lanes plus a float condition code. When `clif_vector_type`
/// yields a native vector type, a single vector `icmp` is emitted; otherwise
/// it falls back to a per-lane loop.
/// Second arm (`$cc_u|$cc_s|$cc_f`): distinct unsigned/signed/float condition
/// codes; always lowered lane-by-lane.
macro simd_cmp {
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());

        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);

            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);

            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
        } else {
            // No suitable vector type: compare each lane individually and
            // expand each boolean result to a 0 / all-ones mask lane.
            simd_pair_for_each_lane(
                $fx,
                $x,
                $y,
                $ret,
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    };
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
                },
            );
        }
    },
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                // Pick the condition code by lane signedness/floatness.
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
            },
        );
    },
}
+
/// Lowers a lane-wise integer binary operation into `$ret`.
///
/// `$op` form: the same clif instruction is used for both signed and
/// unsigned lanes (it just forwards to the two-op arm with `$op` twice).
/// `$op_u|$op_s` form: selects the instruction by lane signedness; float
/// lanes are not accepted (`unreachable!`).
macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}
+
/// Lowers a lane-wise binary operation over integer or float lanes into `$ret`.
///
/// `$op|$op_f` form: one instruction shared by signed and unsigned integer
/// lanes, another for float lanes (forwards to the three-op arm).
/// `$op_u|$op_s|$op_f` form: distinct instructions for unsigned, signed and
/// float lanes respectively.
macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}
+
/// Lowers a lane-wise float-only binary operation `$op` into `$ret`;
/// non-float lanes are rejected with `unreachable!`.
macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        $fx,
        $x,
        $y,
        $ret,
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            CValue::by_val(res_lane, ret_lane_layout)
        },
    );
}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    instance: Instance<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+    span: Span,
+) {
+    let def_id = instance.def_id();
+    let substs = instance.substs;
+
+    let intrinsic = fx.tcx.item_name(def_id);
+
+    let ret = match destination {
+        Some((place, _)) => place,
+        None => {
+            // Insert non returning intrinsics here
+            match intrinsic {
+                sym::abort => {
+                    trap_abort(fx, "Called intrinsic::abort.");
+                }
+                sym::transmute => {
+                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
+                }
+                _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+            }
+            return;
+        }
+    };
+
+    if intrinsic.as_str().starts_with("simd_") {
+        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
+        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        fx.bcx.ins().jump(ret_block, &[]);
+        return;
+    }
+
+    let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+    call_intrinsic_match! {
+        fx, intrinsic, substs, ret, destination, args,
+        expf32(flt) -> f32 => expf,
+        expf64(flt) -> f64 => exp,
+        exp2f32(flt) -> f32 => exp2f,
+        exp2f64(flt) -> f64 => exp2,
+        sqrtf32(flt) -> f32 => sqrtf,
+        sqrtf64(flt) -> f64 => sqrt,
+        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
+        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
+        powf32(a, x) -> f32 => powf,
+        powf64(a, x) -> f64 => pow,
+        logf32(flt) -> f32 => logf,
+        logf64(flt) -> f64 => log,
+        log2f32(flt) -> f32 => log2f,
+        log2f64(flt) -> f64 => log2,
+        log10f32(flt) -> f32 => log10f,
+        log10f64(flt) -> f64 => log10,
+        fabsf32(flt) -> f32 => fabsf,
+        fabsf64(flt) -> f64 => fabs,
+        fmaf32(x, y, z) -> f32 => fmaf,
+        fmaf64(x, y, z) -> f64 => fma,
+        copysignf32(x, y) -> f32 => copysignf,
+        copysignf64(x, y) -> f64 => copysign,
+
+        // rounding variants
+        // FIXME use clif insts
+        floorf32(flt) -> f32 => floorf,
+        floorf64(flt) -> f64 => floor,
+        ceilf32(flt) -> f32 => ceilf,
+        ceilf64(flt) -> f64 => ceil,
+        truncf32(flt) -> f32 => truncf,
+        truncf64(flt) -> f64 => trunc,
+        roundf32(flt) -> f32 => roundf,
+        roundf64(flt) -> f64 => round,
+
+        // trigonometry
+        sinf32(flt) -> f32 => sinf,
+        sinf64(flt) -> f64 => sin,
+        cosf32(flt) -> f32 => cosf,
+        cosf64(flt) -> f64 => cos,
+    }
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+        };
+
+        assume, (c _a) {};
+        likely | unlikely, (c a) {
+            ret.write_cvalue(fx, a);
+        };
+        breakpoint, () {
+            fx.bcx.ins().debugtrap();
+        };
+        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
+            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+            assert_eq!(args.len(), 3);
+            let byte_amount = if elem_size != 1 {
+                fx.bcx.ins().imul_imm(count, elem_size as i64)
+            } else {
+                count
+            };
+
+            if intrinsic == sym::copy_nonoverlapping {
+                // FIXME emit_small_memcpy
+                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+            } else {
+                // FIXME emit_small_memmove
+                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+            }
+        };
+        // NOTE: the volatile variants have src and dst swapped
+        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
+            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+            assert_eq!(args.len(), 3);
+            let byte_amount = if elem_size != 1 {
+                fx.bcx.ins().imul_imm(count, elem_size as i64)
+            } else {
+                count
+            };
+
+            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+                // FIXME emit_small_memcpy
+                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
+            } else {
+                // FIXME emit_small_memmove
+                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
+            }
+        };
+        size_of_val, <T> (c ptr) {
+            let layout = fx.layout_of(T);
+            let size = if layout.is_unsized() {
+                let (_ptr, info) = ptr.load_scalar_pair(fx);
+                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+                size
+            } else {
+                fx
+                    .bcx
+                    .ins()
+                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
+            };
+            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+        };
+        min_align_of_val, <T> (c ptr) {
+            let layout = fx.layout_of(T);
+            let align = if layout.is_unsized() {
+                let (_ptr, info) = ptr.load_scalar_pair(fx);
+                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+                align
+            } else {
+                fx
+                    .bcx
+                    .ins()
+                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+            };
+            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+        };
+
+        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
+        | unchecked_shl | unchecked_shr, (c x, c y) {
+            // FIXME trap on overflow
+            let bin_op = match intrinsic {
+                sym::unchecked_add => BinOp::Add,
+                sym::unchecked_sub => BinOp::Sub,
+                sym::unchecked_div | sym::exact_div => BinOp::Div,
+                sym::unchecked_rem => BinOp::Rem,
+                sym::unchecked_shl => BinOp::Shl,
+                sym::unchecked_shr => BinOp::Shr,
+                _ => unreachable!(),
+            };
+            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+            ret.write_cvalue(fx, res);
+        };
+        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
+            assert_eq!(x.layout().ty, y.layout().ty);
+            let bin_op = match intrinsic {
+                sym::add_with_overflow => BinOp::Add,
+                sym::sub_with_overflow => BinOp::Sub,
+                sym::mul_with_overflow => BinOp::Mul,
+                _ => unreachable!(),
+            };
+
+            let res = crate::num::codegen_checked_int_binop(
+                fx,
+                bin_op,
+                x,
+                y,
+            );
+            ret.write_cvalue(fx, res);
+        };
+        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
+            assert_eq!(lhs.layout().ty, rhs.layout().ty);
+            let bin_op = match intrinsic {
+                sym::saturating_add => BinOp::Add,
+                sym::saturating_sub => BinOp::Sub,
+                _ => unreachable!(),
+            };
+
+            let signed = type_sign(T);
+
+            let checked_res = crate::num::codegen_checked_int_binop(
+                fx,
+                bin_op,
+                lhs,
+                rhs,
+            );
+
+            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+            let clif_ty = fx.clif_type(T).unwrap();
+
+            // `select.i8` is not implemented by Cranelift.
+            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
+
+            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
+            let val = match (intrinsic, signed) {
+                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+                (sym::saturating_add, true) => {
+                    let rhs = rhs.load_scalar(fx);
+                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+                    fx.bcx.ins().select(has_overflow, sat_val, val)
+                }
+                (sym::saturating_sub, true) => {
+                    let rhs = rhs.load_scalar(fx);
+                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+                    fx.bcx.ins().select(has_overflow, sat_val, val)
+                }
+                _ => unreachable!(),
+            };
+
+            let res = CValue::by_val(val, fx.layout_of(T));
+
+            ret.write_cvalue(fx, res);
+        };
+        rotate_left, <T>(v x, v y) {
+            let layout = fx.layout_of(T);
+            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
+                fx.bcx.ins().ireduce(types::I64, y)
+            } else {
+                y
+            };
+            let res = fx.bcx.ins().rotl(x, y);
+            ret.write_cvalue(fx, CValue::by_val(res, layout));
+        };
+        rotate_right, <T>(v x, v y) {
+            let layout = fx.layout_of(T);
+            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
+                fx.bcx.ins().ireduce(types::I64, y)
+            } else {
+                y
+            };
+            let res = fx.bcx.ins().rotr(x, y);
+            ret.write_cvalue(fx, CValue::by_val(res, layout));
+        };
+
+        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
+        // doesn't have UB both are codegen'ed the same way
+        offset | arith_offset, (c base, v offset) {
+            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+            let ptr_diff = if pointee_size != 1 {
+                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+            } else {
+                offset
+            };
+            let base_val = base.load_scalar(fx);
+            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+        };
+
+        transmute, (c from) {
+            ret.write_cvalue_transmute(fx, from);
+        };
+        write_bytes | volatile_set_memory, (c dst, v val, v count) {
+            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+            let count = if pointee_size != 1 {
+                fx.bcx.ins().imul_imm(count, pointee_size as i64)
+            } else {
+                count
+            };
+            let dst_ptr = dst.load_scalar(fx);
+            // FIXME make the memset actually volatile when switching to emit_small_memset
+            // FIXME use emit_small_memset
+            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
+        };
+        ctlz | ctlz_nonzero, <T> (v arg) {
+            // FIXME trap on `ctlz_nonzero` with zero arg.
+            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+                // FIXME verify this algorithm is correct
+                let (lsb, msb) = fx.bcx.ins().isplit(arg);
+                let lsb_lz = fx.bcx.ins().clz(lsb);
+                let msb_lz = fx.bcx.ins().clz(msb);
+                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
+                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
+                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
+                fx.bcx.ins().uextend(types::I128, res)
+            } else {
+                fx.bcx.ins().clz(arg)
+            };
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        cttz | cttz_nonzero, <T> (v arg) {
+            // FIXME trap on `cttz_nonzero` with zero arg.
+            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
+                // FIXME verify this algorithm is correct
+                let (lsb, msb) = fx.bcx.ins().isplit(arg);
+                let lsb_tz = fx.bcx.ins().ctz(lsb);
+                let msb_tz = fx.bcx.ins().ctz(msb);
+                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
+                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
+                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
+                fx.bcx.ins().uextend(types::I128, res)
+            } else {
+                fx.bcx.ins().ctz(arg)
+            };
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        ctpop, <T> (v arg) {
+            let res = fx.bcx.ins().popcnt(arg);
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        bitreverse, <T> (v arg) {
+            let res = fx.bcx.ins().bitrev(arg);
+            let res = CValue::by_val(res, fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        bswap, <T> (v arg) {
+            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+                match bcx.func.dfg.value_type(v) {
+                    types::I8 => v,
+
+                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+                    types::I16 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 8);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+                        let tmp2 = bcx.ins().ushr_imm(v, 8);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+                        bcx.ins().bor(n1, n2)
+                    }
+                    types::I32 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 24);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+                        let tmp2 = bcx.ins().ishl_imm(v, 8);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+                        let tmp3 = bcx.ins().ushr_imm(v, 8);
+                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+                        let tmp4 = bcx.ins().ushr_imm(v, 24);
+                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+                        let or_tmp1 = bcx.ins().bor(n1, n2);
+                        let or_tmp2 = bcx.ins().bor(n3, n4);
+                        bcx.ins().bor(or_tmp1, or_tmp2)
+                    }
+                    types::I64 => {
+                        let tmp1 = bcx.ins().ishl_imm(v, 56);
+                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+                        let tmp2 = bcx.ins().ishl_imm(v, 40);
+                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+                        let tmp3 = bcx.ins().ishl_imm(v, 24);
+                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+                        let tmp4 = bcx.ins().ishl_imm(v, 8);
+                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+                        let tmp5 = bcx.ins().ushr_imm(v, 8);
+                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+                        let tmp6 = bcx.ins().ushr_imm(v, 24);
+                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+                        let tmp7 = bcx.ins().ushr_imm(v, 40);
+                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+                        let tmp8 = bcx.ins().ushr_imm(v, 56);
+                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+                        let or_tmp1 = bcx.ins().bor(n1, n2);
+                        let or_tmp2 = bcx.ins().bor(n3, n4);
+                        let or_tmp3 = bcx.ins().bor(n5, n6);
+                        let or_tmp4 = bcx.ins().bor(n7, n8);
+
+                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+                        bcx.ins().bor(or_tmp5, or_tmp6)
+                    }
+                    types::I128 => {
+                        let (lo, hi) = bcx.ins().isplit(v);
+                        let lo = swap(bcx, lo);
+                        let hi = swap(bcx, hi);
+                        bcx.ins().iconcat(hi, lo)
+                    }
+                    ty => unreachable!("bswap {}", ty),
+                }
+            }
+            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
+            ret.write_cvalue(fx, res);
+        };
+        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
+            let layout = fx.layout_of(T);
+            if layout.abi.is_uninhabited() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to instantiate uninhabited type `{}`", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
+                    span,
+                ));
+                return;
+            }
+
+            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+                with_no_trimmed_paths(|| crate::base::codegen_panic(
+                    fx,
+                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
+                    span,
+                ));
+                return;
+            }
+        };
+
+        volatile_load | unaligned_volatile_load, (c ptr) {
+            // Cranelift treats loads as volatile by default
+            // FIXME correctly handle unaligned_volatile_load
+            let inner_layout =
+                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+            ret.write_cvalue(fx, val);
+        };
+        volatile_store | unaligned_volatile_store, (v ptr, c val) {
+            // Cranelift treats stores as volatile by default
+            // FIXME correctly handle unaligned_volatile_store
+            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+            dest.write_cvalue(fx, val);
+        };
+
+        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
+            let const_val =
+                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+            let val = crate::constant::codegen_const_value(
+                fx,
+                const_val,
+                ret.layout().ty,
+            );
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_offset_from, <T> (v ptr, v base) {
+            let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
+            let pointee_size: u64 = fx.layout_of(T).size.bytes();
+            let diff = fx.bcx.ins().isub(ptr, base);
+            // FIXME this can be an exact division.
+            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_eq, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+            ret.write_cvalue(fx, val);
+        };
+
+        ptr_guaranteed_ne, (c a, c b) {
+            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+            ret.write_cvalue(fx, val);
+        };
+
+        caller_location, () {
+            let caller_location = fx.get_caller_location(span);
+            ret.write_cvalue(fx, caller_location);
+        };
+
+        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
+            fx.bcx.ins().fence();
+        };
+        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
+            // FIXME use a compiler fence once Cranelift supports it
+            fx.bcx.ins().fence();
+        };
+        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+            let ty = fx.clif_type(T).unwrap();
+
+            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);
+
+            let val = CValue::by_val(val, fx.layout_of(T));
+            ret.write_cvalue(fx, val);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
+            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
+            let val = val.load_scalar(fx);
+
+            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
+            let layout = new.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let new = new.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            let layout = new.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+
+            let test_old = test_old.load_scalar(fx);
+            let new = new.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+            ret.write_cvalue(fx, ret_val)
+        };
+
+        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
+            let layout = amount.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let amount = amount.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
+            let layout = amount.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let amount = amount.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+
+        // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
+        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
+            let layout = src.layout();
+            validate_atomic_type!(fx, intrinsic, span, layout.ty);
+            let ty = fx.clif_type(layout.ty).unwrap();
+
+            let src = src.load_scalar(fx);
+
+            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+            let old = CValue::by_val(old, layout);
+            ret.write_cvalue(fx, old);
+        };
+
+        minnumf32, (v a, v b) {
+            let val = fx.bcx.ins().fmin(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+            ret.write_cvalue(fx, val);
+        };
+        minnumf64, (v a, v b) {
+            let val = fx.bcx.ins().fmin(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+            ret.write_cvalue(fx, val);
+        };
+        maxnumf32, (v a, v b) {
+            let val = fx.bcx.ins().fmax(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+            ret.write_cvalue(fx, val);
+        };
+        maxnumf64, (v a, v b) {
+            let val = fx.bcx.ins().fmax(a, b);
+            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+            ret.write_cvalue(fx, val);
+        };
+
+        kw.Try, (v f, v data, v _catch_fn) {
+            // FIXME once unwinding is supported, change this to actually catch panics
+            let f_sig = fx.bcx.func.import_signature(Signature {
+                call_conv: CallConv::triple_default(fx.triple()),
+                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+                returns: vec![],
+            });
+
+            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+            let layout = ret.layout();
+            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+            ret.write_cvalue(fx, ret_val);
+        };
+
+        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
+            let res = crate::num::codegen_float_binop(fx, match intrinsic {
+                sym::fadd_fast => BinOp::Add,
+                sym::fsub_fast => BinOp::Sub,
+                sym::fmul_fast => BinOp::Mul,
+                sym::fdiv_fast => BinOp::Div,
+                sym::frem_fast => BinOp::Rem,
+                _ => unreachable!(),
+            }, x, y);
+            ret.write_cvalue(fx, res);
+        };
+        float_to_int_unchecked, (v f) {
+            let res = crate::cast::clif_int_or_float_cast(
+                fx,
+                f,
+                false,
+                fx.clif_type(ret.layout().ty).unwrap(),
+                type_sign(ret.layout().ty),
+            );
+            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+        };
+    }
+
+    if let Some((_, dest)) = destination {
+        let ret_block = fx.get_block(dest);
+        fx.bcx.ins().jump(ret_block, &[]);
+    } else {
+        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
new file mode 100644
index 00000000000..c2f469fa021
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -0,0 +1,281 @@
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use super::*;
+use crate::prelude::*;
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    instance: Instance<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    span: Span,
+) {
+    let def_id = instance.def_id();
+    let substs = instance.substs;
+
+    let intrinsic = fx.tcx.item_name(def_id);
+
+    intrinsic_match! {
+        fx, intrinsic, substs, args,
+        _ => {
+            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+        };
+
+        simd_cast, (c a) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
+                let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
+
+                let from_signed = type_sign(lane_layout.ty);
+                let to_signed = type_sign(ret_lane_layout.ty);
+
+                let ret_lane = clif_int_or_float_cast(fx, lane, from_signed, ret_lane_ty, to_signed);
+                CValue::by_val(ret_lane, ret_lane_layout)
+            });
+        };
+
+        simd_eq, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, Equal|Equal(x, y) -> ret);
+        };
+        simd_ne, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, NotEqual|NotEqual(x, y) -> ret);
+        };
+        simd_lt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedLessThan|SignedLessThan|LessThan(x, y) -> ret);
+        };
+        simd_le, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual|LessThanOrEqual(x, y) -> ret);
+        };
+        simd_gt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan|GreaterThan(x, y) -> ret);
+        };
+        simd_ge, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_cmp!(
+                fx,
+                UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual|GreaterThanOrEqual
+                (x, y) -> ret
+            );
+        };
+
+        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+        _ if intrinsic.as_str().starts_with("simd_shuffle"), (c x, c y, o idx) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+
+            let n: u16 = intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap();
+
+            assert_eq!(x.layout(), y.layout());
+            let layout = x.layout();
+
+            let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+            assert_eq!(lane_ty, ret_lane_ty);
+            assert_eq!(u64::from(n), ret_lane_count);
+
+            let total_len = lane_count * 2;
+
+            let indexes = {
+                use rustc_middle::mir::interpret::*;
+                let idx_const = crate::constant::mir_operand_get_const_val(fx, idx).expect("simd_shuffle* idx not const");
+
+                let idx_bytes = match idx_const {
+                    ConstValue::ByRef { alloc, offset } => {
+                        let size = Size::from_bytes(4 * ret_lane_count /* size_of([u32; ret_lane_count]) */);
+                        alloc.get_bytes(fx, alloc_range(offset, size)).unwrap()
+                    }
+                    _ => unreachable!("{:?}", idx_const),
+                };
+
+                (0..ret_lane_count).map(|i| {
+                    let i = usize::try_from(i).unwrap();
+                    let idx = rustc_middle::mir::interpret::read_target_uint(
+                        fx.tcx.data_layout.endian,
+                        &idx_bytes[4*i.. 4*i + 4],
+                    ).expect("read_target_uint");
+                    u16::try_from(idx).expect("try_from u32")
+                }).collect::<Vec<u16>>()
+            };
+
+            for &idx in &indexes {
+                assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+            }
+
+            for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+                let in_lane = if u64::from(in_idx) < lane_count {
+                    x.value_field(fx, mir::Field::new(in_idx.into()))
+                } else {
+                    y.value_field(fx, mir::Field::new(usize::from(in_idx) - usize::try_from(lane_count).unwrap()))
+                };
+                let out_lane = ret.place_field(fx, mir::Field::new(out_idx));
+                out_lane.write_cvalue(fx, in_lane);
+            }
+        };
+
+        simd_insert, (c base, o idx, c val) {
+            // FIXME validate
+            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+                idx_const
+            } else {
+                fx.tcx.sess.span_fatal(
+                    span,
+                    "Index argument for `simd_insert` is not a constant",
+                );
+            };
+
+            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+            if idx >= lane_count.into() {
+                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+            }
+
+            ret.write_cvalue(fx, base);
+            let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+            ret_lane.write_cvalue(fx, val);
+        };
+
+        simd_extract, (c v, o idx) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
+                idx_const
+            } else {
+                fx.tcx.sess.span_warn(
+                    span,
+                    "Index argument for `simd_extract` is not a constant",
+                );
+                let res = crate::trap::trap_unimplemented_ret_value(
+                    fx,
+                    ret.layout(),
+                    "Index argument for `simd_extract` is not a constant",
+                );
+                ret.write_cvalue(fx, res);
+                return;
+            };
+
+            let idx = idx_const.try_to_bits(Size::from_bytes(4 /* u32*/)).unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+            let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+            if idx >= lane_count.into() {
+                fx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+            }
+
+            let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
+            ret.write_cvalue(fx, ret_lane);
+        };
+
+        simd_add, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
+        };
+        simd_sub, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
+        };
+        simd_mul, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
+        };
+        simd_div, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
+        };
+        simd_shl, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, ishl(x, y) -> ret);
+        };
+        simd_shr, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
+        };
+        simd_and, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, band(x, y) -> ret);
+        };
+        simd_or, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bor(x, y) -> ret);
+        };
+        simd_xor, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_int_binop!(fx, bxor(x, y) -> ret);
+        };
+
+        simd_fma, (c a, c b, c c) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
+            assert_eq!(a.layout(), b.layout());
+            assert_eq!(a.layout(), c.layout());
+            let layout = a.layout();
+
+            let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+            assert_eq!(lane_count, ret_lane_count);
+            let ret_lane_layout = fx.layout_of(ret_lane_ty);
+
+            for lane in 0..lane_count {
+                let lane = mir::Field::new(lane.try_into().unwrap());
+                let a_lane = a.value_field(fx, lane).load_scalar(fx);
+                let b_lane = b.value_field(fx, lane).load_scalar(fx);
+                let c_lane = c.value_field(fx, lane).load_scalar(fx);
+
+                let mul_lane = fx.bcx.ins().fmul(a_lane, b_lane);
+                let res_lane = CValue::by_val(fx.bcx.ins().fadd(mul_lane, c_lane), ret_lane_layout);
+
+                ret.place_field(fx, lane).write_cvalue(fx, res_lane);
+            }
+        };
+
+        simd_fmin, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_flt_binop!(fx, fmin(x, y) -> ret);
+        };
+        simd_fmax, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+            simd_flt_binop!(fx, fmax(x, y) -> ret);
+        };
+
+        simd_reduce_add_ordered | simd_reduce_add_unordered, (c v) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+                if lane_layout.ty.is_floating_point() {
+                    fx.bcx.ins().fadd(a, b)
+                } else {
+                    fx.bcx.ins().iadd(a, b)
+                }
+            });
+        };
+
+        simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            simd_reduce(fx, v, ret, |fx, lane_layout, a, b| {
+                if lane_layout.ty.is_floating_point() {
+                    fx.bcx.ins().fmul(a, b)
+                } else {
+                    fx.bcx.ins().imul(a, b)
+                }
+            });
+        };
+
+        simd_reduce_all, (c v) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().band(a, b));
+        };
+
+        simd_reduce_any, (c v) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
+            simd_reduce_bool(fx, v, ret, |fx, a, b| fx.bcx.ins().bor(a, b));
+        };
+
+        // simd_fabs
+        // simd_saturating_add
+        // simd_bitmask
+        // simd_select
+        // simd_rem
+        // simd_neg
+        // simd_trunc
+        // simd_floor
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
new file mode 100644
index 00000000000..cb1cb3c74db
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -0,0 +1,306 @@
+#![feature(rustc_private, decl_macro, never_type, hash_drain_filter, vec_into_raw_parts, once_cell)]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+extern crate snap;
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_interface;
+extern crate rustc_metadata;
+extern crate rustc_mir;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorReported;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::settings::{self, Configurable};
+
+pub use crate::config::*;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod backend;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod compiler_builtins;
+mod config;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod metadata;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+    pub(crate) use std::convert::{TryFrom, TryInto};
+
+    pub(crate) use rustc_span::Span;
+
+    pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+    pub(crate) use rustc_middle::bug;
+    pub(crate) use rustc_middle::mir::{self, *};
+    pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
+    pub(crate) use rustc_middle::ty::{
+        self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
+        TypeFoldable, UintTy,
+    };
+    pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
+
+    pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+    pub(crate) use rustc_index::vec::Idx;
+
+    pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+    pub(crate) use cranelift_codegen::ir::function::Function;
+    pub(crate) use cranelift_codegen::ir::types;
+    pub(crate) use cranelift_codegen::ir::{
+        AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+        StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+    };
+    pub(crate) use cranelift_codegen::isa::{self, CallConv};
+    pub(crate) use cranelift_codegen::Context;
+    pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+    pub(crate) use cranelift_module::{self, DataContext, FuncId, Linkage, Module};
+
+    pub(crate) use crate::abi::*;
+    pub(crate) use crate::base::{codegen_operand, codegen_place};
+    pub(crate) use crate::cast::*;
+    pub(crate) use crate::common::*;
+    pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+    pub(crate) use crate::pointer::Pointer;
+    pub(crate) use crate::trap::*;
+    pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+    fn drop(&mut self) {
+        if ::std::thread::panicking() {
+            println!("{}", (self.0)());
+        }
+    }
+}
+
+/// The codegen context holds any information shared between the codegen of individual functions
+/// inside a single codegen unit with the exception of the Cranelift [`Module`](cranelift_module::Module).
+struct CodegenCx<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    global_asm: String,
+    cached_context: Context,
+    debug_context: Option<DebugContext<'tcx>>,
+    unwind_context: UnwindContext,
+}
+
+impl<'tcx> CodegenCx<'tcx> {
+    fn new(
+        tcx: TyCtxt<'tcx>,
+        backend_config: BackendConfig,
+        isa: &dyn TargetIsa,
+        debug_info: bool,
+    ) -> Self {
+        assert_eq!(pointer_ty(tcx), isa.pointer_type());
+
+        let unwind_context =
+            UnwindContext::new(tcx, isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
+        let debug_context = if debug_info { Some(DebugContext::new(tcx, isa)) } else { None };
+        CodegenCx {
+            tcx,
+            global_asm: String::new(),
+            cached_context: Context::new(),
+            debug_context,
+            unwind_context,
+        }
+    }
+}
+
+pub struct CraneliftCodegenBackend {
+    pub config: Option<BackendConfig>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+    fn init(&self, sess: &Session) {
+        use rustc_session::config::Lto;
+        match sess.lto() {
+            Lto::No | Lto::ThinLocal => {}
+            Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
+        }
+    }
+
+    fn target_features(&self, _sess: &Session) -> Vec<rustc_span::Symbol> {
+        vec![]
+    }
+
+    fn print_version(&self) {
+        println!("Cranelift version: {}", cranelift_codegen::VERSION);
+    }
+
+    fn codegen_crate(
+        &self,
+        tcx: TyCtxt<'_>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
+        tcx.sess.abort_if_errors();
+        let config = if let Some(config) = self.config.clone() {
+            config
+        } else {
+            BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
+                .unwrap_or_else(|err| tcx.sess.fatal(&err))
+        };
+        match config.codegen_mode {
+            CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
+            CodegenMode::Jit | CodegenMode::JitLazy => {
+                #[cfg(feature = "jit")]
+                let _: ! = driver::jit::run_jit(tcx, config);
+
+                #[cfg(not(feature = "jit"))]
+                tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+            }
+        }
+    }
+
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        _sess: &Session,
+    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+        Ok(*ongoing_codegen
+            .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+            .unwrap())
+    }
+
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: CodegenResults,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorReported> {
+        use rustc_codegen_ssa::back::link::link_binary;
+
+        link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+            sess,
+            &codegen_results,
+            outputs,
+        );
+
+        Ok(())
+    }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+    sess.target.llvm_target.parse().unwrap()
+}
+
+fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
+    use target_lexicon::BinaryFormat;
+
+    let target_triple = crate::target_triple(sess);
+
+    let mut flags_builder = settings::builder();
+    flags_builder.enable("is_pic").unwrap();
+    flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+    let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
+    flags_builder.set("enable_verifier", enable_verifier).unwrap();
+
+    let tls_model = match target_triple.binary_format {
+        BinaryFormat::Elf => "elf_gd",
+        BinaryFormat::Macho => "macho",
+        BinaryFormat::Coff => "coff",
+        _ => "none",
+    };
+    flags_builder.set("tls_model", tls_model).unwrap();
+
+    flags_builder.set("enable_simd", "true").unwrap();
+
+    flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
+
+    flags_builder.set("regalloc", &backend_config.regalloc).unwrap();
+
+    use rustc_session::config::OptLevel;
+    match sess.opts.optimize {
+        OptLevel::No => {
+            flags_builder.set("opt_level", "none").unwrap();
+        }
+        OptLevel::Less | OptLevel::Default => {}
+        OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
+            flags_builder.set("opt_level", "speed_and_size").unwrap();
+        }
+    }
+
+    let flags = settings::Flags::new(flags_builder);
+
+    let variant = cranelift_codegen::isa::BackendVariant::MachInst;
+
+    let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
+        Some("native") => {
+            let builder = cranelift_native::builder_with_options(variant, true).unwrap();
+            builder
+        }
+        Some(value) => {
+            let mut builder =
+                cranelift_codegen::isa::lookup_variant(target_triple, variant).unwrap();
+            if let Err(_) = builder.enable(value) {
+                sess.fatal("The specified target cpu isn't currently supported by Cranelift.");
+            }
+            builder
+        }
+        None => {
+            let mut builder =
+                cranelift_codegen::isa::lookup_variant(target_triple.clone(), variant).unwrap();
+            if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+                // Don't use "haswell" as the default, as it implies `has_lzcnt`.
+                // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
+                builder.enable("nehalem").unwrap();
+            }
+            builder
+        }
+    };
+
+    isa_builder.finish(flags)
+}
+
+/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+    Box::new(CraneliftCodegenBackend { config: None })
+}
diff --git a/compiler/rustc_codegen_cranelift/src/linkage.rs b/compiler/rustc_codegen_cranelift/src/linkage.rs
new file mode 100644
index 00000000000..ca853aac158
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/linkage.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) fn get_clif_linkage(
+    mono_item: MonoItem<'_>,
+    linkage: RLinkage,
+    visibility: Visibility,
+    is_compiler_builtins: bool,
+) -> Linkage {
+    match (linkage, visibility) {
+        (RLinkage::External, Visibility::Default) if is_compiler_builtins => Linkage::Hidden,
+        (RLinkage::External, Visibility::Default) => Linkage::Export,
+        (RLinkage::Internal, Visibility::Default) => Linkage::Local,
+        (RLinkage::External, Visibility::Hidden) => Linkage::Hidden,
+        (RLinkage::WeakAny, Visibility::Default) => Linkage::Preemptible,
+        _ => panic!("{:?} = {:?} {:?}", mono_item, linkage, visibility),
+    }
+}
+
+pub(crate) fn get_static_linkage(tcx: TyCtxt<'_>, def_id: DefId) -> Linkage {
+    let fn_attrs = tcx.codegen_fn_attrs(def_id);
+
+    if let Some(linkage) = fn_attrs.linkage {
+        match linkage {
+            RLinkage::External => Linkage::Export,
+            RLinkage::Internal => Linkage::Local,
+            RLinkage::ExternalWeak | RLinkage::WeakAny => Linkage::Preemptible,
+            _ => panic!("{:?}", linkage),
+        }
+    } else if tcx.is_reachable_non_generic(def_id) {
+        Linkage::Export
+    } else {
+        Linkage::Hidden
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
new file mode 100644
index 00000000000..8fd1e4f5811
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -0,0 +1,159 @@
+use cranelift_codegen::binemit::{NullStackMapSink, NullTrapSink};
+use rustc_hir::LangItem;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::AssocKind;
+use rustc_session::config::EntryFnType;
+use rustc_span::symbol::Ident;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the rust runtime and call
+/// users main function.
+pub(crate) fn maybe_create_entry_wrapper(
+    tcx: TyCtxt<'_>,
+    module: &mut impl Module,
+    unwind_context: &mut UnwindContext,
+    is_jit: bool,
+    is_primary_cgu: bool,
+) {
+    let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
+        Some((def_id, entry_ty)) => (
+            def_id,
+            match entry_ty {
+                EntryFnType::Main => true,
+                EntryFnType::Start => false,
+            },
+        ),
+        None => return,
+    };
+
+    if main_def_id.is_local() {
+        let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+        if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+            return;
+        }
+    } else if !is_primary_cgu {
+        return;
+    }
+
+    create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
+
+    fn create_entry_fn(
+        tcx: TyCtxt<'_>,
+        m: &mut impl Module,
+        unwind_context: &mut UnwindContext,
+        rust_main_def_id: DefId,
+        ignore_lang_start_wrapper: bool,
+        is_main_fn: bool,
+    ) {
+        let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+        // Given that `main()` has no arguments,
+        // then its return type cannot have
+        // late-bound regions, since late-bound
+        // regions must appear in the argument
+        // listing.
+        let main_ret_ty = tcx.erase_regions(main_ret_ty.no_bound_vars().unwrap());
+
+        let cmain_sig = Signature {
+            params: vec![
+                AbiParam::new(m.target_config().pointer_type()),
+                AbiParam::new(m.target_config().pointer_type()),
+            ],
+            returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+            call_conv: CallConv::triple_default(m.isa().triple()),
+        };
+
+        let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+        let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+        let main_name = tcx.symbol_name(instance).name;
+        let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
+        let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
+
+        let mut ctx = Context::new();
+        ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+        {
+            let mut func_ctx = FunctionBuilderContext::new();
+            let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+            let block = bcx.create_block();
+            bcx.switch_to_block(block);
+            let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+            let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
+            let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+            let result = if is_main_fn && ignore_lang_start_wrapper {
+                // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
+                // FIXME set program arguments somehow
+                let call_inst = bcx.ins().call(main_func_ref, &[]);
+                let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
+
+                let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
+                let report = tcx
+                    .associated_items(termination_trait)
+                    .find_by_name_and_kind(
+                        tcx,
+                        Ident::from_str("report"),
+                        AssocKind::Fn,
+                        termination_trait,
+                    )
+                    .unwrap();
+                let report = Instance::resolve(
+                    tcx,
+                    ParamEnv::reveal_all(),
+                    report.def_id,
+                    tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
+                )
+                .unwrap()
+                .unwrap();
+
+                let report_name = tcx.symbol_name(report).name;
+                let report_sig = get_function_sig(tcx, m.isa().triple(), report);
+                let report_func_id =
+                    m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
+                let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
+
+                // FIXME do proper abi handling instead of expecting the pass mode to be identical
+                // for returns and arguments.
+                let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
+                let res = bcx.func.dfg.inst_results(report_call_inst)[0];
+                match m.target_config().pointer_type() {
+                    types::I32 => res,
+                    types::I64 => bcx.ins().sextend(types::I64, res),
+                    _ => unimplemented!("16bit systems are not yet supported"),
+                }
+            } else if is_main_fn {
+                let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+                let start_instance = Instance::resolve(
+                    tcx,
+                    ParamEnv::reveal_all(),
+                    start_def_id,
+                    tcx.intern_substs(&[main_ret_ty.into()]),
+                )
+                .unwrap()
+                .unwrap()
+                .polymorphize(tcx);
+                let start_func_id = import_function(tcx, m, start_instance);
+
+                let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+                let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+                let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
+                bcx.inst_results(call_inst)[0]
+            } else {
+                // using user-defined start fn
+                let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
+                bcx.inst_results(call_inst)[0]
+            };
+
+            bcx.ins().return_(&[result]);
+            bcx.seal_all_blocks();
+            bcx.finalize();
+        }
+        m.define_function(cmain_func_id, &mut ctx, &mut NullTrapSink {}, &mut NullStackMapSink {})
+            .unwrap();
+        unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/metadata.rs b/compiler/rustc_codegen_cranelift/src/metadata.rs
new file mode 100644
index 00000000000..db24bf65eb5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/metadata.rs
@@ -0,0 +1,20 @@
+//! Writing of the rustc metadata for dylibs
+
+use rustc_middle::ty::TyCtxt;
+
+use crate::backend::WriteMetadata;
+
+// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
+pub(crate) fn write_metadata<O: WriteMetadata>(tcx: TyCtxt<'_>, object: &mut O) {
+    use snap::write::FrameEncoder;
+    use std::io::Write;
+
+    let metadata = tcx.encode_metadata();
+    let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+    FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
+
+    object.add_rustc_section(
+        rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx),
+        compressed,
+    );
+}
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
new file mode 100644
index 00000000000..b6d378a5fe1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -0,0 +1,437 @@
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+    use BinOp::*;
+    use IntCC::*;
+    Some(match bin_op {
+        Eq => Equal,
+        Lt => {
+            if signed {
+                SignedLessThan
+            } else {
+                UnsignedLessThan
+            }
+        }
+        Le => {
+            if signed {
+                SignedLessThanOrEqual
+            } else {
+                UnsignedLessThanOrEqual
+            }
+        }
+        Ne => NotEqual,
+        Ge => {
+            if signed {
+                SignedGreaterThanOrEqual
+            } else {
+                UnsignedGreaterThanOrEqual
+            }
+        }
+        Gt => {
+            if signed {
+                SignedGreaterThan
+            } else {
+                UnsignedGreaterThan
+            }
+        }
+        _ => return None,
+    })
+}
+
+fn codegen_compare_bin_op<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    signed: bool,
+    lhs: Value,
+    rhs: Value,
+) -> CValue<'tcx> {
+    let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+    let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
+    let val = fx.bcx.ins().bint(types::I8, val);
+    CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    match bin_op {
+        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+            match in_lhs.layout().ty.kind() {
+                ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+                    let signed = type_sign(in_lhs.layout().ty);
+                    let lhs = in_lhs.load_scalar(fx);
+                    let rhs = in_rhs.load_scalar(fx);
+
+                    let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
+                        && (in_lhs.layout().ty.kind() == fx.tcx.types.i8.kind()
+                            || in_lhs.layout().ty.kind() == fx.tcx.types.i16.kind())
+                    {
+                        // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
+                        (
+                            fx.bcx.ins().sextend(types::I32, lhs),
+                            fx.bcx.ins().sextend(types::I32, rhs),
+                        )
+                    } else {
+                        (lhs, rhs)
+                    };
+
+                    return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
+    }
+
+    match in_lhs.layout().ty.kind() {
+        ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+        ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+    }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let res = match bin_op {
+        BinOp::BitXor => b.bxor(lhs, rhs),
+        BinOp::BitAnd => b.band(lhs, rhs),
+        BinOp::BitOr => b.bor(lhs, rhs),
+        // Compare binops are handled by `codegen_binop`.
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+    };
+
+    CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+        assert_eq!(
+            in_lhs.layout().ty,
+            in_rhs.layout().ty,
+            "int binop requires lhs and rhs of same type"
+        );
+    }
+
+    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+        return res;
+    }
+
+    let signed = type_sign(in_lhs.layout().ty);
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let val = match bin_op {
+        BinOp::Add => b.iadd(lhs, rhs),
+        BinOp::Sub => b.isub(lhs, rhs),
+        BinOp::Mul => b.imul(lhs, rhs),
+        BinOp::Div => {
+            if signed {
+                b.sdiv(lhs, rhs)
+            } else {
+                b.udiv(lhs, rhs)
+            }
+        }
+        BinOp::Rem => {
+            if signed {
+                b.srem(lhs, rhs)
+            } else {
+                b.urem(lhs, rhs)
+            }
+        }
+        BinOp::BitXor => b.bxor(lhs, rhs),
+        BinOp::BitAnd => b.band(lhs, rhs),
+        BinOp::BitOr => b.bor(lhs, rhs),
+        BinOp::Shl => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            fx.bcx.ins().ishl(lhs, actual_shift)
+        }
+        BinOp::Shr => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            if signed {
+                fx.bcx.ins().sshr(lhs, actual_shift)
+            } else {
+                fx.bcx.ins().ushr(lhs, actual_shift)
+            }
+        }
+        // Compare binops are handled by `codegen_binop`.
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+    };
+
+    CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+        assert_eq!(
+            in_lhs.layout().ty,
+            in_rhs.layout().ty,
+            "checked int binop requires lhs and rhs of same type"
+        );
+    }
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+        return res;
+    }
+
+    let signed = type_sign(in_lhs.layout().ty);
+
+    let (res, has_overflow) = match bin_op {
+        BinOp::Add => {
+            /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+            (val, c_out)*/
+            // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
+            let val = fx.bcx.ins().iadd(lhs, rhs);
+            let has_overflow = if !signed {
+                fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+            } else {
+                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+                let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+                fx.bcx.ins().bxor(rhs_is_negative, slt)
+            };
+            (val, has_overflow)
+        }
+        BinOp::Sub => {
+            /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+            (val, b_out)*/
+            // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+            let val = fx.bcx.ins().isub(lhs, rhs);
+            let has_overflow = if !signed {
+                fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+            } else {
+                let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+                let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+                fx.bcx.ins().bxor(rhs_is_negative, sgt)
+            };
+            (val, has_overflow)
+        }
+        BinOp::Mul => {
+            let ty = fx.bcx.func.dfg.value_type(lhs);
+            match ty {
+                types::I8 | types::I16 | types::I32 if !signed => {
+                    let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+                    let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_overflow = fx.bcx.ins().icmp_imm(
+                        IntCC::UnsignedGreaterThan,
+                        val,
+                        (1 << ty.bits()) - 1,
+                    );
+                    let val = fx.bcx.ins().ireduce(ty, val);
+                    (val, has_overflow)
+                }
+                types::I8 | types::I16 | types::I32 if signed => {
+                    let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+                    let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_underflow =
+                        fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+                    let has_overflow = fx.bcx.ins().icmp_imm(
+                        IntCC::SignedGreaterThan,
+                        val,
+                        (1 << (ty.bits() - 1)) - 1,
+                    );
+                    let val = fx.bcx.ins().ireduce(ty, val);
+                    (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+                }
+                types::I64 => {
+                    let val = fx.bcx.ins().imul(lhs, rhs);
+                    let has_overflow = if !signed {
+                        let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+                    } else {
+                        // Based on LLVM's instruction sequence for compiling
+                        // a.checked_mul(b).is_some() to riscv64gc:
+                        // mulh    a2, a0, a1
+                        // mul     a0, a0, a1
+                        // srai    a0, a0, 63
+                        // xor     a0, a0, a2
+                        // snez    a0, a0
+                        let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+                        let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
+                        let xor = fx.bcx.ins().bxor(val_hi, val_sign);
+                        fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
+                    };
+                    (val, has_overflow)
+                }
+                types::I128 => {
+                    unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+                }
+                _ => unreachable!("invalid non-integer type {}", ty),
+            }
+        }
+        BinOp::Shl => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            let val = fx.bcx.ins().ishl(lhs, actual_shift);
+            let ty = fx.bcx.func.dfg.value_type(val);
+            let max_shift = i64::from(ty.bits()) - 1;
+            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+            (val, has_overflow)
+        }
+        BinOp::Shr => {
+            let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+            let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+            let actual_shift = clif_intcast(fx, actual_shift, types::I8, false);
+            let val = if !signed {
+                fx.bcx.ins().ushr(lhs, actual_shift)
+            } else {
+                fx.bcx.ins().sshr(lhs, actual_shift)
+            };
+            let ty = fx.bcx.func.dfg.value_type(val);
+            let max_shift = i64::from(ty.bits()) - 1;
+            let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+            (val, has_overflow)
+        }
+        _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
+    };
+
+    let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
+    let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+    CValue::by_val_pair(res, has_overflow, out_layout)
+}
+
+pub(crate) fn codegen_float_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+    let lhs = in_lhs.load_scalar(fx);
+    let rhs = in_rhs.load_scalar(fx);
+
+    let b = fx.bcx.ins();
+    let res = match bin_op {
+        BinOp::Add => b.fadd(lhs, rhs),
+        BinOp::Sub => b.fsub(lhs, rhs),
+        BinOp::Mul => b.fmul(lhs, rhs),
+        BinOp::Div => b.fdiv(lhs, rhs),
+        BinOp::Rem => {
+            let name = match in_lhs.layout().ty.kind() {
+                ty::Float(FloatTy::F32) => "fmodf",
+                ty::Float(FloatTy::F64) => "fmod",
+                _ => bug!(),
+            };
+            return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+        }
+        BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+            let fltcc = match bin_op {
+                BinOp::Eq => FloatCC::Equal,
+                BinOp::Lt => FloatCC::LessThan,
+                BinOp::Le => FloatCC::LessThanOrEqual,
+                BinOp::Ne => FloatCC::NotEqual,
+                BinOp::Ge => FloatCC::GreaterThanOrEqual,
+                BinOp::Gt => FloatCC::GreaterThan,
+                _ => unreachable!(),
+            };
+            let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+            let val = fx.bcx.ins().bint(types::I8, val);
+            return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+        }
+        _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+    };
+
+    CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    in_lhs: CValue<'tcx>,
+    in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+    let is_thin_ptr = in_lhs
+        .layout()
+        .ty
+        .builtin_deref(true)
+        .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+        .unwrap_or(true);
+
+    if is_thin_ptr {
+        match bin_op {
+            BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+                let lhs = in_lhs.load_scalar(fx);
+                let rhs = in_rhs.load_scalar(fx);
+
+                codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
+            }
+            BinOp::Offset => {
+                let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+                let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+                let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+                let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+                let base_val = base.load_scalar(fx);
+                let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+                CValue::by_val(res, base.layout())
+            }
+            _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+        }
+    } else {
+        let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+        let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+        let res = match bin_op {
+            BinOp::Eq => {
+                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+                fx.bcx.ins().band(ptr_eq, extra_eq)
+            }
+            BinOp::Ne => {
+                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+                fx.bcx.ins().bor(ptr_ne, extra_ne)
+            }
+            BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
+                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
+                let ptr_cmp =
+                    fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+                let extra_cmp = fx.bcx.ins().icmp(
+                    bin_op_to_intcc(bin_op, false).unwrap(),
+                    lhs_extra,
+                    rhs_extra,
+                );
+
+                fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+            }
+            _ => panic!("bin_op {:?} on ptr", bin_op),
+        };
+
+        CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
new file mode 100644
index 00000000000..61033d85a12
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -0,0 +1,20 @@
+//! Various optimizations specific to cg_clif
+
+use cranelift_codegen::isa::TargetIsa;
+
+use crate::prelude::*;
+
+pub(crate) mod peephole;
+
+pub(crate) fn optimize_function<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    isa: &dyn TargetIsa,
+    instance: Instance<'tcx>,
+    ctx: &mut Context,
+    clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+    // FIXME classify optimizations over opt levels once we have more
+
+    crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx, &*clif_comments);
+    crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
new file mode 100644
index 00000000000..b95e2d72877
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
@@ -0,0 +1,106 @@
+//! Peephole optimizations that can be performed while creating clif ir.
+
+use cranelift_codegen::ir::{
+    condcodes::IntCC, types, InstBuilder, InstructionData, Opcode, Value, ValueDef,
+};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return its input, otherwise return the
+/// given value.
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+        match bcx.func.dfg[arg_inst] {
+            InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
+            _ => arg,
+        }
+    } else {
+        arg
+    }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
+/// otherwise return the given value and false.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+        match bcx.func.dfg[arg_inst] {
+            // This is the lowering of `Rvalue::Not`
+            InstructionData::IntCompareImm {
+                opcode: Opcode::IcmpImm,
+                cond: IntCC::Equal,
+                arg,
+                imm,
+            } if imm.bits() == 0 => (arg, true),
+            _ => (arg, false),
+        }
+    } else {
+        (arg, false)
+    }
+}
+
+pub(crate) fn make_branchable_value(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+    if bcx.func.dfg.value_type(arg).is_bool() {
+        return arg;
+    }
+
+    (|| {
+        let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+            arg_inst
+        } else {
+            return None;
+        };
+
+        match bcx.func.dfg[arg_inst] {
+            // The value comes directly from a load; replace it with a single widening load.
+            InstructionData::Load { opcode: Opcode::Load, arg: ptr, flags, offset } => {
+                // Using `load.i8 + uextend.i32` would legalize to `uload8 + ireduce.i8 +
+                // uextend.i32`. Just `uload8` is much faster.
+                match bcx.func.dfg.ctrl_typevar(arg_inst) {
+                    types::I8 => Some(bcx.ins().uload8(types::I32, flags, ptr, offset)),
+                    types::I16 => Some(bcx.ins().uload16(types::I32, flags, ptr, offset)),
+                    _ => None,
+                }
+            }
+            _ => None,
+        }
+    })()
+    .unwrap_or_else(|| {
+        match bcx.func.dfg.value_type(arg) {
+            types::I8 | types::I16 => {
+                // WORKAROUND for brz.i8 and brnz.i8 not yet being implemented
+                bcx.ins().uextend(types::I32, arg)
+            }
+            _ => arg,
+        }
+    })
+}
+
+/// Returns whether the branch is statically known to be taken or `None` if it isn't statically known.
+pub(crate) fn maybe_known_branch_taken(
+    bcx: &FunctionBuilder<'_>,
+    arg: Value,
+    test_zero: bool,
+) -> Option<bool> {
+    let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+        arg_inst
+    } else {
+        return None;
+    };
+
+    match bcx.func.dfg[arg_inst] {
+        InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
+            if test_zero {
+                Some(!imm)
+            } else {
+                Some(imm)
+            }
+        }
+        InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
+            if test_zero {
+                Some(imm.bits() == 0)
+            } else {
+                Some(imm.bits() != 0)
+            }
+        }
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
new file mode 100644
index 00000000000..31d827f83bf
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -0,0 +1,134 @@
+//! Defines [`Pointer`] which is used to improve the quality of the generated clif ir for pointer
+//! operations.
+
+use crate::prelude::*;
+
+use rustc_target::abi::Align;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+/// A pointer pointing either to a certain address, a certain stack slot or nothing.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Pointer {
+    base: PointerBase,
+    offset: Offset32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PointerBase {
+    Addr(Value),
+    Stack(StackSlot),
+    Dangling(Align),
+}
+
+impl Pointer {
+    pub(crate) fn new(addr: Value) -> Self {
+        Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+    }
+
+    pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
+        Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
+    }
+
+    pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
+        let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
+        Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+    }
+
+    pub(crate) fn dangling(align: Align) -> Self {
+        Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
+    }
+
+    pub(crate) fn debug_base_and_offset(self) -> (PointerBase, Offset32) {
+        (self.base, self.offset)
+    }
+
+    pub(crate) fn get_addr(self, fx: &mut FunctionCx<'_, '_, '_>) -> Value {
+        match self.base {
+            PointerBase::Addr(base_addr) => {
+                let offset: i64 = self.offset.into();
+                if offset == 0 { base_addr } else { fx.bcx.ins().iadd_imm(base_addr, offset) }
+            }
+            PointerBase::Stack(stack_slot) => {
+                fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset)
+            }
+            PointerBase::Dangling(align) => {
+                fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+            }
+        }
+    }
+
+    pub(crate) fn offset(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Offset32) -> Self {
+        self.offset_i64(fx, extra_offset.into())
+    }
+
+    pub(crate) fn offset_i64(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: i64) -> Self {
+        if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
+            Pointer { base: self.base, offset: new_offset }
+        } else {
+            let base_offset: i64 = self.offset.into();
+            if let Some(new_offset) = base_offset.checked_add(extra_offset) {
+                let base_addr = match self.base {
+                    PointerBase::Addr(addr) => addr,
+                    PointerBase::Stack(stack_slot) => {
+                        fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+                    }
+                    PointerBase::Dangling(align) => {
+                        fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+                    }
+                };
+                let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
+                Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+            } else {
+                panic!(
+                    "self.offset ({}) + extra_offset ({}) not representable in i64",
+                    base_offset, extra_offset
+                );
+            }
+        }
+    }
+
+    pub(crate) fn offset_value(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Value) -> Self {
+        match self.base {
+            PointerBase::Addr(addr) => Pointer {
+                base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+                offset: self.offset,
+            },
+            PointerBase::Stack(stack_slot) => {
+                let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
+                Pointer {
+                    base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
+                    offset: Offset32::new(0),
+                }
+            }
+            PointerBase::Dangling(align) => {
+                let addr =
+                    fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+                Pointer {
+                    base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+                    offset: self.offset,
+                }
+            }
+        }
+    }
+
+    pub(crate) fn load(self, fx: &mut FunctionCx<'_, '_, '_>, ty: Type, flags: MemFlags) -> Value {
+        match self.base {
+            PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
+            PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_load(ty, stack_slot, self.offset),
+            PointerBase::Dangling(_align) => unreachable!(),
+        }
+    }
+
+    pub(crate) fn store(self, fx: &mut FunctionCx<'_, '_, '_>, value: Value, flags: MemFlags) {
+        match self.base {
+            PointerBase::Addr(base_addr) => {
+                fx.bcx.ins().store(flags, value, base_addr, self.offset);
+            }
+            PointerBase::Stack(stack_slot) => {
+                fx.bcx.ins().stack_store(value, stack_slot, self.offset);
+            }
+            PointerBase::Dangling(_align) => unreachable!(),
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
new file mode 100644
index 00000000000..05db74745a1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -0,0 +1,284 @@
+//! This module provides the [CommentWriter] which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg   loc.idx    param    pass mode            ssa flags  ty
+//! ; ret    _0      = v0       ByRef                NOT_SSA    (u8, u8)
+//! ; arg    _1      = v1       ByRef                NOT_SSA    IsNotEmpty
+//! ; arg    _2.0    = v2       ByVal(types::I64)    NOT_SSA    &&[u16]
+//!
+//!     ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//!     ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//!     ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//!     sig0 = (i64, i64, i64) system_v
+//!     sig1 = (i64, i64, i64) system_v
+//!     fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//!     v3 = stack_addr.i64 ss0
+//!     v4 = stack_addr.i64 ss1
+//!     store v2, v4
+//!     v5 = stack_addr.i64 ss2
+//!     jump block1
+//!
+//! block1:
+//!     nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//!     v6 = load.i64 v4
+//!     store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//!     v7 = load.i64 v5
+//!     call fn0(v0, v3, v7)
+//!     jump block2
+//!
+//! block2:
+//!     nop
+//! ;
+//! ; return
+//!     return
+//! }
+//! ```
+
+use std::fmt;
+use std::io::Write;
+
+use cranelift_codegen::{
+    entity::SecondaryMap,
+    ir::{entities::AnyEntity, function::DisplayFunctionAnnotations},
+    write::{FuncWriter, PlainWriter},
+};
+
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_session::config::OutputType;
+use rustc_target::abi::call::FnAbi;
+
+use crate::prelude::*;
+
+#[derive(Debug)]
+pub(crate) struct CommentWriter {
+    enabled: bool,
+    global_comments: Vec<String>,
+    entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+    pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        let enabled = should_write_ir(tcx);
+        let global_comments = if enabled {
+            vec![
+                format!("symbol {}", tcx.symbol_name(instance).name),
+                format!("instance {:?}", instance),
+                format!("abi {:?}", FnAbi::of_instance(&RevealAllLayoutCx(tcx), instance, &[])),
+                String::new(),
+            ]
+        } else {
+            vec![]
+        };
+
+        CommentWriter { enabled, global_comments, entity_comments: FxHashMap::default() }
+    }
+}
+
+impl CommentWriter {
+    pub(crate) fn enabled(&self) -> bool {
+        self.enabled
+    }
+
+    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+        debug_assert!(self.enabled);
+        self.global_comments.push(comment.into());
+    }
+
+    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+        &mut self,
+        entity: E,
+        comment: S,
+    ) {
+        debug_assert!(self.enabled);
+
+        use std::collections::hash_map::Entry;
+        match self.entity_comments.entry(entity.into()) {
+            Entry::Occupied(mut occ) => {
+                occ.get_mut().push('\n');
+                occ.get_mut().push_str(comment.as_ref());
+            }
+            Entry::Vacant(vac) => {
+                vac.insert(comment.into());
+            }
+        }
+    }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+    fn write_preamble(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        reg_info: Option<&isa::RegInfo>,
+    ) -> Result<bool, fmt::Error> {
+        for comment in &self.global_comments {
+            if !comment.is_empty() {
+                writeln!(w, "; {}", comment)?;
+            } else {
+                writeln!(w)?;
+            }
+        }
+        if !self.global_comments.is_empty() {
+            writeln!(w)?;
+        }
+
+        self.super_preamble(w, func, reg_info)
+    }
+
+    fn write_entity_definition(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        _func: &Function,
+        entity: AnyEntity,
+        value: &dyn fmt::Display,
+    ) -> fmt::Result {
+        write!(w, "    {} = {}", entity, value)?;
+
+        if let Some(comment) = self.entity_comments.get(&entity) {
+            writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+        } else {
+            writeln!(w)
+        }
+    }
+
+    fn write_block_header(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        isa: Option<&dyn isa::TargetIsa>,
+        block: Block,
+        indent: usize,
+    ) -> fmt::Result {
+        PlainWriter.write_block_header(w, func, isa, block, indent)
+    }
+
+    fn write_instruction(
+        &mut self,
+        w: &mut dyn fmt::Write,
+        func: &Function,
+        aliases: &SecondaryMap<Value, Vec<Value>>,
+        isa: Option<&dyn isa::TargetIsa>,
+        inst: Inst,
+        indent: usize,
+    ) -> fmt::Result {
+        PlainWriter.write_instruction(w, func, aliases, isa, inst, indent)?;
+        if let Some(comment) = self.entity_comments.get(&inst.into()) {
+            writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+        }
+        Ok(())
+    }
+}
+
+impl FunctionCx<'_, '_, '_> {
+    pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+        self.clif_comments.add_global_comment(comment);
+    }
+
+    pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+        &mut self,
+        entity: E,
+        comment: S,
+    ) {
+        self.clif_comments.add_comment(entity, comment);
+    }
+}
+
+pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
+    tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+}
+
+pub(crate) fn write_ir_file(
+    tcx: TyCtxt<'_>,
+    name: impl FnOnce() -> String,
+    write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
+) {
+    if !should_write_ir(tcx) {
+        return;
+    }
+
+    let clif_output_dir = tcx.output_filenames(()).with_extension("clif");
+
+    match std::fs::create_dir(&clif_output_dir) {
+        Ok(()) => {}
+        Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+        res @ Err(_) => res.unwrap(),
+    }
+
+    let clif_file_name = clif_output_dir.join(name());
+
+    let res = std::fs::File::create(clif_file_name).and_then(|mut file| write(&mut file));
+    if let Err(err) = res {
+        tcx.sess.warn(&format!("error writing ir file: {}", err));
+    }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    postfix: &str,
+    isa: &dyn cranelift_codegen::isa::TargetIsa,
+    instance: Instance<'tcx>,
+    context: &cranelift_codegen::Context,
+    mut clif_comments: &CommentWriter,
+) {
+    write_ir_file(
+        tcx,
+        || format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
+        |file| {
+            let mut clif = String::new();
+            cranelift_codegen::write::decorate_function(
+                &mut clif_comments,
+                &mut clif,
+                &context.func,
+                &DisplayFunctionAnnotations { isa: Some(isa), value_ranges: None },
+            )
+            .unwrap();
+
+            for flag in isa.flags().iter() {
+                writeln!(file, "set {}", flag)?;
+            }
+            write!(file, "target {}", isa.triple().architecture.to_string())?;
+            for isa_flag in isa.isa_flags().iter() {
+                write!(file, " {}", isa_flag)?;
+            }
+            writeln!(file, "\n")?;
+            writeln!(file)?;
+            file.write_all(clif.as_bytes())?;
+            Ok(())
+        },
+    );
+}
+
+impl fmt::Debug for FunctionCx<'_, '_, '_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(f, "{:?}", self.instance.substs)?;
+        writeln!(f, "{:?}", self.local_map)?;
+
+        let mut clif = String::new();
+        ::cranelift_codegen::write::decorate_function(
+            &mut &self.clif_comments,
+            &mut clif,
+            &self.bcx.func,
+            &DisplayFunctionAnnotations::default(),
+        )
+        .unwrap();
+        writeln!(f, "\n{}", clif)
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
new file mode 100644
index 00000000000..f86236ef3ea
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -0,0 +1,31 @@
+//! Locating various executables that are part of a C toolchain.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::link::linker_and_flavor;
+use rustc_session::Session;
+
+/// Tries to infer the path of a binary for the target toolchain from the linker name.
+pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
+    let (mut linker, _linker_flavor) = linker_and_flavor(sess);
+    let linker_file_name = linker
+        .file_name()
+        .and_then(|name| name.to_str())
+        .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+
+    if linker_file_name == "ld.lld" {
+        if tool != "ld" {
+            linker.set_file_name(tool)
+        }
+    } else {
+        let tool_file_name = linker_file_name
+            .replace("ld", tool)
+            .replace("gcc", tool)
+            .replace("clang", tool)
+            .replace("cc", tool);
+
+        linker.set_file_name(tool_file_name)
+    }
+
+    linker
+}
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
new file mode 100644
index 00000000000..21d3e68dbc7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -0,0 +1,78 @@
+//! Helpers used to print a message and abort in case of certain panics and some detected UB.
+
+use crate::prelude::*;
+
+fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
+    let puts = fx
+        .module
+        .declare_function(
+            "puts",
+            Linkage::Import,
+            &Signature {
+                call_conv: CallConv::triple_default(fx.triple()),
+                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
+                returns: vec![AbiParam::new(types::I32)],
+            },
+        )
+        .unwrap();
+    let puts = fx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(puts, "puts");
+    }
+
+    let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, fx.symbol_name, msg);
+    let msg_ptr = fx.anonymous_str(&real_msg);
+    fx.bcx.ins().call(puts, &[msg_ptr]);
+}
+
+/// Trap code: user1
+pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    fx.bcx.ins().trap(TrapCode::User(1));
+}
+
+/// Use this for example when a function call should never return. This will fill the current block,
+/// so you can **not** add instructions to it afterwards.
+///
+/// Trap code: unreachable_code_reached (`TrapCode::UnreachableCodeReached`)
+pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+/// Like `trap_unreachable` but returns a fake value of the specified type.
+///
+/// Trap code: unreachable_code_reached (`TrapCode::UnreachableCodeReached`)
+pub(crate) fn trap_unreachable_ret_value<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    dest_layout: TyAndLayout<'tcx>,
+    msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+    codegen_print(fx, msg.as_ref());
+    let true_ = fx.bcx.ins().iconst(types::I32, 1);
+    fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
+
+/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
+/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
+/// to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+    codegen_print(fx, msg.as_ref());
+    let true_ = fx.bcx.ins().iconst(types::I32, 1);
+    fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
+}
+
+/// Like `trap_unimplemented` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented_ret_value<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    dest_layout: TyAndLayout<'tcx>,
+    msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+    trap_unimplemented(fx, msg);
+    CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
new file mode 100644
index 00000000000..b9d379c6117
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -0,0 +1,211 @@
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
+pub(crate) fn unsized_info<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    source: Ty<'tcx>,
+    target: Ty<'tcx>,
+    old_info: Option<Value>,
+) -> Value {
+    let (source, target) =
+        fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+    match (&source.kind(), &target.kind()) {
+        (&ty::Array(_, len), &ty::Slice(_)) => fx
+            .bcx
+            .ins()
+            .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
+        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
+            // For now, upcasts are limited to changes in marker
+            // traits, and hence never actually require an actual
+            // change to the vtable.
+            old_info.expect("unsized_info: missing old info for trait upcast")
+        }
+        (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
+        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+    }
+}
+
+/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
+fn unsize_thin_ptr<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    src: Value,
+    src_layout: TyAndLayout<'tcx>,
+    dst_layout: TyAndLayout<'tcx>,
+) -> (Value, Value) {
+    match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+        (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+        | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+            assert!(!fx.layout_of(a).is_unsized());
+            (src, unsized_info(fx, a, b, None))
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+            let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+            assert!(!fx.layout_of(a).is_unsized());
+            (src, unsized_info(fx, a, b, None))
+        }
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            let mut result = None;
+            for i in 0..src_layout.fields.count() {
+                let src_f = src_layout.field(fx, i);
+                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+                if src_f.is_zst() {
+                    continue;
+                }
+                assert_eq!(src_layout.size, src_f.size);
+
+                let dst_f = dst_layout.field(fx, i);
+                assert_ne!(src_f.ty, dst_f.ty);
+                assert_eq!(result, None);
+                result = Some(unsize_thin_ptr(fx, src, src_f, dst_f));
+            }
+            result.unwrap()
+        }
+        _ => bug!("unsize_thin_ptr: called on bad types"),
+    }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty` and store the result in `dst`
+pub(crate) fn coerce_unsized_into<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    src: CValue<'tcx>,
+    dst: CPlace<'tcx>,
+) {
+    let src_ty = src.layout().ty;
+    let dst_ty = dst.layout().ty;
+    let mut coerce_ptr = || {
+        let (base, info) =
+            if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+                // fat-ptr to fat-ptr unsize preserves the vtable
+                // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
+                src.load_scalar_pair(fx)
+            } else {
+                let base = src.load_scalar(fx);
+                unsize_thin_ptr(fx, base, src.layout(), dst.layout())
+            };
+        dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+    };
+    match (&src_ty.kind(), &dst_ty.kind()) {
+        (&ty::Ref(..), &ty::Ref(..))
+        | (&ty::Ref(..), &ty::RawPtr(..))
+        | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+            assert_eq!(def_a, def_b);
+
+            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+                let src_f = src.value_field(fx, mir::Field::new(i));
+                let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+                if dst_f.layout().is_zst() {
+                    continue;
+                }
+
+                if src_f.layout().ty == dst_f.layout().ty {
+                    dst_f.write_cvalue(fx, src_f);
+                } else {
+                    coerce_unsized_into(fx, src_f, dst_f);
+                }
+            }
+        }
+        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
+    }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    info: Value,
+) -> (Value, Value) {
+    if !layout.is_unsized() {
+        let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
+        let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
+        return (size, align);
+    }
+    match layout.ty.kind() {
+        ty::Dynamic(..) => {
+            // load size/align from vtable
+            (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+        }
+        ty::Slice(_) | ty::Str => {
+            let unit = layout.field(fx, 0);
+            // The info in this case is the length of the str, so the size is that
+            // times the unit size.
+            (
+                fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+                fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+            )
+        }
+        _ => {
+            // First get the size of all statically known fields.
+            // Don't use size_of because it also rounds up to alignment, which we
+            // want to avoid, as the unsized field's alignment could be smaller.
+            assert!(!layout.ty.is_simd());
+
+            let i = layout.fields.count() - 1;
+            let sized_size = layout.fields.offset(i).bytes();
+            let sized_align = layout.align.abi.bytes();
+            let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+            // Recurse to get the size of the dynamically sized field (must be
+            // the last field).
+            let field_layout = layout.field(fx, i);
+            let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+            // FIXME (#26403, #27023): We should be adding padding
+            // to `sized_size` (to accommodate the `unsized_align`
+            // required of the unsized field that follows) before
+            // summing it with `sized_size`. (Note that since #26403
+            // is unfixed, we do not yet add the necessary padding
+            // here. But this is where the add would go.)
+
+            // Return the sum of sizes and max of aligns.
+            let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+            // Packed types ignore the alignment of their fields.
+            if let ty::Adt(def, _) = layout.ty.kind() {
+                if def.repr.packed() {
+                    unsized_align = sized_align;
+                }
+            }
+
+            // Choose max of two known alignments (combined value must
+            // be aligned according to more restrictive of the two).
+            let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+            let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+            // Issue #27023: must add any necessary padding to `size`
+            // (to make it a multiple of `align`) before returning it.
+            //
+            // Namely, the returned size should be, in C notation:
+            //
+            //   `size + ((size & (align-1)) ? align : 0)`
+            //
+            // emulated via the semi-standard fast bit trick:
+            //
+            //   `(size + (align-1)) & -align`
+            let addend = fx.bcx.ins().iadd_imm(align, -1);
+            let add = fx.bcx.ins().iadd(size, addend);
+            let neg = fx.bcx.ins().ineg(align);
+            let size = fx.bcx.ins().band(add, neg);
+
+            (size, align)
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
new file mode 100644
index 00000000000..171f39805f8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -0,0 +1,730 @@
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    base: Pointer,
+    extra: Option<Value>,
+    layout: TyAndLayout<'tcx>,
+    field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+    let field_offset = layout.fields.offset(field.index());
+    let field_layout = layout.field(&*fx, field.index());
+
+    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+    };
+
+    if let Some(extra) = extra {
+        if !field_layout.is_unsized() {
+            return simple(fx);
+        }
+        match field_layout.ty.kind() {
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+            ty::Adt(def, _) if def.repr.packed() => {
+                assert_eq!(layout.align.abi.bytes(), 1);
+                simple(fx)
+            }
+            _ => {
+                // We have to align the offset for DST's
+                let unaligned_offset = field_offset.bytes();
+                let (_, unsized_align) =
+                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
+                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+                (base.offset_value(fx, offset), field_layout)
+            }
+        }
+    } else {
+        simple(fx)
+    }
+}
+
+fn scalar_pair_calculate_b_offset(
+    tcx: TyCtxt<'_>,
+    a_scalar: &Scalar,
+    b_scalar: &Scalar,
+) -> Offset32 {
+    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
+    Offset32::new(b_offset.bytes().try_into().unwrap())
+}
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+    ByRef(Pointer, Option<Value>),
+    ByVal(Value),
+    ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+        CValue(CValueInner::ByRef(ptr, None), layout)
+    }
+
+    pub(crate) fn by_ref_unsized(
+        ptr: Pointer,
+        meta: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+    }
+
+    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+        CValue(CValueInner::ByVal(value), layout)
+    }
+
+    pub(crate) fn by_val_pair(
+        value: Value,
+        extra: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        CValue(CValueInner::ByValPair(value, extra), layout)
+    }
+
+    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+        self.1
+    }
+
+    // FIXME remove
+    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, meta) => (ptr, meta),
+            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+                let cplace = CPlace::new_stack_slot(fx, layout);
+                cplace.write_cvalue(fx, self);
+                (cplace.to_ptr(), None)
+            }
+        }
+    }
+
+    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+        match self.0 {
+            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+        }
+    }
+
+    /// Load a value with layout.abi of scalar
+    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, None) => {
+                let clif_ty = match layout.abi {
+                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
+                    Abi::Vector { ref element, count } => {
+                        scalar_to_clif_type(fx.tcx, element.clone())
+                            .by(u16::try_from(count).unwrap())
+                            .unwrap()
+                    }
+                    _ => unreachable!("{:?}", layout.ty),
+                };
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                ptr.load(fx, clif_ty, flags)
+            }
+            CValueInner::ByVal(value) => value,
+            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+        }
+    }
+
+    /// Load a value pair with layout.abi of scalar pair
+    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByRef(ptr, None) => {
+                let (a_scalar, b_scalar) = match &layout.abi {
+                    Abi::ScalarPair(a, b) => (a, b),
+                    _ => unreachable!("load_scalar_pair({:?})", self),
+                };
+                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let val1 = ptr.load(fx, clif_ty1, flags);
+                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+                (val1, val2)
+            }
+            CValueInner::ByRef(_, Some(_)) => {
+                bug!("load_scalar_pair for unsized value not allowed")
+            }
+            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+            CValueInner::ByValPair(val1, val2) => (val1, val2),
+        }
+    }
+
+    pub(crate) fn value_field(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        field: mir::Field,
+    ) -> CValue<'tcx> {
+        let layout = self.1;
+        match self.0 {
+            CValueInner::ByVal(val) => match layout.abi {
+                Abi::Vector { element: _, count } => {
+                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+                    let field = u8::try_from(field.index()).unwrap();
+                    assert!(field < count);
+                    let lane = fx.bcx.ins().extractlane(val, field);
+                    let field_layout = layout.field(&*fx, usize::from(field));
+                    CValue::by_val(lane, field_layout)
+                }
+                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+            },
+            CValueInner::ByValPair(val1, val2) => match layout.abi {
+                Abi::ScalarPair(_, _) => {
+                    let val = match field.as_u32() {
+                        0 => val1,
+                        1 => val2,
+                        _ => bug!("field should be 0 or 1"),
+                    };
+                    let field_layout = layout.field(&*fx, usize::from(field));
+                    CValue::by_val(val, field_layout)
+                }
+                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+            },
+            CValueInner::ByRef(ptr, None) => {
+                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+                CValue::by_ref(field_ptr, field_layout)
+            }
+            CValueInner::ByRef(_, Some(_)) => todo!(),
+        }
+    }
+
+    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+        crate::unsize::coerce_unsized_into(fx, self, dest);
+    }
+
+    /// Create a [`CValue`] containing the given compile-time scalar constant.
+    ///
+    /// If `ty` is signed, `const_val` must already be sign extended.
+    pub(crate) fn const_val(
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        layout: TyAndLayout<'tcx>,
+        const_val: ty::ScalarInt,
+    ) -> CValue<'tcx> {
+        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+        let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+        // Sanity check: a bool constant must be exactly 0 or 1.
+        if let ty::Bool = layout.ty.kind() {
+            assert!(
+                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+                "Invalid bool 0x{:032X}",
+                const_val
+            );
+        }
+
+        let val = match layout.ty.kind() {
+            // There is no 128 bit iconst; build the value from two 64 bit halves.
+            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+                let const_val = const_val.to_bits(layout.size).unwrap();
+                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+                fx.bcx.ins().iconcat(lsb, msb)
+            }
+            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+            }
+            // Floats are materialized from their raw bit pattern.
+            ty::Float(FloatTy::F32) => {
+                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+            }
+            ty::Float(FloatTy::F64) => {
+                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+            }
+            _ => panic!(
+                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+                layout.ty
+            ),
+        };
+
+        CValue::by_val(val, layout)
+    }
+
+    /// Reinterpret this pointer-like value (`&T`, `*T` or `fn`) as another
+    /// pointer-like type with the same ABI, without changing the bits.
+    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
+        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+        assert_eq!(self.layout().abi, layout.abi);
+        CValue(self.0, layout)
+    }
+}
+
+/// A place where you can write a value to or read a value from
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+    // How the place is stored: SSA variable(s) or a memory address.
+    inner: CPlaceInner,
+    // Type and layout of the value stored in this place.
+    layout: TyAndLayout<'tcx>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+    /// A single Cranelift SSA variable holding the value of the given MIR local.
+    Var(Local, Variable),
+    /// Two SSA variables, used for scalar pair layouts.
+    VarPair(Local, Variable, Variable),
+    /// A single lane of a vector stored in an SSA variable.
+    VarLane(Local, Variable, u8),
+    /// A memory address with optional extra metadata for unsized places
+    /// (e.g. slice length or vtable pointer).
+    Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+    /// The type and layout of the value stored in this place.
+    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+        self.layout
+    }
+
+    /// The underlying storage of this place.
+    pub(crate) fn inner(&self) -> &CPlaceInner {
+        &self.inner
+    }
+
+    /// Create a zero-sized place backed by a dangling, never dereferenced pointer.
+    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+        CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
+    }
+
+    /// Allocate a fresh stack slot big enough for `layout` and return a place
+    /// pointing at it. Zero-sized layouts get a dangling pointer instead.
+    pub(crate) fn new_stack_slot(
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        assert!(!layout.is_unsized());
+        if layout.size.bytes() == 0 {
+            return CPlace::no_place(layout);
+        }
+
+        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+            kind: StackSlotKind::ExplicitSlot,
+            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+            // specify stack slot alignment.
+            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
+            offset: None,
+        });
+        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
+    }
+
+    /// Create a place backed by a fresh Cranelift SSA variable for `local`.
+    pub(crate) fn new_var(
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        local: Local,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        let var = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
+        CPlace { inner: CPlaceInner::Var(local, var), layout }
+    }
+
+    /// Create a place backed by two fresh SSA variables, one per component of
+    /// the scalar pair layout of `layout.ty`.
+    pub(crate) fn new_var_pair(
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        local: Local,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        let var1 = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+        let var2 = Variable::with_u32(fx.next_ssa_var);
+        fx.next_ssa_var += 1;
+
+        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+        fx.bcx.declare_var(var1, ty1);
+        fx.bcx.declare_var(var2, ty2);
+        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
+    }
+
+    /// Create a place referring to the sized value stored at `ptr`.
+    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
+    }
+
+    /// Create a place referring to the unsized value stored at `ptr`, with
+    /// `extra` holding the pointer metadata (e.g. slice length or vtable).
+    pub(crate) fn for_ptr_with_extra(
+        ptr: Pointer,
+        extra: Value,
+        layout: TyAndLayout<'tcx>,
+    ) -> CPlace<'tcx> {
+        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
+    }
+
+    /// Read the value currently stored in this place.
+    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
+        let layout = self.layout();
+        match self.inner {
+            CPlaceInner::Var(_local, var) => {
+                let val = fx.bcx.use_var(var);
+                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                CValue::by_val(val, layout)
+            }
+            CPlaceInner::VarPair(_local, var1, var2) => {
+                let val1 = fx.bcx.use_var(var1);
+                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+                let val2 = fx.bcx.use_var(var2);
+                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+                CValue::by_val_pair(val1, val2, layout)
+            }
+            CPlaceInner::VarLane(_local, var, lane) => {
+                // Extract the single lane out of the vector variable.
+                let val = fx.bcx.use_var(var);
+                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                let val = fx.bcx.ins().extractlane(val, lane);
+                CValue::by_val(val, layout)
+            }
+            CPlaceInner::Addr(ptr, extra) => {
+                if let Some(extra) = extra {
+                    CValue::by_ref_unsized(ptr, extra, layout)
+                } else {
+                    CValue::by_ref(ptr, layout)
+                }
+            }
+        }
+    }
+
+    /// Get the address of this place, asserting that it is sized.
+    pub(crate) fn to_ptr(self) -> Pointer {
+        match self.to_ptr_maybe_unsized() {
+            (ptr, None) => ptr,
+            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+        }
+    }
+
+    /// Get the address of this place together with the unsized metadata, if
+    /// any. Panics if the place lives in SSA variables instead of memory.
+    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+        match self.inner {
+            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+            CPlaceInner::Var(_, _)
+            | CPlaceInner::VarPair(_, _, _)
+            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+        }
+    }
+
+    /// Write `from` into this place, checking first that its type is
+    /// assignable to the place's type (see [`assert_assignable`]).
+    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
+        assert_assignable(fx, from.layout().ty, self.layout().ty);
+
+        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+    }
+
+    /// Write `from` into this place without the assignability check; only the
+    /// sizes of both layouts must match.
+    pub(crate) fn write_cvalue_transmute(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        from: CValue<'tcx>,
+    ) {
+        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+    }
+
+    /// Shared implementation of [`Self::write_cvalue`] and
+    /// [`Self::write_cvalue_transmute`]. `method` is only used for the
+    /// optional clif comments.
+    fn write_cvalue_maybe_transmute(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        from: CValue<'tcx>,
+        method: &'static str,
+    ) {
+        // Define SSA variable `var` to `data`, inserting a bitcast when the
+        // source and destination clif types differ but have the same size.
+        fn transmute_value<'tcx>(
+            fx: &mut FunctionCx<'_, '_, 'tcx>,
+            var: Variable,
+            data: Value,
+            dst_ty: Type,
+        ) {
+            let src_ty = fx.bcx.func.dfg.value_type(data);
+            assert_eq!(
+                src_ty.bytes(),
+                dst_ty.bytes(),
+                "write_cvalue_transmute: {:?} -> {:?}",
+                src_ty,
+                dst_ty,
+            );
+            let data = match (src_ty, dst_ty) {
+                (_, _) if src_ty == dst_ty => data,
+
+                // This is a `write_cvalue_transmute`.
+                (types::I32, types::F32)
+                | (types::F32, types::I32)
+                | (types::I64, types::F64)
+                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
+                _ if src_ty.is_vector() && dst_ty.is_vector() => {
+                    fx.bcx.ins().raw_bitcast(dst_ty, data)
+                }
+                _ if src_ty.is_vector() || dst_ty.is_vector() => {
+                    // FIXME do something more efficient for transmutes between vectors and integers.
+                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+                        kind: StackSlotKind::ExplicitSlot,
+                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+                        // specify stack slot alignment.
+                        size: (src_ty.bytes() + 15) / 16 * 16,
+                        offset: None,
+                    });
+                    // Round-trip through a stack slot to reinterpret the bytes.
+                    let ptr = Pointer::stack_slot(stack_slot);
+                    ptr.store(fx, data, MemFlags::trusted());
+                    ptr.load(fx, dst_ty, MemFlags::trusted())
+                }
+                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+            };
+            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+            fx.bcx.def_var(var, data);
+        }
+
+        assert_eq!(self.layout().size, from.layout().size);
+
+        // When clif comments are enabled, annotate the last emitted instruction
+        // with a description of what is written where.
+        if fx.clif_comments.enabled() {
+            use cranelift_codegen::cursor::{Cursor, CursorPosition};
+            let cur_block = match fx.bcx.cursor().position() {
+                CursorPosition::After(block) => block,
+                _ => unreachable!(),
+            };
+            fx.add_comment(
+                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+                format!(
+                    "{}: {:?}: {:?} <- {:?}: {:?}",
+                    method,
+                    self.inner(),
+                    self.layout().ty,
+                    from.0,
+                    from.layout().ty
+                ),
+            );
+        }
+
+        // SSA variable destinations are handled directly and return early;
+        // memory destinations fall through to the stores below.
+        let dst_layout = self.layout();
+        let to_ptr = match self.inner {
+            CPlaceInner::Var(_local, var) => {
+                let data = CValue(from.0, dst_layout).load_scalar(fx);
+                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+                transmute_value(fx, var, data, dst_ty);
+                return;
+            }
+            CPlaceInner::VarPair(_local, var1, var2) => {
+                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
+                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+                transmute_value(fx, var1, data1, dst_ty1);
+                transmute_value(fx, var2, data2, dst_ty2);
+                return;
+            }
+            CPlaceInner::VarLane(_local, var, lane) => {
+                let data = from.load_scalar(fx);
+
+                // First get the old vector
+                let vector = fx.bcx.use_var(var);
+                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+                // Next insert the written lane into the vector
+                let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+                // Finally write the new vector
+                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                fx.bcx.def_var(var, vector);
+
+                return;
+            }
+            CPlaceInner::Addr(ptr, None) => {
+                // Nothing to write for ZSTs and uninhabited types.
+                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+                    return;
+                }
+                ptr
+            }
+            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+        };
+
+        let mut flags = MemFlags::new();
+        flags.set_notrap();
+        // Scalar and scalar pair abis can be stored directly without a memcpy.
+        match from.layout().abi {
+            // FIXME make Abi::Vector work too
+            Abi::Scalar(_) => {
+                let val = from.load_scalar(fx);
+                to_ptr.store(fx, val, flags);
+                return;
+            }
+            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+                let (value, extra) = from.load_scalar_pair(fx);
+                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                to_ptr.store(fx, value, flags);
+                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+                return;
+            }
+            _ => {}
+        }
+
+        match from.0 {
+            CValueInner::ByVal(val) => {
+                to_ptr.store(fx, val, flags);
+            }
+            CValueInner::ByValPair(_, _) => {
+                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+            }
+            CValueInner::ByRef(from_ptr, None) => {
+                // Copy the value byte for byte from source to destination.
+                let from_addr = from_ptr.get_addr(fx);
+                let to_addr = to_ptr.get_addr(fx);
+                let src_layout = from.1;
+                let size = dst_layout.size.bytes();
+                let src_align = src_layout.align.abi.bytes() as u8;
+                let dst_align = dst_layout.align.abi.bytes() as u8;
+                fx.bcx.emit_small_memory_copy(
+                    fx.module.target_config(),
+                    to_addr,
+                    from_addr,
+                    size,
+                    dst_align,
+                    src_align,
+                    true,
+                    MemFlags::trusted(),
+                );
+            }
+            CValueInner::ByRef(_, Some(_)) => todo!(),
+        }
+    }
+
+    /// Project to the given field of this place.
+    pub(crate) fn place_field(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        field: mir::Field,
+    ) -> CPlace<'tcx> {
+        let layout = self.layout();
+
+        match self.inner {
+            CPlaceInner::Var(local, var) => {
+                // A vector stored in a single SSA variable: project to one lane.
+                if let Abi::Vector { .. } = layout.abi {
+                    return CPlace {
+                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                    };
+                }
+            }
+            CPlaceInner::VarPair(local, var1, var2) => {
+                let layout = layout.field(&*fx, field.index());
+
+                match field.as_u32() {
+                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
+                    _ => unreachable!("field should be 0 or 1"),
+                }
+            }
+            _ => {}
+        }
+
+        // Fall back to a memory based projection.
+        let (base, extra) = self.to_ptr_maybe_unsized();
+
+        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+        if field_layout.is_unsized() {
+            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+        } else {
+            CPlace::for_ptr(field_ptr, field_layout)
+        }
+    }
+
+    /// Project to element `index` of this array or slice place.
+    pub(crate) fn place_index(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        index: Value,
+    ) -> CPlace<'tcx> {
+        let (elem_layout, ptr) = match self.layout().ty.kind() {
+            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
+            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
+            _ => bug!("place_index({:?})", self.layout().ty),
+        };
+
+        // Byte offset of the element: index * element size.
+        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
+
+        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+    }
+
+    /// Dereference this pointer place, yielding the place it points to.
+    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
+        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+        // Fat pointers carry their metadata into the resulting unsized place.
+        if has_ptr_meta(fx.tcx, inner_layout.ty) {
+            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+        } else {
+            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+        }
+    }
+
+    /// Take the address of this place as a value of the given pointer `layout`;
+    /// unsized places produce a (pointer, metadata) pair.
+    pub(crate) fn place_ref(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        layout: TyAndLayout<'tcx>,
+    ) -> CValue<'tcx> {
+        if has_ptr_meta(fx.tcx, self.layout().ty) {
+            let (ptr, extra) = self.to_ptr_maybe_unsized();
+            CValue::by_val_pair(
+                ptr.get_addr(fx),
+                extra.expect("unsized type without metadata"),
+                layout,
+            )
+        } else {
+            CValue::by_val(self.to_ptr().get_addr(fx), layout)
+        }
+    }
+
+    /// Reinterpret this place as the given enum variant without changing the
+    /// underlying storage.
+    pub(crate) fn downcast_variant(
+        self,
+        fx: &FunctionCx<'_, '_, 'tcx>,
+        variant: VariantIdx,
+    ) -> Self {
+        assert!(!self.layout().is_unsized());
+        let layout = self.layout().for_variant(fx, variant);
+        CPlace { inner: self.inner, layout }
+    }
+}
+
+/// Assert that a value of type `from_ty` may be written to a place of type
+/// `to_ty`. The types are allowed to differ in lifetimes and in pointer
+/// mutability; anything else must match exactly.
+#[track_caller]
+pub(crate) fn assert_assignable<'tcx>(
+    fx: &FunctionCx<'_, '_, 'tcx>,
+    from_ty: Ty<'tcx>,
+    to_ty: Ty<'tcx>,
+) {
+    match (from_ty.kind(), to_ty.kind()) {
+        // Pointers and references only need assignable pointees.
+        (ty::Ref(_, a, _), ty::Ref(_, b, _))
+        | (
+            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+        ) => {
+            assert_assignable(fx, a, b);
+        }
+        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
+        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
+            assert_assignable(fx, a, b);
+        }
+        (ty::FnPtr(_), ty::FnPtr(_)) => {
+            // Compare signatures with late bound regions erased.
+            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+                ParamEnv::reveal_all(),
+                from_ty.fn_sig(fx.tcx),
+            );
+            let to_sig = fx
+                .tcx
+                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+            assert_eq!(
+                from_sig, to_sig,
+                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+                from_sig, to_sig, fx,
+            );
+            // fn(&T) -> for<'l> fn(&'l T) is allowed
+        }
+        (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
+            // NOTE(review): `zip` stops at the shorter of the two lists, so a
+            // differing number of traits is not detected here — confirm intended.
+            for (from, to) in from_traits.iter().zip(to_traits) {
+                let from =
+                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+                assert_eq!(
+                    from, to,
+                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+                    from_traits, to_traits, fx,
+                );
+            }
+            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+        }
+        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+            if adt_def_a.did == adt_def_b.did =>
+        {
+            // Same ADT: recurse into the type arguments.
+            let mut types_a = substs_a.types();
+            let mut types_b = substs_b.types();
+            loop {
+                match (types_a.next(), types_b.next()) {
+                    (Some(a), Some(b)) => assert_assignable(fx, a, b),
+                    (None, None) => return,
+                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+                }
+            }
+        }
+        _ => {
+            assert_eq!(
+                from_ty, to_ty,
+                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+                from_ty, to_ty, fx,
+            );
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
new file mode 100644
index 00000000000..4a5f9f133a2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -0,0 +1,79 @@
+//! Codegen vtables and vtable accesses.
+//!
+//! See `rustc_codegen_ssa/src/meth.rs` for reference.
+
+use crate::constant::data_id_for_alloc_id;
+use crate::prelude::*;
+
+/// Memory flags used for every vtable load.
+fn vtable_memflags() -> MemFlags {
+    let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+    flags.set_readonly(); // A vtable is always read-only.
+    flags
+}
+
+/// Load the `drop_in_place` function pointer from the given vtable.
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
+    )
+}
+
+/// Load the size of the underlying object from the given vtable.
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
+    )
+}
+
+/// Load the minimum alignment of the underlying object from the given vtable.
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+    fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
+    )
+}
+
+/// Given a fat pointer to a trait object, return the data pointer and the
+/// function pointer stored at index `idx` of its vtable.
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    arg: CValue<'tcx>,
+    idx: usize,
+) -> (Value, Value) {
+    // The fat pointer is either passed as a scalar pair or stored in memory.
+    let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+        arg.load_scalar_pair(fx)
+    } else {
+        let (ptr, vtable) = arg.try_to_ptr().unwrap();
+        (ptr.get_addr(fx), vtable.unwrap())
+    };
+
+    let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
+    let func_ref = fx.bcx.ins().load(
+        pointer_ty(fx.tcx),
+        vtable_memflags(),
+        vtable,
+        (idx * usize_size as usize) as i32,
+    );
+    (ptr, func_ref)
+}
+
+/// Get a pointer to the vtable of `ty` for the given trait, creating the
+/// vtable allocation on demand via `tcx.vtable_allocation`.
+pub(crate) fn get_vtable<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    ty: Ty<'tcx>,
+    trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+    let alloc_id = fx.tcx.vtable_allocation(ty, trait_ref);
+    let data_id =
+        data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
+    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+    if fx.clif_comments.enabled() {
+        fx.add_comment(local_data_id, format!("vtable: {:?}", alloc_id));
+    }
+    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}
diff --git a/compiler/rustc_codegen_cranelift/test.sh b/compiler/rustc_codegen_cranelift/test.sh
new file mode 100755
index 00000000000..a10924628bb
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/test.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -e
+
+# Build cg_clif without a sysroot and run the tests that don't need one.
+./y.rs build --sysroot none "$@"
+
+# Remove stale test artifacts from previous runs (ignore if absent).
+rm -r target/out || true
+
+scripts/tests.sh no_sysroot
+
+# Rebuild with the default (clif) sysroot and run the remaining suites.
+./y.rs build "$@"
+
+scripts/tests.sh base_sysroot
+scripts/tests.sh extended_sysroot
diff --git a/compiler/rustc_codegen_cranelift/y.rs b/compiler/rustc_codegen_cranelift/y.rs
new file mode 100755
index 00000000000..43937588b48
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/y.rs
@@ -0,0 +1,153 @@
+#!/usr/bin/env bash
+#![allow()] /*This line is ignored by bash
+# This block is ignored by rustc
+set -e
+echo "[BUILD] y.rs" 1>&2
+rustc $0 -o ${0/.rs/.bin} -g
+exec ${0/.rs/.bin} $@
+*/
+
+//! The build system for cg_clif
+//!
+//! # Manual compilation
+//!
+//! If your system doesn't support shell scripts you can manually compile and run this file using
+//! for example:
+//!
+//! ```shell
+//! $ rustc y.rs -o build/y.bin
+//! $ build/y.bin
+//! ```
+//!
+//! # Naming
+//!
+//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
+
+use std::env;
+use std::path::PathBuf;
+use std::process;
+
+#[path = "build_system/build_backend.rs"]
+mod build_backend;
+#[path = "build_system/build_sysroot.rs"]
+mod build_sysroot;
+#[path = "build_system/config.rs"]
+mod config;
+#[path = "build_system/prepare.rs"]
+mod prepare;
+#[path = "build_system/rustc_info.rs"]
+mod rustc_info;
+#[path = "build_system/utils.rs"]
+mod utils;
+
+/// Print the accepted command lines to stderr.
+fn usage() {
+    eprintln!("Usage:");
+    eprintln!("  ./y.rs prepare");
+    eprintln!("  ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR]");
+}
+
+/// Report a command line error, print the usage text and exit with failure.
+macro_rules! arg_error {
+    ($($err:tt)*) => {{
+        eprintln!($($err)*);
+        usage();
+        std::process::exit(1);
+    }};
+}
+
+/// Subcommand selected on the command line (`prepare` is handled inline in
+/// `main` and exits early, so it has no variant here).
+enum Command {
+    Build,
+}
+
+/// Which sysroot `./y.rs build` should produce.
+#[derive(Copy, Clone)]
+enum SysrootKind {
+    /// Don't build a sysroot.
+    None,
+    /// Build a sysroot with cg_clif (the default).
+    Clif,
+    /// Use the LLVM-built sysroot of the host rustc.
+    Llvm,
+}
+
+/// Entry point of the build system: set up the environment, parse the command
+/// line, determine the host/target triples and dispatch to the selected
+/// command.
+fn main() {
+    env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
+    env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
+    // The target dir is expected in the default location. Guard against the user changing it.
+    env::set_var("CARGO_TARGET_DIR", "target");
+
+    let mut args = env::args().skip(1);
+    let command = match args.next().as_deref() {
+        Some("prepare") => {
+            if args.next().is_some() {
+                // Fixed: message previously said `./x.rs`, but this script is `y.rs`.
+                arg_error!("./y.rs prepare doesn't expect arguments");
+            }
+            prepare::prepare();
+            process::exit(0);
+        }
+        Some("build") => Command::Build,
+        Some(flag) if flag.starts_with('-') => arg_error!("Expected command found flag {}", flag),
+        Some(command) => arg_error!("Unknown command {}", command),
+        None => {
+            usage();
+            process::exit(0);
+        }
+    };
+
+    // Defaults for the flags accepted by `./y.rs build`.
+    let mut target_dir = PathBuf::from("build");
+    let mut channel = "release";
+    let mut sysroot_kind = SysrootKind::Clif;
+    while let Some(arg) = args.next().as_deref() {
+        match arg {
+            "--target-dir" => {
+                target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
+                    arg_error!("--target-dir requires argument");
+                }))
+            }
+            "--debug" => channel = "debug",
+            "--sysroot" => {
+                sysroot_kind = match args.next().as_deref() {
+                    Some("none") => SysrootKind::None,
+                    Some("clif") => SysrootKind::Clif,
+                    Some("llvm") => SysrootKind::Llvm,
+                    Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
+                    None => arg_error!("--sysroot requires argument"),
+                }
+            }
+            flag if flag.starts_with('-') => arg_error!("Unknown flag {}", flag),
+            arg => arg_error!("Unexpected argument {}", arg),
+        }
+    }
+
+    // Host triple: HOST_TRIPLE env var > config.txt `host` entry > rustc default.
+    let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
+        host_triple
+    } else if let Some(host_triple) = crate::config::get_value("host") {
+        host_triple
+    } else {
+        rustc_info::get_host_triple()
+    };
+    // Target triple: TARGET_TRIPLE env var > config.txt `target` entry > host triple.
+    let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
+        if !target_triple.is_empty() {
+            target_triple
+        } else {
+            host_triple.clone() // Empty target triple can happen on GHA
+        }
+    } else if let Some(target_triple) = crate::config::get_value("target") {
+        target_triple
+    } else {
+        host_triple.clone()
+    };
+
+    if target_triple.ends_with("-msvc") {
+        eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
+        eprintln!("Switch to the MinGW toolchain for Windows support.");
+        eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
+        eprintln!("set the global default target to MinGW");
+        process::exit(1);
+    }
+
+    // Build the backend itself, then the requested sysroot with it.
+    let cg_clif_build_dir = build_backend::build_backend(channel, &host_triple);
+    build_sysroot::build_sysroot(
+        channel,
+        sysroot_kind,
+        &target_dir,
+        cg_clif_build_dir,
+        &host_triple,
+        &target_triple,
+    );
+}