about summary refs log tree commit diff
diff options
context:
space:
mode:
author     bors <bors@rust-lang.org>  2022-05-19 01:41:07 +0000
committer  bors <bors@rust-lang.org>  2022-05-19 01:41:07 +0000
commit     e6327bc8b8d437b66ff91d9ce798a9eb45310967 (patch)
tree       4c913afbc55e2f83b2d8c656366bd0259af8a868
parent     d8a3fc4d71bae720cc2534ff5b97164f47622e12 (diff)
parent     9babb1c0daf4c91513e4c1372426da04ed80a1d4 (diff)
download   rust-e6327bc8b8d437b66ff91d9ce798a9eb45310967.tar.gz
           rust-e6327bc8b8d437b66ff91d9ce798a9eb45310967.zip
Auto merge of #97159 - JohnTitor:rollup-ibl51vw, r=JohnTitor
Rollup of 6 pull requests

Successful merges:

 - #96866 (Switch CI bucket uploads to intelligent tiering)
 - #97062 (Couple of refactorings to cg_ssa::base::codegen_crate)
 - #97127 (Revert "Auto merge of #96441 - ChrisDenton:sync-pipes, r=m-ou-se")
 - #97131 (Improve println! documentation)
 - #97139 (Move some settings DOM generation out of JS)
 - #97152 (Update cargo)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs                                  57
-rw-r--r--  library/std/src/macros.rs                                                2
-rw-r--r--  library/std/src/os/windows/io/handle.rs                                 13
-rw-r--r--  library/std/src/sys/windows/c.rs                                         6
-rw-r--r--  library/std/src/sys/windows/handle.rs                                    5
-rw-r--r--  library/std/src/sys/windows/pipe.rs                                    101
-rw-r--r--  library/std/src/sys/windows/process.rs                                  38
-rw-r--r--  src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version      2
-rwxr-xr-x  src/ci/scripts/upload-artifacts.sh                                       3
-rw-r--r--  src/librustdoc/html/render/context.rs                                   16
-rw-r--r--  src/librustdoc/html/static/js/settings.js                               18
m---------  src/tools/cargo                                                          0
12 files changed, 74 insertions(+), 187 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 7b7e09208a2..d11f1534153 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -15,8 +15,9 @@ use rustc_attr as attr;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
 
+use rustc_data_structures::sync::par_iter;
 #[cfg(parallel_compiler)]
-use rustc_data_structures::sync::{par_iter, ParallelIterator};
+use rustc_data_structures::sync::ParallelIterator;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_hir::lang_items::LangItem;
@@ -607,6 +608,14 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
         second_half.iter().rev().interleave(first_half).copied().collect()
     };
 
+    // Calculate the CGU reuse
+    let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+        codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
+    });
+
+    let mut total_codegen_time = Duration::new(0, 0);
+    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
+
     // The non-parallel compiler can only translate codegen units to LLVM IR
     // on a single thread, leading to a staircase effect where the N LLVM
     // threads have to wait on the single codegen threads to generate work
@@ -617,8 +626,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
     // This likely is a temporary measure. Once we don't have to support the
     // non-parallel compiler anymore, we can compile CGUs end-to-end in
     // parallel and get rid of the complicated scheduling logic.
-    #[cfg(parallel_compiler)]
-    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
+    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
         tcx.sess.time("compile_first_CGU_batch", || {
             // Try to find one CGU to compile per thread.
             let cgus: Vec<_> = cgu_reuse
@@ -638,48 +646,31 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
                 })
                 .collect();
 
-            (pre_compiled_cgus, start_time.elapsed())
+            total_codegen_time += start_time.elapsed();
+
+            pre_compiled_cgus
         })
+    } else {
+        FxHashMap::default()
     };
 
-    #[cfg(not(parallel_compiler))]
-    let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));
-
-    let mut cgu_reuse = Vec::new();
-    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
-    let mut total_codegen_time = Duration::new(0, 0);
-    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
-
     for (i, cgu) in codegen_units.iter().enumerate() {
         ongoing_codegen.wait_for_signal_to_codegen_item();
         ongoing_codegen.check_for_errors(tcx.sess);
 
-        // Do some setup work in the first iteration
-        if pre_compiled_cgus.is_none() {
-            // Calculate the CGU reuse
-            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
-                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
-            });
-            // Pre compile some CGUs
-            let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
-            pre_compiled_cgus = Some(compiled_cgus);
-            total_codegen_time += codegen_time;
-        }
-
         let cgu_reuse = cgu_reuse[i];
         tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
 
         match cgu_reuse {
             CguReuse::No => {
-                let (module, cost) =
-                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
-                        cgu
-                    } else {
-                        let start_time = Instant::now();
-                        let module = backend.compile_codegen_unit(tcx, cgu.name());
-                        total_codegen_time += start_time.elapsed();
-                        module
-                    };
+                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+                    cgu
+                } else {
+                    let start_time = Instant::now();
+                    let module = backend.compile_codegen_unit(tcx, cgu.name());
+                    total_codegen_time += start_time.elapsed();
+                    module
+                };
                 // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                 // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                 // compilation hang on post-monomorphization errors.
diff --git a/library/std/src/macros.rs b/library/std/src/macros.rs
index e512c0d81a0..c7348951511 100644
--- a/library/std/src/macros.rs
+++ b/library/std/src/macros.rs
@@ -72,7 +72,7 @@ macro_rules! print {
 /// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
 /// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
 ///
-/// Use the [`format!`] syntax to write data to the standard output.
+/// This macro uses the same syntax as [`format!`], but writes to the standard output instead.
 /// See [`std::fmt`] for more information.
 ///
 /// Use `println!` only for the primary output of your program. Use
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
index 0ecac6b4475..90a5b7466fe 100644
--- a/library/std/src/os/windows/io/handle.rs
+++ b/library/std/src/os/windows/io/handle.rs
@@ -204,19 +204,6 @@ impl OwnedHandle {
         })?;
         unsafe { Ok(Self::from_raw_handle(ret)) }
     }
-
-    /// Allow child processes to inherit the handle.
-    #[cfg(not(target_vendor = "uwp"))]
-    pub(crate) fn set_inheritable(&self) -> io::Result<()> {
-        cvt(unsafe {
-            c::SetHandleInformation(
-                self.as_raw_handle(),
-                c::HANDLE_FLAG_INHERIT,
-                c::HANDLE_FLAG_INHERIT,
-            )
-        })?;
-        Ok(())
-    }
 }
 
 impl TryFrom<HandleOrInvalid> for OwnedHandle {
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 0bb6fee60c9..27776fdf533 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -1026,12 +1026,6 @@ extern "system" {
         bWaitAll: BOOL,
         dwMilliseconds: DWORD,
     ) -> DWORD;
-    pub fn CreatePipe(
-        hReadPipe: *mut HANDLE,
-        hWritePipe: *mut HANDLE,
-        lpPipeAttributes: *const SECURITY_ATTRIBUTES,
-        nSize: DWORD,
-    ) -> BOOL;
     pub fn CreateNamedPipeW(
         lpName: LPCWSTR,
         dwOpenMode: DWORD,
diff --git a/library/std/src/sys/windows/handle.rs b/library/std/src/sys/windows/handle.rs
index c319cb28630..ef9a8bd6900 100644
--- a/library/std/src/sys/windows/handle.rs
+++ b/library/std/src/sys/windows/handle.rs
@@ -221,11 +221,6 @@ impl Handle {
         Ok(Self(self.0.duplicate(access, inherit, options)?))
     }
 
-    #[cfg(not(target_vendor = "uwp"))]
-    pub(crate) fn set_inheritable(&self) -> io::Result<()> {
-        self.0.set_inheritable()
-    }
-
     /// Performs a synchronous read.
     ///
     /// If the handle is opened for asynchronous I/O then this abort the process.
diff --git a/library/std/src/sys/windows/pipe.rs b/library/std/src/sys/windows/pipe.rs
index 2c586f1abe4..013c776c476 100644
--- a/library/std/src/sys/windows/pipe.rs
+++ b/library/std/src/sys/windows/pipe.rs
@@ -18,20 +18,13 @@ use crate::sys_common::IntoInner;
 // Anonymous pipes
 ////////////////////////////////////////////////////////////////////////////////
 
-// A 64kb pipe capacity is the same as a typical Linux default.
-const PIPE_BUFFER_CAPACITY: u32 = 64 * 1024;
-
-pub enum AnonPipe {
-    Sync(Handle),
-    Async(Handle),
+pub struct AnonPipe {
+    inner: Handle,
 }
 
 impl IntoInner<Handle> for AnonPipe {
     fn into_inner(self) -> Handle {
-        match self {
-            Self::Sync(handle) => handle,
-            Self::Async(handle) => handle,
-        }
+        self.inner
     }
 }
 
@@ -39,46 +32,6 @@ pub struct Pipes {
     pub ours: AnonPipe,
     pub theirs: AnonPipe,
 }
-impl Pipes {
-    /// Create a new pair of pipes where both pipes are synchronous.
-    ///
-    /// These must not be used asynchronously.
-    pub fn new_synchronous(
-        ours_readable: bool,
-        their_handle_inheritable: bool,
-    ) -> io::Result<Self> {
-        unsafe {
-            // If `CreatePipe` succeeds, these will be our pipes.
-            let mut read = ptr::null_mut();
-            let mut write = ptr::null_mut();
-
-            if c::CreatePipe(&mut read, &mut write, ptr::null(), PIPE_BUFFER_CAPACITY) == 0 {
-                Err(io::Error::last_os_error())
-            } else {
-                let (ours, theirs) = if ours_readable { (read, write) } else { (write, read) };
-                let ours = Handle::from_raw_handle(ours);
-                #[cfg(not(target_vendor = "uwp"))]
-                let theirs = Handle::from_raw_handle(theirs);
-                #[cfg(target_vendor = "uwp")]
-                let mut theirs = Handle::from_raw_handle(theirs);
-
-                if their_handle_inheritable {
-                    #[cfg(not(target_vendor = "uwp"))]
-                    {
-                        theirs.set_inheritable()?;
-                    }
-
-                    #[cfg(target_vendor = "uwp")]
-                    {
-                        theirs = theirs.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)?;
-                    }
-                }
-
-                Ok(Pipes { ours: AnonPipe::Sync(ours), theirs: AnonPipe::Sync(theirs) })
-            }
-        }
-    }
-}
 
 /// Although this looks similar to `anon_pipe` in the Unix module it's actually
 /// subtly different. Here we'll return two pipes in the `Pipes` return value,
@@ -100,6 +53,9 @@ impl Pipes {
 /// with `OVERLAPPED` instances, but also works out ok if it's only ever used
 /// once at a time (which we do indeed guarantee).
 pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Result<Pipes> {
+    // A 64kb pipe capacity is the same as a typical Linux default.
+    const PIPE_BUFFER_CAPACITY: u32 = 64 * 1024;
+
     // Note that we specifically do *not* use `CreatePipe` here because
     // unfortunately the anonymous pipes returned do not support overlapped
     // operations. Instead, we create a "hopefully unique" name and create a
@@ -200,9 +156,12 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res
         };
         opts.security_attributes(&mut sa);
         let theirs = File::open(Path::new(&name), &opts)?;
-        let theirs = AnonPipe::Sync(theirs.into_inner());
+        let theirs = AnonPipe { inner: theirs.into_inner() };
 
-        Ok(Pipes { ours: AnonPipe::Async(ours), theirs })
+        Ok(Pipes {
+            ours: AnonPipe { inner: ours },
+            theirs: AnonPipe { inner: theirs.into_inner() },
+        })
     }
 }
 
@@ -212,12 +171,12 @@ pub fn anon_pipe(ours_readable: bool, their_handle_inheritable: bool) -> io::Res
 /// This is achieved by creating a new set of pipes and spawning a thread that
 /// relays messages between the source and the synchronous pipe.
 pub fn spawn_pipe_relay(
-    source: &Handle,
+    source: &AnonPipe,
     ours_readable: bool,
     their_handle_inheritable: bool,
 ) -> io::Result<AnonPipe> {
     // We need this handle to live for the lifetime of the thread spawned below.
-    let source = AnonPipe::Async(source.duplicate(0, true, c::DUPLICATE_SAME_ACCESS)?);
+    let source = source.duplicate()?;
 
     // create a new pair of anon pipes.
     let Pipes { theirs, ours } = anon_pipe(ours_readable, their_handle_inheritable)?;
@@ -268,24 +227,19 @@ type AlertableIoFn = unsafe extern "system" fn(
 
 impl AnonPipe {
     pub fn handle(&self) -> &Handle {
-        match self {
-            Self::Async(ref handle) => handle,
-            Self::Sync(ref handle) => handle,
-        }
+        &self.inner
     }
     pub fn into_handle(self) -> Handle {
-        self.into_inner()
+        self.inner
+    }
+    fn duplicate(&self) -> io::Result<Self> {
+        self.inner.duplicate(0, false, c::DUPLICATE_SAME_ACCESS).map(|inner| AnonPipe { inner })
     }
 
     pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> {
         let result = unsafe {
             let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
-            match self {
-                Self::Sync(ref handle) => handle.read(buf),
-                Self::Async(_) => {
-                    self.alertable_io_internal(c::ReadFileEx, buf.as_mut_ptr() as _, len)
-                }
-            }
+            self.alertable_io_internal(c::ReadFileEx, buf.as_mut_ptr() as _, len)
         };
 
         match result {
@@ -299,33 +253,28 @@ impl AnonPipe {
     }
 
     pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
-        io::default_read_vectored(|buf| self.read(buf), bufs)
+        self.inner.read_vectored(bufs)
     }
 
     #[inline]
     pub fn is_read_vectored(&self) -> bool {
-        false
+        self.inner.is_read_vectored()
     }
 
     pub fn write(&self, buf: &[u8]) -> io::Result<usize> {
         unsafe {
             let len = crate::cmp::min(buf.len(), c::DWORD::MAX as usize) as c::DWORD;
-            match self {
-                Self::Sync(ref handle) => handle.write(buf),
-                Self::Async(_) => {
-                    self.alertable_io_internal(c::WriteFileEx, buf.as_ptr() as _, len)
-                }
-            }
+            self.alertable_io_internal(c::WriteFileEx, buf.as_ptr() as _, len)
         }
     }
 
     pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
-        io::default_write_vectored(|buf| self.write(buf), bufs)
+        self.inner.write_vectored(bufs)
     }
 
     #[inline]
     pub fn is_write_vectored(&self) -> bool {
-        false
+        self.inner.is_write_vectored()
     }
 
     /// Synchronizes asynchronous reads or writes using our anonymous pipe.
@@ -397,7 +346,7 @@ impl AnonPipe {
 
         // Asynchronous read of the pipe.
         // If successful, `callback` will be called once it completes.
-        let result = io(self.handle().as_handle(), buf, len, &mut overlapped, callback);
+        let result = io(self.inner.as_handle(), buf, len, &mut overlapped, callback);
         if result == c::FALSE {
             // We can return here because the call failed.
             // After this we must not return until the I/O completes.
diff --git a/library/std/src/sys/windows/process.rs b/library/std/src/sys/windows/process.rs
index 8e5325b80e4..9fd399f4ba1 100644
--- a/library/std/src/sys/windows/process.rs
+++ b/library/std/src/sys/windows/process.rs
@@ -23,7 +23,7 @@ use crate::sys::cvt;
 use crate::sys::fs::{File, OpenOptions};
 use crate::sys::handle::Handle;
 use crate::sys::path;
-use crate::sys::pipe::{self, AnonPipe, Pipes};
+use crate::sys::pipe::{self, AnonPipe};
 use crate::sys::stdio;
 use crate::sys_common::mutex::StaticMutex;
 use crate::sys_common::process::{CommandEnv, CommandEnvs};
@@ -172,7 +172,7 @@ pub enum Stdio {
     Inherit,
     Null,
     MakePipe,
-    AsyncPipe(Handle),
+    Pipe(AnonPipe),
     Handle(Handle),
 }
 
@@ -527,33 +527,13 @@ impl Stdio {
             },
 
             Stdio::MakePipe => {
-                // Handles that are passed to a child process must be synchronous
-                // because they will be read synchronously (see #95759).
-                // Therefore we prefer to make both ends of a pipe synchronous
-                // just in case our end of the pipe is passed to another process.
-                //
-                // However, we may need to read from both the child's stdout and
-                // stderr simultaneously when waiting for output. This requires
-                // async reads so as to avoid blocking either pipe.
-                //
-                // The solution used here is to make handles synchronous
-                // except for our side of the stdout and sterr pipes.
-                // If our side of those pipes do end up being given to another
-                // process then we use a "pipe relay" to synchronize access
-                // (see `Stdio::AsyncPipe` below).
-                let pipes = if stdio_id == c::STD_INPUT_HANDLE {
-                    // For stdin both sides of the pipe are synchronous.
-                    Pipes::new_synchronous(false, true)?
-                } else {
-                    // For stdout/stderr our side of the pipe is async and their side is synchronous.
-                    pipe::anon_pipe(true, true)?
-                };
+                let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
+                let pipes = pipe::anon_pipe(ours_readable, true)?;
                 *pipe = Some(pipes.ours);
                 Ok(pipes.theirs.into_handle())
             }
 
-            Stdio::AsyncPipe(ref source) => {
-                // We need to synchronize asynchronous pipes by using a pipe relay.
+            Stdio::Pipe(ref source) => {
                 let ours_readable = stdio_id != c::STD_INPUT_HANDLE;
                 pipe::spawn_pipe_relay(source, ours_readable, true).map(AnonPipe::into_handle)
             }
@@ -582,13 +562,7 @@ impl Stdio {
 
 impl From<AnonPipe> for Stdio {
     fn from(pipe: AnonPipe) -> Stdio {
-        // Note that it's very important we don't give async handles to child processes.
-        // Therefore if the pipe is asynchronous we must have a way to turn it synchronous.
-        // See #95759.
-        match pipe {
-            AnonPipe::Sync(handle) => Stdio::Handle(handle),
-            AnonPipe::Async(handle) => Stdio::AsyncPipe(handle),
-        }
+        Stdio::Pipe(pipe)
     }
 }
 
diff --git a/src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version b/src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version
index f76f9131742..b3ec1638fda 100644
--- a/src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version
+++ b/src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version
@@ -1 +1 @@
-0.9.2
\ No newline at end of file
+0.9.3
\ No newline at end of file
diff --git a/src/ci/scripts/upload-artifacts.sh b/src/ci/scripts/upload-artifacts.sh
index 312ec9d8050..cea9b770f2a 100755
--- a/src/ci/scripts/upload-artifacts.sh
+++ b/src/ci/scripts/upload-artifacts.sh
@@ -38,4 +38,5 @@ if [[ "${DEPLOY_ALT-0}" -eq "1" ]]; then
 fi
 deploy_url="s3://${DEPLOY_BUCKET}/${deploy_dir}/$(ciCommit)"
 
-retry aws s3 cp --no-progress --recursive --acl public-read "${upload_dir}" "${deploy_url}"
+retry aws s3 cp --storage-class INTELLIGENT_TIERING \
+    --no-progress --recursive --acl public-read "${upload_dir}" "${deploy_url}"
diff --git a/src/librustdoc/html/render/context.rs b/src/librustdoc/html/render/context.rs
index 528180288de..81f961992b6 100644
--- a/src/librustdoc/html/render/context.rs
+++ b/src/librustdoc/html/render/context.rs
@@ -596,9 +596,19 @@ impl<'tcx> FormatRenderer<'tcx> for Context<'tcx> {
             |buf: &mut Buffer| {
                 write!(
                     buf,
-                    "<link rel=\"stylesheet\" type=\"text/css\" \
-                        href=\"{root_path}settings{suffix}.css\">\
-                    <script defer src=\"{root_path}settings{suffix}.js\"></script>",
+                    "<div class=\"main-heading\">\
+                     <h1 class=\"fqn\">\
+                         <span class=\"in-band\">Rustdoc settings</span>\
+                     </h1>\
+                     <span class=\"out-of-band\">\
+                         <a id=\"back\" href=\"javascript:void(0)\" onclick=\"history.back();\">\
+                            Back\
+                        </a>\
+                     </span>\
+                     </div>\
+                     <link rel=\"stylesheet\" type=\"text/css\" \
+                         href=\"{root_path}settings{suffix}.css\">\
+                     <script defer src=\"{root_path}settings{suffix}.js\"></script>",
                     root_path = page.static_root_path.unwrap_or(""),
                     suffix = page.resource_suffix,
                 )
diff --git a/src/librustdoc/html/static/js/settings.js b/src/librustdoc/html/static/js/settings.js
index 2e2305029cd..8770cc3f3b1 100644
--- a/src/librustdoc/html/static/js/settings.js
+++ b/src/librustdoc/html/static/js/settings.js
@@ -206,22 +206,8 @@
         ];
 
         // Then we build the DOM.
-        let innerHTML = "";
-        let elementKind = "div";
-
-        if (isSettingsPage) {
-            elementKind = "section";
-            innerHTML = `<div class="main-heading">
-                <h1 class="fqn">
-                    <span class="in-band">Rustdoc settings</span>
-                </h1>
-                <span class="out-of-band">
-                    <a id="back" href="javascript:void(0)" onclick="history.back();">Back</a>
-                </span>
-                </div>`;
-        }
-        innerHTML += `<div class="settings">${buildSettingsPageSections(settings)}</div>`;
-
+        const elementKind = isSettingsPage ? "section" : "div";
+        const innerHTML = `<div class="settings">${buildSettingsPageSections(settings)}</div>`;
         const el = document.createElement(elementKind);
         el.id = "settings";
         el.innerHTML = innerHTML;
diff --git a/src/tools/cargo b/src/tools/cargo
-Subproject 3f052d8eed98c6a24f8b332fb2e6e6249d12d8c
+Subproject a4c1cd0eb6b18082a7e693f5a665548fe1534be