about summary refs log tree commit diff
path: root/compiler/rustc_data_structures/src
diff options
context:
space:
mode:
author:    Ralf Jung <post@ralfj.de>  2024-10-14 17:04:43 +0200
committer: Ralf Jung <post@ralfj.de>  2024-10-14 17:04:43 +0200
commit:    9d579f5358d6722fa20cbf243e201defbd5d84b3 (patch)
tree:      456828aefcdd5ca1414414fdb876f8c528d82dff /compiler/rustc_data_structures/src
parent:    5e6170b97f81b83041666170ceeadefe04d00fb4 (diff)
parent:    17a19e684cdf3ca088af8b4da6a6209d128913f4 (diff)
download:  rust-9d579f5358d6722fa20cbf243e201defbd5d84b3.tar.gz
           rust-9d579f5358d6722fa20cbf243e201defbd5d84b3.zip
Merge from rustc
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/stack.rs   | 4
-rw-r--r--  compiler/rustc_data_structures/src/unhash.rs  | 1
2 files changed, 5 insertions, 0 deletions
diff --git a/compiler/rustc_data_structures/src/stack.rs b/compiler/rustc_data_structures/src/stack.rs
index 7ff1339c5ab..3d6d0003483 100644
--- a/compiler/rustc_data_structures/src/stack.rs
+++ b/compiler/rustc_data_structures/src/stack.rs
@@ -5,7 +5,11 @@ const RED_ZONE: usize = 100 * 1024; // 100k
 
 // Only the first stack that is pushed, grows exponentially (2^n * STACK_PER_RECURSION) from then
 // on. This flag has performance relevant characteristics. Don't set it too high.
+#[cfg(not(target_os = "aix"))]
 const STACK_PER_RECURSION: usize = 1024 * 1024; // 1MB
+// LLVM for AIX doesn't feature TCO, increase recursion size for workaround.
+#[cfg(target_os = "aix")]
+const STACK_PER_RECURSION: usize = 16 * 1024 * 1024; // 16MB
 
 /// Grows the stack on demand to prevent stack overflow. Call this in strategic locations
 /// to "break up" recursive calls. E.g. almost any call to `visit_expr` or equivalent can benefit
diff --git a/compiler/rustc_data_structures/src/unhash.rs b/compiler/rustc_data_structures/src/unhash.rs
index 48e21a9dab1..0617ef83aad 100644
--- a/compiler/rustc_data_structures/src/unhash.rs
+++ b/compiler/rustc_data_structures/src/unhash.rs
@@ -3,6 +3,7 @@ use std::hash::{BuildHasherDefault, Hasher};
 
 pub type UnhashMap<K, V> = HashMap<K, V, BuildHasherDefault<Unhasher>>;
 pub type UnhashSet<V> = HashSet<V, BuildHasherDefault<Unhasher>>;
+pub type UnindexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<Unhasher>>;
 
 /// This no-op hasher expects only a single `write_u64` call. It's intended for
 /// map keys that already have hash-like quality, like `Fingerprint`.