diff options
| author | Nicholas Nethercote <nnethercote@mozilla.com> | 2019-03-28 12:27:26 +1100 |
|---|---|---|
| committer | Nicholas Nethercote <nnethercote@mozilla.com> | 2019-03-29 09:32:58 +1100 |
| commit | 17a8aff20abdef46ae90801c85cc232e81443e1b (patch) | |
| tree | fdc560f228255949dae032b2f28409dbdf991580 /src/libsyntax | |
| parent | 4c27fb19ba15a2e45485e601a79914c6280196b0 (diff) | |
| download | rust-17a8aff20abdef46ae90801c85cc232e81443e1b.tar.gz rust-17a8aff20abdef46ae90801c85cc232e81443e1b.zip | |
Use `SmallVec` in `TokenStreamBuilder`.
This reduces by 12% the number of allocations done for a "clean incremental" of `webrender_api`, which reduces the instruction count by about 0.5%. It also reduces instruction counts by up to 1.4% across a range of rustc-perf benchmark runs.
Diffstat (limited to 'src/libsyntax')
| -rw-r--r-- | src/libsyntax/parse/attr.rs | 3 | ||||
| -rw-r--r-- | src/libsyntax/tokenstream.rs | 14 |
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index 4211268f33e..e99a86e807f 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -6,6 +6,7 @@ use crate::parse::parser::{Parser, TokenType, PathStyle}; use crate::tokenstream::{TokenStream, TokenTree}; use log::debug; +use smallvec::smallvec; #[derive(Debug)] enum InnerAttributeParsePolicy<'a> { @@ -171,7 +172,7 @@ impl<'a> Parser<'a> { } else { self.parse_unsuffixed_lit()?.tokens() }; - TokenStream::from_streams(vec![eq.into(), tokens]) + TokenStream::from_streams(smallvec![eq.into(), tokens]) } else { TokenStream::empty() }; diff --git a/src/libsyntax/tokenstream.rs b/src/libsyntax/tokenstream.rs index 80a7bde606a..2d47b982ebd 100644 --- a/src/libsyntax/tokenstream.rs +++ b/src/libsyntax/tokenstream.rs @@ -24,6 +24,7 @@ use syntax_pos::{BytePos, Mark, Span, DUMMY_SP}; use rustc_data_structures::static_assert; use rustc_data_structures::sync::Lrc; use serialize::{Decoder, Decodable, Encoder, Encodable}; +use smallvec::{SmallVec, smallvec}; use std::borrow::Cow; use std::{fmt, iter, mem}; @@ -224,7 +225,7 @@ impl From<Token> for TokenStream { impl<T: Into<TokenStream>> iter::FromIterator<T> for TokenStream { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { - TokenStream::from_streams(iter.into_iter().map(Into::into).collect::<Vec<_>>()) + TokenStream::from_streams(iter.into_iter().map(Into::into).collect::<SmallVec<_>>()) } } @@ -256,7 +257,7 @@ impl TokenStream { } } - pub(crate) fn from_streams(mut streams: Vec<TokenStream>) -> TokenStream { + pub(crate) fn from_streams(mut streams: SmallVec<[TokenStream; 2]>) -> TokenStream { match streams.len() { 0 => TokenStream::empty(), 1 => streams.pop().unwrap(), @@ -393,12 +394,13 @@ impl TokenStream { } } +// 99.5%+ of the time we have 1 or 2 elements in this vector. 
#[derive(Clone)] -pub struct TokenStreamBuilder(Vec<TokenStream>); +pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>); impl TokenStreamBuilder { pub fn new() -> TokenStreamBuilder { - TokenStreamBuilder(Vec::new()) + TokenStreamBuilder(SmallVec::new()) } pub fn push<T: Into<TokenStream>>(&mut self, stream: T) { @@ -485,7 +487,7 @@ impl Cursor { } let index = self.index; let stream = mem::replace(&mut self.stream, TokenStream(None)); - *self = TokenStream::from_streams(vec![stream, new_stream]).into_trees(); + *self = TokenStream::from_streams(smallvec![stream, new_stream]).into_trees(); self.index = index; } @@ -572,7 +574,7 @@ mod tests { let test_res = string_to_ts("foo::bar::baz"); let test_fst = string_to_ts("foo::bar"); let test_snd = string_to_ts("::baz"); - let eq_res = TokenStream::from_streams(vec![test_fst, test_snd]); + let eq_res = TokenStream::from_streams(smallvec![test_fst, test_snd]); assert_eq!(test_res.trees().count(), 5); assert_eq!(eq_res.trees().count(), 5); assert_eq!(test_res.eq_unspanned(&eq_res), true); |
