| field | value | date |
|---|---|---|
| author | Alex Crichton <alex@alexcrichton.com> | 2015-01-21 09:13:51 -0800 |
| committer | Alex Crichton <alex@alexcrichton.com> | 2015-01-21 09:13:51 -0800 |
| commit | 0c981875e46763a9b3cd53443bf73dfd3e291d18 | |
| tree | b0d1f49551beab62865f5945d588a8a65931c9f5 /src/libsyntax/parse | |
| parent | 5da25386b3e70a5a538f75fbd5b42a8db04dd93d | |
| parent | 3c32cd1be27f321658382e39d34f5d993d99ae8b | |
rollup merge of #21340: pshc/libsyntax-no-more-ints
Collaboration with @rylev! I didn't change `int` in the [quasi-quoter](https://github.com/pshc/rust/blob/99ae1a30f3ca28c0f7e431620560d30e44627124/src/libsyntax/ext/quote.rs#L328), because I'm not sure if there will be adverse effects. Addresses #21095.
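For context: `int` and `uint` were the old names of Rust's pointer-sized integer types, renamed to `isize` and `usize` in the run-up to 1.0; the literal suffixes were likewise renamed from `i`/`u` to the short-lived `is`/`us` forms that appear throughout this patch (`0us`, `1us`, and so on). A minimal before/after sketch of the mechanical rewrite — the helper below is invented for illustration and is not code from the patch:

```rust
// Old spelling (kept in a comment because it no longer compiles):
//
//     fn leading_whitespace_len(s: &str) -> Option<uint> {
//         let mut len = 0u; // `u` was the uint literal suffix
//         ...
//     }

// New spelling introduced by this patch. (In 2015 the literal suffix was
// written `0us`; modern Rust spells it `0usize` or just infers it.)
fn leading_whitespace_len(s: &str) -> Option<usize> {
    let mut len: usize = 0; // usize replaces uint for lengths and offsets
    for ch in s.chars() {
        if !ch.is_whitespace() {
            break;
        }
        len += ch.len_utf8(); // advance by the char's width in bytes
    }
    if len == 0 { None } else { Some(len) }
}

fn main() {
    assert_eq!(leading_whitespace_len("  x"), Some(2));
    assert_eq!(leading_whitespace_len("x"), None);
}
```

Because `isize`/`usize` have exactly the same size and semantics as the old `int`/`uint`, the rewrite is a pure rename, which is why the diffstat below shows matching insertion and deletion counts.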
Diffstat (limited to 'src/libsyntax/parse')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libsyntax/parse/lexer/comments.rs | 24 |
| -rw-r--r-- | src/libsyntax/parse/lexer/mod.rs | 60 |
| -rw-r--r-- | src/libsyntax/parse/mod.rs | 52 |
| -rw-r--r-- | src/libsyntax/parse/obsolete.rs | 2 |
| -rw-r--r-- | src/libsyntax/parse/parser.rs | 56 |
| -rw-r--r-- | src/libsyntax/parse/token.rs | 8 |
6 files changed, 101 insertions, 101 deletions
diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs
index 16ade904be8..2799696e8eb 100644
--- a/src/libsyntax/parse/lexer/comments.rs
+++ b/src/libsyntax/parse/lexer/comments.rs
@@ -22,7 +22,7 @@ use print::pprust;
 use std::io;
 use std::str;
 use std::string::String;
-use std::uint;
+use std::usize;
 
 #[derive(Clone, Copy, PartialEq)]
 pub enum CommentStyle {
@@ -62,7 +62,7 @@ pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
 pub fn strip_doc_comment_decoration(comment: &str) -> String {
     /// remove whitespace-only lines from the start/end of lines
     fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
-        let mut i = 0u;
+        let mut i = 0us;
         let mut j = lines.len();
         // first line of all-stars should be omitted
         if lines.len() > 0 &&
@@ -87,7 +87,7 @@ pub fn strip_doc_comment_decoration(comment: &str) -> String {
 
     /// remove a "[ \t]*\*" block from each line, if possible
     fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
-        let mut i = uint::MAX;
+        let mut i = usize::MAX;
         let mut can_trim = true;
         let mut first = true;
         for line in lines.iter() {
@@ -132,7 +132,7 @@ pub fn strip_doc_comment_decoration(comment: &str) -> String {
     }
 
     if comment.starts_with("/*") {
-        let lines = comment[3u..(comment.len() - 2u)]
+        let lines = comment[3us..(comment.len() - 2us)]
                         .lines_any()
                         .map(|s| s.to_string())
                         .collect::<Vec<String> >();
@@ -158,7 +158,7 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
 fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
                                            comments: &mut Vec<Comment>) {
     while is_whitespace(rdr.curr) && !rdr.is_eof() {
-        if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
+        if rdr.col == CharPos(0us) && rdr.curr_is('\n') {
            push_blank_line_comment(rdr, &mut *comments);
         }
         rdr.bump();
@@ -206,10 +206,10 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
 /// Returns None if the first col chars of s contain a non-whitespace char.
 /// Otherwise returns Some(k) where k is first char offset after that leading
 /// whitespace. Note k may be outside bounds of s.
-fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
+fn all_whitespace(s: &str, col: CharPos) -> Option<usize> {
     let len = s.len();
-    let mut col = col.to_uint();
-    let mut cursor: uint = 0;
+    let mut col = col.to_usize();
+    let mut cursor: usize = 0;
     while col > 0 && cursor < len {
         let r: str::CharRange = s.char_range_at(cursor);
         if !r.ch.is_whitespace() {
@@ -267,7 +267,7 @@ fn read_block_comment(rdr: &mut StringReader,
         assert!(!curr_line.contains_char('\n'));
         lines.push(curr_line);
     } else {
-        let mut level: int = 1;
+        let mut level: isize = 1;
         while level > 0 {
             debug!("=== block comment level {}", level);
             if rdr.is_eof() {
@@ -305,7 +305,7 @@ fn read_block_comment(rdr: &mut StringReader,
 
     let mut style = if code_to_the_left { Trailing } else { Isolated };
     rdr.consume_non_eol_whitespace();
-    if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1u {
+    if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1us {
         style = Mixed;
     }
     debug!("<<< block comment");
@@ -399,9 +399,9 @@ mod test {
     }
 
     #[test] fn test_block_doc_comment_3() {
-        let comment = "/**\n let a: *int;\n *a = 5;\n*/";
+        let comment = "/**\n let a: *i32;\n *a = 5;\n*/";
         let stripped = strip_doc_comment_decoration(comment);
-        assert_eq!(stripped, " let a: *int;\n *a = 5;");
+        assert_eq!(stripped, " let a: *i32;\n *a = 5;");
     }
 
     #[test] fn test_block_doc_comment_4() {
diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs
index 4cdafb36eec..d18bf554975 100644
--- a/src/libsyntax/parse/lexer/mod.rs
+++ b/src/libsyntax/parse/lexer/mod.rs
@@ -212,8 +212,8 @@ impl<'a> StringReader<'a> {
     /// offending string to the error message
     fn fatal_span_verbose(&self, from_pos: BytePos, to_pos: BytePos, mut m: String) -> ! {
         m.push_str(": ");
-        let from = self.byte_offset(from_pos).to_uint();
-        let to = self.byte_offset(to_pos).to_uint();
+        let from = self.byte_offset(from_pos).to_usize();
+        let to = self.byte_offset(to_pos).to_usize();
         m.push_str(&self.filemap.src[from..to]);
         self.fatal_span_(from_pos, to_pos, &m[]);
     }
@@ -272,14 +272,14 @@ impl<'a> StringReader<'a> {
         F: FnOnce(&str) -> T,
     {
         f(self.filemap.src.slice(
-                self.byte_offset(start).to_uint(),
-                self.byte_offset(end).to_uint()))
+                self.byte_offset(start).to_usize(),
+                self.byte_offset(end).to_usize()))
     }
 
     /// Converts CRLF to LF in the given string, raising an error on bare CR.
     fn translate_crlf<'b>(&self, start: BytePos,
                           s: &'b str, errmsg: &'b str) -> CowString<'b> {
-        let mut i = 0u;
+        let mut i = 0us;
         while i < s.len() {
             let str::CharRange { ch, next } = s.char_range_at(i);
             if ch == '\r' {
@@ -295,7 +295,7 @@ impl<'a> StringReader<'a> {
         return s.into_cow();
 
         fn translate_crlf_(rdr: &StringReader, start: BytePos,
-                        s: &str, errmsg: &str, mut i: uint) -> String {
+                        s: &str, errmsg: &str, mut i: usize) -> String {
             let mut buf = String::with_capacity(s.len());
             let mut j = 0;
             while i < s.len() {
@@ -321,7 +321,7 @@ impl<'a> StringReader<'a> {
     /// discovered, add it to the FileMap's list of line start offsets.
     pub fn bump(&mut self) {
         self.last_pos = self.pos;
-        let current_byte_offset = self.byte_offset(self.pos).to_uint();
+        let current_byte_offset = self.byte_offset(self.pos).to_usize();
         if current_byte_offset < self.filemap.src.len() {
             assert!(self.curr.is_some());
             let last_char = self.curr.unwrap();
@@ -329,12 +329,12 @@ impl<'a> StringReader<'a> {
                 .src
                 .char_range_at(current_byte_offset);
             let byte_offset_diff = next.next - current_byte_offset;
-            self.pos = self.pos + Pos::from_uint(byte_offset_diff);
+            self.pos = self.pos + Pos::from_usize(byte_offset_diff);
             self.curr = Some(next.ch);
-            self.col = self.col + CharPos(1u);
+            self.col = self.col + CharPos(1us);
             if last_char == '\n' {
                 self.filemap.next_line(self.last_pos);
-                self.col = CharPos(0u);
+                self.col = CharPos(0us);
             }
 
             if byte_offset_diff > 1 {
@@ -346,7 +346,7 @@ impl<'a> StringReader<'a> {
     }
 
     pub fn nextch(&self) -> Option<char> {
-        let offset = self.byte_offset(self.pos).to_uint();
+        let offset = self.byte_offset(self.pos).to_usize();
         if offset < self.filemap.src.len() {
             Some(self.filemap.src.char_at(offset))
         } else {
@@ -359,7 +359,7 @@ impl<'a> StringReader<'a> {
     }
 
     pub fn nextnextch(&self) -> Option<char> {
-        let offset = self.byte_offset(self.pos).to_uint();
+        let offset = self.byte_offset(self.pos).to_usize();
         let s = self.filemap.src.as_slice();
         if offset >= s.len() { return None }
         let str::CharRange { next, .. } = s.char_range_at(offset);
@@ -472,7 +472,7 @@ impl<'a> StringReader<'a> {
             cmap.files.borrow_mut().push(self.filemap.clone());
             let loc = cmap.lookup_char_pos_adj(self.last_pos);
             debug!("Skipping a shebang");
-            if loc.line == 1u && loc.col == CharPos(0u) {
+            if loc.line == 1us && loc.col == CharPos(0us) {
                 // FIXME: Add shebang "token", return it
                 let start = self.last_pos;
                 while !self.curr_is('\n') && !self.is_eof() { self.bump(); }
@@ -519,7 +519,7 @@ impl<'a> StringReader<'a> {
         let is_doc_comment = self.curr_is('*') || self.curr_is('!');
         let start_bpos = self.last_pos - BytePos(2);
 
-        let mut level: int = 1;
+        let mut level: isize = 1;
         let mut has_cr = false;
         while level > 0 {
             if self.is_eof() {
@@ -645,8 +645,8 @@ impl<'a> StringReader<'a> {
 
     /// Scan through any digits (base `radix`) or underscores, and return how
     /// many digits there were.
-    fn scan_digits(&mut self, radix: uint) -> uint {
-        let mut len = 0u;
+    fn scan_digits(&mut self, radix: usize) -> usize {
+        let mut len = 0us;
         loop {
             let c = self.curr;
             if c == Some('_') { debug!("skipping a _"); self.bump(); continue; }
@@ -724,7 +724,7 @@ impl<'a> StringReader<'a> {
     /// Scan over `n_digits` hex digits, stopping at `delim`, reporting an
     /// error if too many or too few digits are encountered.
     fn scan_hex_digits(&mut self,
-                       n_digits: uint,
+                       n_digits: usize,
                        delim: char,
                        below_0x7f_only: bool)
                        -> bool {
@@ -799,14 +799,14 @@ impl<'a> StringReader<'a> {
                         if self.curr == Some('{') {
                             self.scan_unicode_escape(delim)
                         } else {
-                            let res = self.scan_hex_digits(4u, delim, false);
+                            let res = self.scan_hex_digits(4us, delim, false);
                             let sp = codemap::mk_sp(escaped_pos, self.last_pos);
                             self.old_escape_warning(sp);
                             res
                         }
                     }
                     'U' if !ascii_only => {
-                        let res = self.scan_hex_digits(8u, delim, false);
+                        let res = self.scan_hex_digits(8us, delim, false);
                         let sp = codemap::mk_sp(escaped_pos, self.last_pos);
                         self.old_escape_warning(sp);
                         res
@@ -877,7 +877,7 @@ impl<'a> StringReader<'a> {
     fn scan_unicode_escape(&mut self, delim: char) -> bool {
         self.bump(); // past the {
         let start_bpos = self.last_pos;
-        let mut count: uint = 0;
+        let mut count = 0us;
         let mut accum_int = 0;
 
         while !self.curr_is('}') && count <= 6 {
@@ -935,13 +935,13 @@ impl<'a> StringReader<'a> {
 
     /// Check that a base is valid for a floating literal, emitting a nice
     /// error if it isn't.
-    fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: uint) {
+    fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: usize) {
         match base {
-            16u => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \
-                                  supported"),
-            8u => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"),
-            2u => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"),
-            _ => ()
+            16us => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \
+                                   supported"),
+            8us => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"),
+            2us => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"),
+            _ => ()
         }
     }
 
@@ -1189,7 +1189,7 @@ impl<'a> StringReader<'a> {
             'r' => {
                 let start_bpos = self.last_pos;
                 self.bump();
-                let mut hash_count = 0u;
+                let mut hash_count = 0us;
                 while self.curr_is('#') {
                     self.bump();
                     hash_count += 1;
@@ -1374,7 +1374,7 @@ impl<'a> StringReader<'a> {
     fn scan_raw_byte_string(&mut self) -> token::Lit {
         let start_bpos = self.last_pos;
         self.bump();
-        let mut hash_count = 0u;
+        let mut hash_count = 0us;
         while self.curr_is('#') {
             self.bump();
             hash_count += 1;
@@ -1616,9 +1616,9 @@ mod test {
         test!("1.0", Float, "1.0");
         test!("1.0e10", Float, "1.0e10");
 
-        assert_eq!(setup(&mk_sh(), "2u".to_string()).next_token().tok,
+        assert_eq!(setup(&mk_sh(), "2us".to_string()).next_token().tok,
                    token::Literal(token::Integer(token::intern("2")),
-                                  Some(token::intern("u"))));
+                                  Some(token::intern("us"))));
         assert_eq!(setup(&mk_sh(), "r###\"raw\"###suffix".to_string()).next_token().tok,
                    token::Literal(token::StrRaw(token::intern("raw"), 3),
                                   Some(token::intern("suffix"))));
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index dd376fe9e10..28adba7eee7 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -181,7 +181,7 @@ pub fn parse_tts_from_source_str(name: String,
         name,
         source
     );
-    p.quote_depth += 1u;
+    p.quote_depth += 1us;
     // right now this is re-creating the token trees from ... token trees.
     maybe_aborted(p.parse_all_token_trees(),p)
 }
@@ -324,7 +324,7 @@ pub mod with_hygiene {
             name,
             source
         );
-        p.quote_depth += 1u;
+        p.quote_depth += 1us;
         // right now this is re-creating the token trees from ... token trees.
         maybe_aborted(p.parse_all_token_trees(),p)
     }
@@ -373,7 +373,7 @@ pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T {
 /// Rather than just accepting/rejecting a given literal, unescapes it as
 /// well. Can take any slice prefixed by a character escape. Returns the
 /// character and the number of characters consumed.
-pub fn char_lit(lit: &str) -> (char, int) {
+pub fn char_lit(lit: &str) -> (char, isize) {
     use std::{num, char};
 
     let mut chars = lit.chars();
@@ -400,19 +400,19 @@ pub fn char_lit(lit: &str) -> (char, int) {
     let msg = format!("lexer should have rejected a bad character escape {}", lit);
     let msg2 = &msg[];
 
-    fn esc(len: uint, lit: &str) -> Option<(char, int)> {
+    fn esc(len: usize, lit: &str) -> Option<(char, isize)> {
         num::from_str_radix(&lit[2..len], 16)
         .and_then(char::from_u32)
-        .map(|x| (x, len as int))
+        .map(|x| (x, len as isize))
     }
 
-    let unicode_escape = |&: | -> Option<(char, int)>
+    let unicode_escape = |&: | -> Option<(char, isize)>
         if lit.as_bytes()[2] == b'{' {
             let idx = lit.find('}').expect(msg2);
             let subslice = &lit[3..idx];
             num::from_str_radix(subslice, 16)
             .and_then(char::from_u32)
-            .map(|x| (x, subslice.chars().count() as int + 4))
+            .map(|x| (x, subslice.chars().count() as isize + 4))
         } else {
             esc(6, lit)
         };
@@ -436,7 +436,7 @@ pub fn str_lit(lit: &str) -> String {
     let error = |&: i| format!("lexer should have rejected {} at {}", lit, i);
 
     /// Eat everything up to a non-whitespace
-    fn eat<'a>(it: &mut iter::Peekable<(uint, char), str::CharIndices<'a>>) {
+    fn eat<'a>(it: &mut iter::Peekable<(usize, char), str::CharIndices<'a>>) {
         loop {
             match it.peek().map(|x| x.1) {
                 Some(' ') | Some('\n') | Some('\r') | Some('\t') => {
@@ -567,13 +567,13 @@ pub fn float_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) -> a
 }
 
 /// Parse a string representing a byte literal into its final form. Similar to `char_lit`
-pub fn byte_lit(lit: &str) -> (u8, uint) {
+pub fn byte_lit(lit: &str) -> (u8, usize) {
     let err = |&: i| format!("lexer accepted invalid byte literal {} step {}", lit, i);
 
     if lit.len() == 1 {
         (lit.as_bytes()[0], 1)
     } else {
-        assert!(lit.as_bytes()[0] == b'\\', err(0i));
+        assert!(lit.as_bytes()[0] == b'\\', err(0is));
         let b = match lit.as_bytes()[1] {
             b'"' => b'"',
             b'n' => b'\n',
@@ -605,7 +605,7 @@ pub fn binary_lit(lit: &str) -> Rc<Vec<u8>> {
     let error = |&: i| format!("lexer should have rejected {} at {}", lit, i);
 
     /// Eat everything up to a non-whitespace
-    fn eat<'a, I: Iterator<Item=(uint, u8)>>(it: &mut iter::Peekable<(uint, u8), I>) {
+    fn eat<'a, I: Iterator<Item=(usize, u8)>>(it: &mut iter::Peekable<(usize, u8), I>) {
         loop {
             match it.peek().map(|x| x.1) {
                 Some(b' ') | Some(b'\n') | Some(b'\r') | Some(b'\t') => {
@@ -683,9 +683,9 @@ pub fn integer_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) ->
     match suffix {
         Some(suf) if looks_like_width_suffix(&['f'], suf) => {
             match base {
-                16u => sd.span_err(sp, "hexadecimal float literal is not supported"),
-                8u => sd.span_err(sp, "octal float literal is not supported"),
-                2u => sd.span_err(sp, "binary float literal is not supported"),
+                16us => sd.span_err(sp, "hexadecimal float literal is not supported"),
+                8us => sd.span_err(sp, "octal float literal is not supported"),
+                2us => sd.span_err(sp, "binary float literal is not supported"),
                 _ => ()
             }
             let ident = token::intern_and_get_ident(&*s);
@@ -854,7 +854,7 @@ mod test {
 
     #[test]
     fn string_to_tts_1 () {
-        let tts = string_to_tts("fn a (b : int) { b; }".to_string());
+        let tts = string_to_tts("fn a (b : i32) { b; }".to_string());
         assert_eq!(json::encode(&tts),
                    "[\
                     {\
@@ -918,7 +918,7 @@ mod test {
                             {\
                                 \"variant\":\"Ident\",\
                                 \"fields\":[\
-                                    \"int\",\
+                                    \"i32\",\
                                     \"Plain\"\
                                 ]\
                             }\
@@ -1030,8 +1030,8 @@ mod test {
 
    // check the contents of the tt manually:
    #[test] fn parse_fundecl () {
-        // this test depends on the intern order of "fn" and "int"
-        assert!(string_to_item("fn a (b : int) { b; }".to_string()) ==
+        // this test depends on the intern order of "fn" and "i32"
+        assert_eq!(string_to_item("fn a (b : i32) { b; }".to_string()),
                   Some(
                       P(ast::Item{ident:str_to_ident("a"),
                                   attrs:Vec::new(),
@@ -1045,7 +1045,7 @@ mod test {
                         segments: vec!(
                             ast::PathSegment {
                                 identifier:
-                                    str_to_ident("int"),
+                                    str_to_ident("i32"),
                                 parameters: ast::PathParameters::none(),
                             }
                         ),
@@ -1158,19 +1158,19 @@ mod test {
 
    #[test] fn span_of_self_arg_pat_idents_are_correct() {

-        let srcs = ["impl z { fn a (&self, &myarg: int) {} }",
-                    "impl z { fn a (&mut self, &myarg: int) {} }",
-                    "impl z { fn a (&'a self, &myarg: int) {} }",
-                    "impl z { fn a (self, &myarg: int) {} }",
-                    "impl z { fn a (self: Foo, &myarg: int) {} }",
+        let srcs = ["impl z { fn a (&self, &myarg: i32) {} }",
+                    "impl z { fn a (&mut self, &myarg: i32) {} }",
+                    "impl z { fn a (&'a self, &myarg: i32) {} }",
+                    "impl z { fn a (self, &myarg: i32) {} }",
+                    "impl z { fn a (self: Foo, &myarg: i32) {} }",
                     ];

        for &src in srcs.iter() {
            let spans = get_spans_of_pat_idents(src);
            let Span{ lo, hi, .. } = spans[0];
-            assert!("self" == &src[lo.to_uint()..hi.to_uint()],
+            assert!("self" == &src[lo.to_usize()..hi.to_usize()],
                     "\"{}\" != \"self\". src=\"{}\"",
-                    &src[lo.to_uint()..hi.to_uint()], src)
+                    &src[lo.to_usize()..hi.to_usize()], src)
        }
    }
diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs
index 9d03ec73af8..a3600506057 100644
--- a/src/libsyntax/parse/obsolete.rs
+++ b/src/libsyntax/parse/obsolete.rs
@@ -62,7 +62,7 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> {
                 "use a `move ||` expression instead",
             ),
             ObsoleteSyntax::ClosureType => (
-                "`|uint| -> bool` closure type syntax",
+                "`|usize| -> bool` closure type syntax",
                 "use unboxed closures instead, no type annotation needed"
             ),
             ObsoleteSyntax::Sized => (
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index 83a7504bc49..1a296d39360 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -291,11 +291,11 @@ pub struct Parser<'a> {
     /// the previous token or None (only stashed sometimes).
     pub last_token: Option<Box<token::Token>>,
     pub buffer: [TokenAndSpan; 4],
-    pub buffer_start: int,
-    pub buffer_end: int,
-    pub tokens_consumed: uint,
+    pub buffer_start: isize,
+    pub buffer_end: isize,
+    pub tokens_consumed: usize,
     pub restrictions: Restrictions,
-    pub quote_depth: uint, // not (yet) related to the quasiquoter
+    pub quote_depth: usize, // not (yet) related to the quasiquoter
     pub reader: Box<Reader+'a>,
     pub interner: Rc<token::IdentInterner>,
     /// The set of seen errors about obsolete syntax. Used to suppress
@@ -768,7 +768,7 @@ impl<'a> Parser<'a> {
         // would encounter a `>` and stop. This lets the parser handle trailing
         // commas in generic parameters, because it can stop either after
         // parsing a type or after parsing a comma.
-        for i in iter::count(0u, 1) {
+        for i in iter::count(0us, 1) {
             if self.check(&token::Gt)
                 || self.token == token::BinOp(token::Shr)
                 || self.token == token::Ge
@@ -933,9 +933,9 @@ impl<'a> Parser<'a> {
             self.reader.real_token()
         } else {
             // Avoid token copies with `replace`.
-            let buffer_start = self.buffer_start as uint;
-            let next_index = (buffer_start + 1) & 3 as uint;
-            self.buffer_start = next_index as int;
+            let buffer_start = self.buffer_start as usize;
+            let next_index = (buffer_start + 1) & 3 as usize;
+            self.buffer_start = next_index as isize;
 
             let placeholder = TokenAndSpan {
                 tok: token::Underscore,
@@ -945,7 +945,7 @@ impl<'a> Parser<'a> {
         };
         self.span = next.sp;
         self.token = next.tok;
-        self.tokens_consumed += 1u;
+        self.tokens_consumed += 1us;
         self.expected_tokens.clear();
         // check after each token
         self.check_unknown_macro_variable();
@@ -967,21 +967,21 @@ impl<'a> Parser<'a> {
         self.token = next;
         self.span = mk_sp(lo, hi);
     }
-    pub fn buffer_length(&mut self) -> int {
+    pub fn buffer_length(&mut self) -> isize {
         if self.buffer_start <= self.buffer_end {
             return self.buffer_end - self.buffer_start;
         }
         return (4 - self.buffer_start) + self.buffer_end;
     }
-    pub fn look_ahead<R, F>(&mut self, distance: uint, f: F) -> R where
+    pub fn look_ahead<R, F>(&mut self, distance: usize, f: F) -> R where
         F: FnOnce(&token::Token) -> R,
     {
-        let dist = distance as int;
+        let dist = distance as isize;
         while self.buffer_length() < dist {
-            self.buffer[self.buffer_end as uint] = self.reader.real_token();
+            self.buffer[self.buffer_end as usize] = self.reader.real_token();
             self.buffer_end = (self.buffer_end + 1) & 3;
         }
-        f(&self.buffer[((self.buffer_start + dist - 1) & 3) as uint].tok)
+        f(&self.buffer[((self.buffer_start + dist - 1) & 3) as usize].tok)
     }
     pub fn fatal(&mut self, m: &str) -> ! {
         self.sess.span_diagnostic.span_fatal(self.span, m)
@@ -1496,7 +1496,7 @@ impl<'a> Parser<'a> {
                 self.expect(&token::OpenDelim(token::Bracket));
                 let t = self.parse_ty_sum();
 
-                // Parse the `; e` in `[ int; e ]`
+                // Parse the `; e` in `[ i32; e ]`
                 // where `e` is a const expression
                 let t = match self.maybe_parse_fixed_length_of_vec() {
                     None => TyVec(t),
@@ -2084,7 +2084,7 @@ impl<'a> Parser<'a> {
         ExprField(expr, ident)
     }
 
-    pub fn mk_tup_field(&mut self, expr: P<Expr>, idx: codemap::Spanned<uint>) -> ast::Expr_ {
+    pub fn mk_tup_field(&mut self, expr: P<Expr>, idx: codemap::Spanned<usize>) -> ast::Expr_ {
         ExprTupField(expr, idx)
     }
 
@@ -2483,7 +2483,7 @@ impl<'a> Parser<'a> {
                     hi = self.span.hi;
                     self.bump();
 
-                    let index = n.as_str().parse::<uint>();
+                    let index = n.as_str().parse::<usize>();
                     match index {
                         Some(n) => {
                             let id = spanned(dot, hi, n);
@@ -2509,7 +2509,7 @@ impl<'a> Parser<'a> {
                     };
                     self.span_help(last_span,
                         &format!("try parenthesizing the first index; e.g., `(foo.{}){}`",
-                                float.trunc() as uint,
+                                float.trunc() as usize,
                                 &float.fract().to_string()[1..])[]);
                 }
                 self.abort_if_errors();
@@ -2636,7 +2636,7 @@ impl<'a> Parser<'a> {
     }
 
     pub fn check_unknown_macro_variable(&mut self) {
-        if self.quote_depth == 0u {
+        if self.quote_depth == 0us {
             match self.token {
                 token::SubstNt(name, _) =>
                     self.fatal(&format!("unknown macro variable `{}`",
@@ -2705,7 +2705,7 @@ impl<'a> Parser<'a> {
                                       token_str)[])
                 },
                 /* we ought to allow different depths of unquotation */
-                token::Dollar | token::SubstNt(..) if p.quote_depth > 0u => {
+                token::Dollar | token::SubstNt(..) if p.quote_depth > 0us => {
                     p.parse_unquoted()
                 }
                 _ => {
@@ -2863,7 +2863,7 @@ impl<'a> Parser<'a> {
     }
 
     /// Parse an expression of binops of at least min_prec precedence
-    pub fn parse_more_binops(&mut self, lhs: P<Expr>, min_prec: uint) -> P<Expr> {
+    pub fn parse_more_binops(&mut self, lhs: P<Expr>, min_prec: usize) -> P<Expr> {
         if self.expr_is_complete(&*lhs) { return lhs; }
 
         // Prevent dynamic borrow errors later on by limiting the
@@ -4795,7 +4795,7 @@ impl<'a> Parser<'a> {
                  Some(attrs))
     }
 
-    /// Parse a::B<String,int>
+    /// Parse a::B<String,i32>
     fn parse_trait_ref(&mut self) -> TraitRef {
         ast::TraitRef {
             path: self.parse_path(LifetimeAndTypesWithoutColons),
@@ -4814,7 +4814,7 @@ impl<'a> Parser<'a> {
         }
     }
 
-    /// Parse for<'l> a::B<String,int>
+    /// Parse for<'l> a::B<String,i32>
     fn parse_poly_trait_ref(&mut self) -> PolyTraitRef {
         let lifetime_defs = self.parse_late_bound_lifetime_defs();
 
@@ -5071,7 +5071,7 @@ impl<'a> Parser<'a> {
             }
         }
 
-        if first && attrs_remaining_len > 0u {
+        if first && attrs_remaining_len > 0us {
             // We parsed attributes for the first item but didn't find it
             let last_span = self.last_span;
             self.span_err(last_span,
@@ -5668,7 +5668,7 @@ impl<'a> Parser<'a> {
             return IoviItem(item);
         }
         if self.token.is_keyword(keywords::Unsafe) &&
-            self.look_ahead(1u, |t| t.is_keyword(keywords::Trait))
+            self.look_ahead(1us, |t| t.is_keyword(keywords::Trait))
         {
             // UNSAFE TRAIT ITEM
             self.expect_keyword(keywords::Unsafe);
@@ -5685,7 +5685,7 @@ impl<'a> Parser<'a> {
             return IoviItem(item);
         }
         if self.token.is_keyword(keywords::Unsafe) &&
-            self.look_ahead(1u, |t| t.is_keyword(keywords::Impl))
+            self.look_ahead(1us, |t| t.is_keyword(keywords::Impl))
         {
             // IMPL ITEM
             self.expect_keyword(keywords::Unsafe);
@@ -5715,7 +5715,7 @@ impl<'a> Parser<'a> {
             return IoviItem(item);
         }
         if self.token.is_keyword(keywords::Unsafe)
-            && self.look_ahead(1u, |t| *t != token::OpenDelim(token::Brace)) {
+            && self.look_ahead(1us, |t| *t != token::OpenDelim(token::Brace)) {
             // UNSAFE FUNCTION ITEM
             self.bump();
             let abi = if self.eat_keyword(keywords::Extern) {
@@ -6019,7 +6019,7 @@ impl<'a> Parser<'a> {
                 }
             }
         }
-        let mut rename_to = path[path.len() - 1u];
+        let mut rename_to = path[path.len() - 1us];
         let path = ast::Path {
             span: mk_sp(lo, self.last_span.hi),
             global: false,
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index e5aef12e827..e3762bb011c 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -83,9 +83,9 @@ pub enum Lit {
     Integer(ast::Name),
     Float(ast::Name),
     Str_(ast::Name),
-    StrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */
+    StrRaw(ast::Name, usize), /* raw str delimited by n hash symbols */
     Binary(ast::Name),
-    BinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */
+    BinaryRaw(ast::Name, usize), /* raw binary str delimited by n hash symbols */
 }
 
 impl Lit {
@@ -724,7 +724,7 @@ pub fn intern(s: &str) -> ast::Name {
     get_ident_interner().intern(s)
 }
 
-/// gensym's a new uint, using the current interner.
+/// gensym's a new usize, using the current interner.
 #[inline]
 pub fn gensym(s: &str) -> ast::Name {
     get_ident_interner().gensym(s)
 }
@@ -757,7 +757,7 @@ pub fn fresh_name(src: &ast::Ident) -> ast::Name {
 
 // create a fresh mark.
 pub fn fresh_mark() -> ast::Mrk {
-    gensym("mark").uint() as u32
+    gensym("mark").usize() as u32
 }
 
 #[cfg(test)]
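One implementation detail worth noting from the `parser.rs` hunks above: `look_ahead` serves tokens out of a fixed four-slot buffer and wraps indices with `& 3`, which is why the patch has to thread `isize`/`usize` casts through `buffer_start` and `buffer_end`. A standalone sketch of that indexing scheme (hypothetical types, not the parser's actual ones), written entirely on `usize`, which would have avoided the casts:

```rust
/// A minimal 4-slot ring buffer mirroring the `(start + dist - 1) & 3`
/// indexing used by Parser::look_ahead. The capacity is a power of two,
/// so `& 3` is equivalent to `% 4` and indices never go out of bounds.
struct TokenRing {
    buf: [u32; 4],
    start: usize, // index of the oldest buffered element
    end: usize,   // index one past the newest buffered element
}

impl TokenRing {
    fn new() -> TokenRing {
        TokenRing { buf: [0; 4], start: 0, end: 0 }
    }

    /// Counterpart of Parser::buffer_length, but on unsigned indices:
    /// masking handles wrap-around, so no signed arithmetic is needed.
    fn len(&self) -> usize {
        self.end.wrapping_sub(self.start) & 3
    }

    fn push(&mut self, v: u32) {
        self.buf[self.end] = v;
        self.end = (self.end + 1) & 3; // wrap within the 4 slots
    }

    /// `dist` is 1-based, like look_ahead's `distance` argument.
    fn peek(&self, dist: usize) -> u32 {
        self.buf[(self.start + dist - 1) & 3]
    }
}

fn main() {
    let mut r = TokenRing::new();
    r.push(10);
    r.push(20);
    assert_eq!(r.len(), 2);
    assert_eq!(r.peek(1), 10);
    assert_eq!(r.peek(2), 20);
}
```

With indices kept pre-masked, `start == end` means empty, so at most three slots hold distinguishable data — which suggests the parser's four-slot buffer likewise supports up to three tokens of lookahead.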
