From a32249d4477f449646162bbad607c39d0ad7f3ca Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Sat, 17 Jan 2015 23:33:05 +0000 Subject: libsyntax: uint types to usize --- src/libsyntax/parse/lexer/comments.rs | 8 ++++---- src/libsyntax/parse/lexer/mod.rs | 10 +++++----- src/libsyntax/parse/mod.rs | 26 +++++++++++++------------- src/libsyntax/parse/obsolete.rs | 2 +- src/libsyntax/parse/parser.rs | 22 +++++++++++----------- src/libsyntax/parse/token.rs | 6 +++--- 6 files changed, 37 insertions(+), 37 deletions(-) (limited to 'src/libsyntax/parse') diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index 16ade904be8..59a3c3bcb3e 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -22,7 +22,7 @@ use print::pprust; use std::io; use std::str; use std::string::String; -use std::uint; +use std::usize; #[derive(Clone, Copy, PartialEq)] pub enum CommentStyle { @@ -87,7 +87,7 @@ pub fn strip_doc_comment_decoration(comment: &str) -> String { /// remove a "[ \t]*\*" block from each line, if possible fn horizontal_trim(lines: Vec ) -> Vec { - let mut i = uint::MAX; + let mut i = usize::MAX; let mut can_trim = true; let mut first = true; for line in lines.iter() { @@ -206,10 +206,10 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool, /// Returns None if the first col chars of s contain a non-whitespace char. /// Otherwise returns Some(k) where k is first char offset after that leading /// whitespace. Note k may be outside bounds of s. -fn all_whitespace(s: &str, col: CharPos) -> Option { +fn all_whitespace(s: &str, col: CharPos) -> Option { let len = s.len(); let mut col = col.to_uint(); - let mut cursor: uint = 0; + let mut cursor: usize = 0; while col > 0 && cursor < len { let r: str::CharRange = s.char_range_at(cursor); if !r.ch.is_whitespace() { diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 4cdafb36eec..cd159f7c599 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -295,7 +295,7 @@ impl<'a> StringReader<'a> { return s.into_cow(); fn translate_crlf_(rdr: &StringReader, start: BytePos, - s: &str, errmsg: &str, mut i: uint) -> String { + s: &str, errmsg: &str, mut i: usize) -> String { let mut buf = String::with_capacity(s.len()); let mut j = 0; while i < s.len() { @@ -645,7 +645,7 @@ impl<'a> StringReader<'a> { /// Scan through any digits (base `radix`) or underscores, and return how /// many digits there were. - fn scan_digits(&mut self, radix: uint) -> uint { + fn scan_digits(&mut self, radix: usize) -> usize { let mut len = 0u; loop { let c = self.curr; @@ -724,7 +724,7 @@ impl<'a> StringReader<'a> { /// Scan over `n_digits` hex digits, stopping at `delim`, reporting an /// error if too many or too few digits are encountered. fn scan_hex_digits(&mut self, - n_digits: uint, + n_digits: usize, delim: char, below_0x7f_only: bool) -> bool { @@ -877,7 +877,7 @@ impl<'a> StringReader<'a> { fn scan_unicode_escape(&mut self, delim: char) -> bool { self.bump(); // past the { let start_bpos = self.last_pos; - let mut count: uint = 0; + let mut count = 0us; let mut accum_int = 0; while !self.curr_is('}') && count <= 6 { @@ -935,7 +935,7 @@ impl<'a> StringReader<'a> { /// Check that a base is valid for a floating literal, emitting a nice /// error if it isn't. 
- fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: uint) { + fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: usize) { match base { 16u => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \ supported"), diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index f1f547ba0c7..7bde32326fa 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -374,7 +374,7 @@ pub fn maybe_aborted(result: T, mut p: Parser) -> T { /// Rather than just accepting/rejecting a given literal, unescapes it as /// well. Can take any slice prefixed by a character escape. Returns the /// character and the number of characters consumed. -pub fn char_lit(lit: &str) -> (char, int) { +pub fn char_lit(lit: &str) -> (char, isize) { use std::{num, char}; let mut chars = lit.chars(); @@ -401,19 +401,19 @@ pub fn char_lit(lit: &str) -> (char, int) { let msg = format!("lexer should have rejected a bad character escape {}", lit); let msg2 = &msg[]; - fn esc(len: uint, lit: &str) -> Option<(char, int)> { + fn esc(len: usize, lit: &str) -> Option<(char, isize)> { num::from_str_radix(&lit[2..len], 16) .and_then(char::from_u32) - .map(|x| (x, len as int)) + .map(|x| (x, len as isize)) } - let unicode_escape = |&: | -> Option<(char, int)> + let unicode_escape = |&: | -> Option<(char, isize)> if lit.as_bytes()[2] == b'{' { let idx = lit.find('}').expect(msg2); let subslice = &lit[3..idx]; num::from_str_radix(subslice, 16) .and_then(char::from_u32) - .map(|x| (x, subslice.chars().count() as int + 4)) + .map(|x| (x, subslice.chars().count() as isize + 4)) } else { esc(6, lit) }; @@ -437,7 +437,7 @@ pub fn str_lit(lit: &str) -> String { let error = |&: i| format!("lexer should have rejected {} at {}", lit, i); /// Eat everything up to a non-whitespace - fn eat<'a>(it: &mut iter::Peekable<(uint, char), str::CharIndices<'a>>) { + fn eat<'a>(it: &mut iter::Peekable<(usize, char), str::CharIndices<'a>>) { loop { match it.peek().map(|x| x.1) { Some(' ') | Some('\n') | Some('\r') | Some('\t') => { @@ -568,7 +568,7 @@ pub fn float_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) -> a } /// Parse a string representing a byte literal into its final form. 
Similar to `char_lit` -pub fn byte_lit(lit: &str) -> (u8, uint) { +pub fn byte_lit(lit: &str) -> (u8, usize) { let err = |&: i| format!("lexer accepted invalid byte literal {} step {}", lit, i); if lit.len() == 1 { @@ -606,7 +606,7 @@ pub fn binary_lit(lit: &str) -> Rc> { let error = |&: i| format!("lexer should have rejected {} at {}", lit, i); /// Eat everything up to a non-whitespace - fn eat<'a, I: Iterator>(it: &mut iter::Peekable<(uint, u8), I>) { + fn eat<'a, I: Iterator>(it: &mut iter::Peekable<(usize, u8), I>) { loop { match it.peek().map(|x| x.1) { Some(b' ') | Some(b'\n') | Some(b'\r') | Some(b'\t') => { @@ -1161,11 +1161,11 @@ mod test { #[test] fn span_of_self_arg_pat_idents_are_correct() { - let srcs = ["impl z { fn a (&self, &myarg: int) {} }", - "impl z { fn a (&mut self, &myarg: int) {} }", - "impl z { fn a (&'a self, &myarg: int) {} }", - "impl z { fn a (self, &myarg: int) {} }", - "impl z { fn a (self: Foo, &myarg: int) {} }", + let srcs = ["impl z { fn a (&self, &myarg: i32) {} }", + "impl z { fn a (&mut self, &myarg: i32) {} }", + "impl z { fn a (&'a self, &myarg: i32) {} }", + "impl z { fn a (self, &myarg: i32) {} }", + "impl z { fn a (self: Foo, &myarg: i32) {} }", ]; for &src in srcs.iter() { diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs index 9d03ec73af8..a3600506057 100644 --- a/src/libsyntax/parse/obsolete.rs +++ b/src/libsyntax/parse/obsolete.rs @@ -62,7 +62,7 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> { "use a `move ||` expression instead", ), ObsoleteSyntax::ClosureType => ( - "`|uint| -> bool` closure type syntax", + "`|usize| -> bool` closure type syntax", "use unboxed closures instead, no type annotation needed" ), ObsoleteSyntax::Sized => ( diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 30cc9836374..9822dcef0c0 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -292,9 +292,9 @@ pub struct Parser<'a> { pub buffer: [TokenAndSpan; 4], pub buffer_start: int, pub buffer_end: int, - pub tokens_consumed: uint, + pub tokens_consumed: usize, pub restrictions: Restrictions, - pub quote_depth: uint, // not (yet) related to the quasiquoter + pub quote_depth: usize, // not (yet) related to the quasiquoter pub reader: Box, pub interner: Rc, /// The set of seen errors about obsolete syntax. Used to suppress @@ -932,8 +932,8 @@ impl<'a> Parser<'a> { self.reader.real_token() } else { // Avoid token copies with `replace`. - let buffer_start = self.buffer_start as uint; - let next_index = (buffer_start + 1) & 3 as uint; + let buffer_start = self.buffer_start as usize; + let next_index = (buffer_start + 1) & 3 as usize; self.buffer_start = next_index as int; let placeholder = TokenAndSpan { @@ -972,15 +972,15 @@ impl<'a> Parser<'a> { } return (4 - self.buffer_start) + self.buffer_end; } - pub fn look_ahead(&mut self, distance: uint, f: F) -> R where + pub fn look_ahead(&mut self, distance: usize, f: F) -> R where F: FnOnce(&token::Token) -> R, { let dist = distance as int; while self.buffer_length() < dist { - self.buffer[self.buffer_end as uint] = self.reader.real_token(); + self.buffer[self.buffer_end as usize] = self.reader.real_token(); self.buffer_end = (self.buffer_end + 1) & 3; } - f(&self.buffer[((self.buffer_start + dist - 1) & 3) as uint].tok) + f(&self.buffer[((self.buffer_start + dist - 1) & 3) as usize].tok) } pub fn fatal(&mut self, m: &str) -> ! 
{ self.sess.span_diagnostic.span_fatal(self.span, m) @@ -2087,7 +2087,7 @@ impl<'a> Parser<'a> { ExprField(expr, ident) } - pub fn mk_tup_field(&mut self, expr: P, idx: codemap::Spanned) -> ast::Expr_ { + pub fn mk_tup_field(&mut self, expr: P, idx: codemap::Spanned) -> ast::Expr_ { ExprTupField(expr, idx) } @@ -2485,7 +2485,7 @@ impl<'a> Parser<'a> { hi = self.span.hi; self.bump(); - let index = n.as_str().parse::(); + let index = n.as_str().parse::(); match index { Some(n) => { let id = spanned(dot, hi, n); @@ -2511,7 +2511,7 @@ impl<'a> Parser<'a> { }; self.span_help(last_span, &format!("try parenthesizing the first index; e.g., `(foo.{}){}`", - float.trunc() as uint, + float.trunc() as usize, &float.fract().to_string()[1..])[]); } self.abort_if_errors(); @@ -2864,7 +2864,7 @@ impl<'a> Parser<'a> { } /// Parse an expression of binops of at least min_prec precedence - pub fn parse_more_binops(&mut self, lhs: P, min_prec: uint) -> P { + pub fn parse_more_binops(&mut self, lhs: P, min_prec: usize) -> P { if self.expr_is_complete(&*lhs) { return lhs; } // Prevent dynamic borrow errors later on by limiting the diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index 4b3573f84c5..aeb9599923e 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -83,9 +83,9 @@ pub enum Lit { Integer(ast::Name), Float(ast::Name), Str_(ast::Name), - StrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */ + StrRaw(ast::Name, usize), /* raw str delimited by n hash symbols */ Binary(ast::Name), - BinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */ + BinaryRaw(ast::Name, usize), /* raw binary str delimited by n hash symbols */ } impl Lit { @@ -724,7 +724,7 @@ pub fn intern(s: &str) -> ast::Name { get_ident_interner().intern(s) } -/// gensym's a new uint, using the current interner. +/// gensym's a new usize, using the current interner. 
#[inline] pub fn gensym(s: &str) -> ast::Name { get_ident_interner().gensym(s) -- cgit 1.4.1-3-g733a5 From d5c83652b33a6e5049699ccc7e6bd6fffb42c2b8 Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Sat, 17 Jan 2015 23:49:08 +0000 Subject: libsyntax: rename functions from uint to usize --- src/librustc/lint/builtin.rs | 2 +- src/librustc/util/ppaux.rs | 2 +- src/librustc_resolve/lib.rs | 2 +- src/librustc_trans/save/span_utils.rs | 4 +-- src/librustc_trans/trans/common.rs | 6 ++--- src/librustc_trans/trans/debuginfo.rs | 8 +++--- src/librustc_trans/trans/meth.rs | 2 +- src/librustdoc/clean/mod.rs | 4 +-- src/libsyntax/ast.rs | 4 +-- src/libsyntax/codemap.rs | 44 ++++++++++++++++----------------- src/libsyntax/diagnostic.rs | 6 ++--- src/libsyntax/ext/build.rs | 10 ++++---- src/libsyntax/ext/deriving/decodable.rs | 8 +++--- src/libsyntax/ext/deriving/default.rs | 2 +- src/libsyntax/ext/deriving/encodable.rs | 12 ++++----- src/libsyntax/ext/deriving/hash.rs | 2 +- src/libsyntax/ext/deriving/rand.rs | 6 ++--- src/libsyntax/ext/env.rs | 2 +- src/libsyntax/ext/format.rs | 12 ++++----- src/libsyntax/ext/mtwt.rs | 4 +-- src/libsyntax/ext/quote.rs | 2 +- src/libsyntax/ext/source_util.rs | 4 +-- src/libsyntax/fold.rs | 8 +++--- src/libsyntax/parse/lexer/comments.rs | 2 +- src/libsyntax/parse/lexer/mod.rs | 16 ++++++------ src/libsyntax/parse/mod.rs | 4 +-- src/libsyntax/parse/token.rs | 2 +- src/libsyntax/print/pprust.rs | 4 +-- src/libsyntax/util/interner.rs | 6 ++--- src/test/auxiliary/roman_numerals.rs | 4 +-- 30 files changed, 97 insertions(+), 97 deletions(-) (limited to 'src/libsyntax/parse') diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 59808b302f4..75897fe6d7c 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -1329,7 +1329,7 @@ impl UnusedMut { let ident = path1.node; if let ast::BindByValue(ast::MutMutable) = mode { if !token::get_ident(ident).get().starts_with("_") { - match mutables.entry(ident.name.uint()) { + match mutables.entry(ident.name.usize()) { Vacant(entry) => { entry.insert(vec![id]); }, Occupied(mut entry) => { entry.get_mut().push(id); }, } diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index fb44d0cadfa..8427c471eee 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -164,7 +164,7 @@ pub fn explain_region_and_span(cx: &ctxt, region: ty::Region) fn explain_span(cx: &ctxt, heading: &str, span: Span) -> (String, Option) { let lo = cx.sess.codemap().lookup_char_pos_adj(span.lo); - (format!("the {} at {}:{}", heading, lo.line, lo.col.to_uint()), + (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()), Some(span)) } } diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 7743a437858..19992164118 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -1962,7 +1962,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let module_name = self.module_to_string(&*search_module); let mut span = span; let msg = if "???" 
== &module_name[] { - span.hi = span.lo + Pos::from_uint(segment_name.get().len()); + span.hi = span.lo + Pos::from_usize(segment_name.get().len()); match search_parent_externals(name, &self.current_module) { diff --git a/src/librustc_trans/save/span_utils.rs b/src/librustc_trans/save/span_utils.rs index 77343612ac8..97b3cda006b 100644 --- a/src/librustc_trans/save/span_utils.rs +++ b/src/librustc_trans/save/span_utils.rs @@ -40,8 +40,8 @@ impl<'a> SpanUtils<'a> { format!("file_name,{},file_line,{},file_col,{},extent_start,{},extent_start_bytes,{},\ file_line_end,{},file_col_end,{},extent_end,{},extent_end_bytes,{}", lo_loc.file.name, - lo_loc.line, lo_loc.col.to_uint(), lo_pos.to_uint(), lo_pos_byte.to_uint(), - hi_loc.line, hi_loc.col.to_uint(), hi_pos.to_uint(), hi_pos_byte.to_uint()) + lo_loc.line, lo_loc.col.to_usize(), lo_pos.to_usize(), lo_pos_byte.to_usize(), + hi_loc.line, hi_loc.col.to_usize(), hi_pos.to_usize(), hi_pos_byte.to_usize()) } // sub_span starts at span.lo, so we need to adjust the positions etc. diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs index 3eee4637de1..519b4f628e2 100644 --- a/src/librustc_trans/trans/common.rs +++ b/src/librustc_trans/trans/common.rs @@ -275,7 +275,7 @@ pub fn return_type_is_void(ccx: &CrateContext, ty: Ty) -> bool { /// Generates a unique symbol based off the name given. This is used to create /// unique symbols for things like closures. pub fn gensym_name(name: &str) -> PathElem { - let num = token::gensym(name).uint(); + let num = token::gensym(name).usize(); // use one colon which will get translated to a period by the mangler, and // we're guaranteed that `num` is globally unique for this crate. PathName(token::gensym(&format!("{}:{}", name, num)[])) @@ -848,7 +848,7 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va !null_terminated as Bool); let gsym = token::gensym("str"); - let buf = CString::from_vec(format!("str{}", gsym.uint()).into_bytes()); + let buf = CString::from_vec(format!("str{}", gsym.usize()).into_bytes()); let g = llvm::LLVMAddGlobal(cx.llmod(), val_ty(sc).to_ref(), buf.as_ptr()); llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); @@ -873,7 +873,7 @@ pub fn C_binary_slice(cx: &CrateContext, data: &[u8]) -> ValueRef { let lldata = C_bytes(cx, data); let gsym = token::gensym("binary"); - let name = format!("binary{}", gsym.uint()); + let name = format!("binary{}", gsym.usize()); let name = CString::from_vec(name.into_bytes()); let g = llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(), name.as_ptr()); diff --git a/src/librustc_trans/trans/debuginfo.rs b/src/librustc_trans/trans/debuginfo.rs index a03a5090c05..4b52e5f989f 100644 --- a/src/librustc_trans/trans/debuginfo.rs +++ b/src/librustc_trans/trans/debuginfo.rs @@ -1204,7 +1204,7 @@ pub fn set_source_location(fcx: &FunctionContext, set_debug_location(cx, DebugLocation::new(scope, loc.line, - loc.col.to_uint())); + loc.col.to_usize())); } else { set_debug_location(cx, UnknownLocation); } @@ -1719,7 +1719,7 @@ fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, set_debug_location(cx, DebugLocation::new(scope_metadata, loc.line, - loc.col.to_uint())); + loc.col.to_usize())); unsafe { let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd( DIB(cx), @@ -3282,7 +3282,7 @@ fn create_scope_map(cx: &CrateContext, parent_scope, file_metadata, loc.line as c_uint, - loc.col.to_uint() as c_uint) + loc.col.to_usize() as c_uint) }; scope_stack.push(ScopeStackEntry { scope_metadata: 
scope_metadata, @@ -3404,7 +3404,7 @@ fn create_scope_map(cx: &CrateContext, parent_scope, file_metadata, loc.line as c_uint, - loc.col.to_uint() as c_uint) + loc.col.to_usize() as c_uint) }; scope_stack.push(ScopeStackEntry { diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs index 0fb0dffe930..712f5fa53c9 100644 --- a/src/librustc_trans/trans/meth.rs +++ b/src/librustc_trans/trans/meth.rs @@ -785,7 +785,7 @@ pub fn make_vtable>(ccx: &CrateContext, unsafe { let tbl = C_struct(ccx, &components[], false); let sym = token::gensym("vtable"); - let buf = CString::from_vec(format!("vtable{}", sym.uint()).into_bytes()); + let buf = CString::from_vec(format!("vtable{}", sym.usize()).into_bytes()); let vt_gvar = llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(), buf.as_ptr()); llvm::LLVMSetInitializer(vt_gvar, tbl); diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 8dc3adad3b2..c6ac616932d 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1870,9 +1870,9 @@ impl Clean for syntax::codemap::Span { Span { filename: filename.to_string(), loline: lo.line, - locol: lo.col.to_uint(), + locol: lo.col.to_usize(), hiline: hi.line, - hicol: hi.col.to_uint(), + hicol: hi.col.to_usize(), } } } diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index 81ce521f68c..47d55818f5e 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -95,7 +95,7 @@ impl Ident { pub fn encode_with_hygiene(&self) -> String { format!("\x00name_{},ctxt_{}\x00", - self.name.uint(), + self.name.usize(), self.ctxt) } } @@ -181,7 +181,7 @@ impl Name { } } - pub fn uint(&self) -> usize { + pub fn usize(&self) -> usize { let Name(nm) = *self; nm as usize } diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index 6b9dda88a36..07544f35b19 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -30,8 +30,8 @@ use libc::c_uint; use serialize::{Encodable, Decodable, Encoder, Decoder}; pub trait Pos { - fn from_uint(n: usize) -> Self; - fn to_uint(&self) -> usize; + fn from_usize(n: usize) -> Self; + fn to_usize(&self) -> usize; } /// A byte offset. 
Keep this small (currently 32-bits), as AST contains @@ -49,15 +49,15 @@ pub struct CharPos(pub usize); // have been unsuccessful impl Pos for BytePos { - fn from_uint(n: usize) -> BytePos { BytePos(n as u32) } - fn to_uint(&self) -> usize { let BytePos(n) = *self; n as usize } + fn from_usize(n: usize) -> BytePos { BytePos(n as u32) } + fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize } } impl Add for BytePos { type Output = BytePos; fn add(self, rhs: BytePos) -> BytePos { - BytePos((self.to_uint() + rhs.to_uint()) as u32) + BytePos((self.to_usize() + rhs.to_usize()) as u32) } } @@ -65,20 +65,20 @@ impl Sub for BytePos { type Output = BytePos; fn sub(self, rhs: BytePos) -> BytePos { - BytePos((self.to_uint() - rhs.to_uint()) as u32) + BytePos((self.to_usize() - rhs.to_usize()) as u32) } } impl Pos for CharPos { - fn from_uint(n: usize) -> CharPos { CharPos(n) } - fn to_uint(&self) -> usize { let CharPos(n) = *self; n } + fn from_usize(n: usize) -> CharPos { CharPos(n) } + fn to_usize(&self) -> usize { let CharPos(n) = *self; n } } impl Add for CharPos { type Output = CharPos; fn add(self, rhs: CharPos) -> CharPos { - CharPos(self.to_uint() + rhs.to_uint()) + CharPos(self.to_usize() + rhs.to_usize()) } } @@ -86,7 +86,7 @@ impl Sub for CharPos { type Output = CharPos; fn sub(self, rhs: CharPos) -> CharPos { - CharPos(self.to_uint() - rhs.to_uint()) + CharPos(self.to_usize() - rhs.to_usize()) } } @@ -310,7 +310,7 @@ impl FileMap { let lines = self.lines.borrow(); lines.get(line_number).map(|&line| { let begin: BytePos = line - self.start_pos; - let begin = begin.to_uint(); + let begin = begin.to_usize(); let slice = &self.src[begin..]; match slice.find('\n') { Some(e) => &slice[..e], @@ -351,7 +351,7 @@ impl CodeMap { let mut files = self.files.borrow_mut(); let start_pos = match files.last() { None => 0, - Some(last) => last.start_pos.to_uint() + last.src.len(), + Some(last) => last.start_pos.to_usize() + last.src.len(), }; // Remove utf-8 BOM if any. 
@@ -374,7 +374,7 @@ impl CodeMap { let filemap = Rc::new(FileMap { name: filename, src: src.to_string(), - start_pos: Pos::from_uint(start_pos), + start_pos: Pos::from_usize(start_pos), lines: RefCell::new(Vec::new()), multibyte_chars: RefCell::new(Vec::new()), }); @@ -389,7 +389,7 @@ impl CodeMap { (format!("<{}:{}:{}>", pos.file.name, pos.line, - pos.col.to_uint() + 1)).to_string() + pos.col.to_usize() + 1)).to_string() } /// Lookup source information about a BytePos @@ -417,9 +417,9 @@ impl CodeMap { return (format!("{}:{}:{}: {}:{}", lo.filename, lo.line, - lo.col.to_uint() + 1, + lo.col.to_usize() + 1, hi.line, - hi.col.to_uint() + 1)).to_string() + hi.col.to_usize() + 1)).to_string() } pub fn span_to_filename(&self, sp: Span) -> FileName { @@ -447,7 +447,7 @@ impl CodeMap { if begin.fm.start_pos != end.fm.start_pos { None } else { - Some((&begin.fm.src[begin.pos.to_uint()..end.pos.to_uint()]).to_string()) + Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string()) } } @@ -484,14 +484,14 @@ impl CodeMap { total_extra_bytes += mbc.bytes - 1; // We should never see a byte position in the middle of a // character - assert!(bpos.to_uint() >= mbc.pos.to_uint() + mbc.bytes); + assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes); } else { break; } } - assert!(map.start_pos.to_uint() + total_extra_bytes <= bpos.to_uint()); - CharPos(bpos.to_uint() - map.start_pos.to_uint() - total_extra_bytes) + assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize()); + CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes) } fn lookup_filemap_idx(&self, pos: BytePos) -> usize { @@ -520,13 +520,13 @@ impl CodeMap { } if a == 0 { panic!("position {} does not resolve to a source location", - pos.to_uint()); + pos.to_usize()); } a -= 1; } if a >= len { panic!("position {} does not resolve to a source location", - pos.to_uint()) + pos.to_usize()) } return a; diff --git a/src/libsyntax/diagnostic.rs b/src/libsyntax/diagnostic.rs index 9c8ea7d9d68..c86991b6e40 100644 --- a/src/libsyntax/diagnostic.rs +++ b/src/libsyntax/diagnostic.rs @@ -475,7 +475,7 @@ fn highlight_lines(err: &mut EmitterWriter, while num > 0u { num /= 10u; digits += 1u; } // indent past |name:## | and the 0-offset column location - let left = fm.name.len() + digits + lo.col.to_uint() + 3u; + let left = fm.name.len() + digits + lo.col.to_usize() + 3u; let mut s = String::new(); // Skip is the number of characters we need to skip because they are // part of the 'filename:line ' part of the previous line. 
@@ -502,7 +502,7 @@ fn highlight_lines(err: &mut EmitterWriter, let hi = cm.lookup_char_pos(sp.hi); if hi.col != lo.col { // the ^ already takes up one space - let num_squigglies = hi.col.to_uint() - lo.col.to_uint() - 1u; + let num_squigglies = hi.col.to_usize() - lo.col.to_usize() - 1us; for _ in range(0, num_squigglies) { s.push('~'); } @@ -551,7 +551,7 @@ fn custom_highlight_lines(w: &mut EmitterWriter, let last_line_start = format!("{}:{} ", fm.name, lines[lines.len()-1]+1); let hi = cm.lookup_char_pos(sp.hi); // Span seems to use half-opened interval, so subtract 1 - let skip = last_line_start.len() + hi.col.to_uint() - 1; + let skip = last_line_start.len() + hi.col.to_usize() - 1; let mut s = String::new(); for _ in range(0, skip) { s.push(' '); diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index d960186cdd8..3bac972dbb5 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -134,7 +134,7 @@ pub trait AstBuilder { fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> P; - fn expr_uint(&self, span: Span, i: usize) -> P; + fn expr_usize(&self, span: Span, i: usize) -> P; fn expr_int(&self, sp: Span, i: int) -> P; fn expr_u8(&self, sp: Span, u: u8) -> P; fn expr_bool(&self, sp: Span, value: bool) -> P; @@ -579,7 +579,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> { fn expr_field_access(&self, sp: Span, expr: P, ident: ast::Ident) -> P { let field_name = token::get_ident(ident); let field_span = Span { - lo: sp.lo - Pos::from_uint(field_name.get().len()), + lo: sp.lo - Pos::from_usize(field_name.get().len()), hi: sp.hi, expn_id: sp.expn_id, }; @@ -589,7 +589,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> { } fn expr_tup_field_access(&self, sp: Span, expr: P, idx: usize) -> P { let field_span = Span { - lo: sp.lo - Pos::from_uint(idx.to_string().len()), + lo: sp.lo - Pos::from_usize(idx.to_string().len()), hi: sp.hi, expn_id: sp.expn_id, }; @@ -641,7 +641,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> { fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> P { self.expr(sp, ast::ExprLit(P(respan(sp, lit)))) } - fn expr_uint(&self, span: Span, i: usize) -> P { + fn expr_usize(&self, span: Span, i: usize) -> P { self.expr_lit(span, ast::LitInt(i as u64, ast::UnsignedIntLit(ast::TyUs(false)))) } fn expr_int(&self, sp: Span, i: int) -> P { @@ -710,7 +710,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> { let loc = self.codemap().lookup_char_pos(span.lo); let expr_file = self.expr_str(span, token::intern_and_get_ident(&loc.file.name[])); - let expr_line = self.expr_uint(span, loc.line); + let expr_line = self.expr_usize(span, loc.line); let expr_file_line_tuple = self.expr_tuple(span, vec!(expr_file, expr_line)); let expr_file_line_ptr = self.expr_addr_of(span, expr_file_line_tuple); self.expr_call_global( diff --git a/src/libsyntax/ext/deriving/decodable.rs b/src/libsyntax/ext/deriving/decodable.rs index 6a41874b935..62304265dfa 100644 --- a/src/libsyntax/ext/deriving/decodable.rs +++ b/src/libsyntax/ext/deriving/decodable.rs @@ -114,7 +114,7 @@ fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span, cx.expr_try(span, cx.expr_method_call(span, blkdecoder.clone(), read_struct_field, vec!(cx.expr_str(span, name), - cx.expr_uint(span, field), + cx.expr_usize(span, field), exprdecode.clone()))) }); let result = cx.expr_ok(trait_span, result); @@ -123,7 +123,7 @@ fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span, cx.ident_of("read_struct"), vec!( cx.expr_str(trait_span, token::get_ident(substr.type_ident)), - cx.expr_uint(trait_span, nfields), + cx.expr_usize(trait_span, 
nfields), cx.lambda_expr_1(trait_span, result, blkarg) )) } @@ -143,14 +143,14 @@ fn decodable_substructure(cx: &mut ExtCtxt, trait_span: Span, path, parts, |cx, span, _, field| { - let idx = cx.expr_uint(span, field); + let idx = cx.expr_usize(span, field); cx.expr_try(span, cx.expr_method_call(span, blkdecoder.clone(), rvariant_arg, vec!(idx, exprdecode.clone()))) }); arms.push(cx.arm(v_span, - vec!(cx.pat_lit(v_span, cx.expr_uint(v_span, i))), + vec!(cx.pat_lit(v_span, cx.expr_usize(v_span, i))), decoded)); } diff --git a/src/libsyntax/ext/deriving/default.rs b/src/libsyntax/ext/deriving/default.rs index 047c4fef3c4..f22e02d7b8d 100644 --- a/src/libsyntax/ext/deriving/default.rs +++ b/src/libsyntax/ext/deriving/default.rs @@ -79,7 +79,7 @@ fn default_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructur StaticEnum(..) => { cx.span_err(trait_span, "`Default` cannot be derived for enums, only structs"); // let compilation continue - cx.expr_uint(trait_span, 0) + cx.expr_usize(trait_span, 0) } _ => cx.span_bug(trait_span, "Non-static method in `deriving(Default)`") }; diff --git a/src/libsyntax/ext/deriving/encodable.rs b/src/libsyntax/ext/deriving/encodable.rs index 496aec556f1..3189a39160e 100644 --- a/src/libsyntax/ext/deriving/encodable.rs +++ b/src/libsyntax/ext/deriving/encodable.rs @@ -27,7 +27,7 @@ //! s.emit_struct("Node", 1, |this| { //! this.emit_struct_field("id", 0, |this| { //! Encodable::encode(&self.id, this) -//! /* this.emit_uint(self.id) can also be used */ +//! /* this.emit_usize(self.id) can also be used */ //! }) //! }) //! } @@ -192,7 +192,7 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span, let call = cx.expr_method_call(span, blkencoder.clone(), emit_struct_field, vec!(cx.expr_str(span, name), - cx.expr_uint(span, i), + cx.expr_usize(span, i), lambda)); // last call doesn't need a try! @@ -218,7 +218,7 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span, cx.ident_of("emit_struct"), vec!( cx.expr_str(trait_span, token::get_ident(substr.type_ident)), - cx.expr_uint(trait_span, fields.len()), + cx.expr_usize(trait_span, fields.len()), blk )) } @@ -239,7 +239,7 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span, let lambda = cx.lambda_expr_1(span, enc, blkarg); let call = cx.expr_method_call(span, blkencoder.clone(), emit_variant_arg, - vec!(cx.expr_uint(span, i), + vec!(cx.expr_usize(span, i), lambda)); let call = if i != last { cx.expr_try(span, call) @@ -262,8 +262,8 @@ fn encodable_substructure(cx: &mut ExtCtxt, trait_span: Span, let call = cx.expr_method_call(trait_span, blkencoder, cx.ident_of("emit_enum_variant"), vec!(name, - cx.expr_uint(trait_span, idx), - cx.expr_uint(trait_span, fields.len()), + cx.expr_usize(trait_span, idx), + cx.expr_usize(trait_span, fields.len()), blk)); let blk = cx.lambda_expr_1(trait_span, call, blkarg); let ret = cx.expr_method_call(trait_span, diff --git a/src/libsyntax/ext/deriving/hash.rs b/src/libsyntax/ext/deriving/hash.rs index 08336be87d1..8dac864c2ae 100644 --- a/src/libsyntax/ext/deriving/hash.rs +++ b/src/libsyntax/ext/deriving/hash.rs @@ -89,7 +89,7 @@ fn hash_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) // iteration function. 
let discriminant = match variant.node.disr_expr { Some(ref d) => d.clone(), - None => cx.expr_uint(trait_span, index) + None => cx.expr_usize(trait_span, index) }; stmts.push(call_hash(trait_span, discriminant)); diff --git a/src/libsyntax/ext/deriving/rand.rs b/src/libsyntax/ext/deriving/rand.rs index 1359cada673..be45a54e710 100644 --- a/src/libsyntax/ext/deriving/rand.rs +++ b/src/libsyntax/ext/deriving/rand.rs @@ -80,10 +80,10 @@ fn rand_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) if variants.is_empty() { cx.span_err(trait_span, "`Rand` cannot be derived for enums with no variants"); // let compilation continue - return cx.expr_uint(trait_span, 0); + return cx.expr_usize(trait_span, 0); } - let variant_count = cx.expr_uint(trait_span, variants.len()); + let variant_count = cx.expr_usize(trait_span, variants.len()); let rand_name = cx.path_all(trait_span, true, @@ -115,7 +115,7 @@ fn rand_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) variant_count); let mut arms = variants.iter().enumerate().map(|(i, &(ident, v_span, ref summary))| { - let i_expr = cx.expr_uint(v_span, i); + let i_expr = cx.expr_usize(v_span, i); let pat = cx.pat_lit(v_span, i_expr); let path = cx.path(v_span, vec![substr.type_ident, ident]); diff --git a/src/libsyntax/ext/env.rs b/src/libsyntax/ext/env.rs index 9b54e259761..9f6bf352b04 100644 --- a/src/libsyntax/ext/env.rs +++ b/src/libsyntax/ext/env.rs @@ -104,7 +104,7 @@ pub fn expand_env<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) let e = match os::getenv(var.get()) { None => { cx.span_err(sp, msg.get()); - cx.expr_uint(sp, 0) + cx.expr_usize(sp, 0) } Some(s) => cx.expr_str(sp, token::intern_and_get_ident(&s[])) }; diff --git a/src/libsyntax/ext/format.rs b/src/libsyntax/ext/format.rs index 684ae84872b..068c7ec0867 100644 --- a/src/libsyntax/ext/format.rs +++ b/src/libsyntax/ext/format.rs @@ -326,11 +326,11 @@ impl<'a, 'b> Context<'a, 'b> { match c { parse::CountIs(i) => { self.ecx.expr_call_global(sp, Context::rtpath(self.ecx, "CountIs"), - vec!(self.ecx.expr_uint(sp, i))) + vec!(self.ecx.expr_usize(sp, i))) } parse::CountIsParam(i) => { self.ecx.expr_call_global(sp, Context::rtpath(self.ecx, "CountIsParam"), - vec!(self.ecx.expr_uint(sp, i))) + vec!(self.ecx.expr_usize(sp, i))) } parse::CountImplied => { let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, @@ -349,7 +349,7 @@ impl<'a, 'b> Context<'a, 'b> { }; let i = i + self.args.len(); self.ecx.expr_call_global(sp, Context::rtpath(self.ecx, "CountIsParam"), - vec!(self.ecx.expr_uint(sp, i))) + vec!(self.ecx.expr_usize(sp, i))) } } } @@ -382,7 +382,7 @@ impl<'a, 'b> Context<'a, 'b> { } parse::ArgumentIs(i) => { self.ecx.expr_call_global(sp, Context::rtpath(self.ecx, "ArgumentIs"), - vec!(self.ecx.expr_uint(sp, i))) + vec!(self.ecx.expr_usize(sp, i))) } // Named arguments are converted to positional arguments at // the end of the list of arguments @@ -393,7 +393,7 @@ impl<'a, 'b> Context<'a, 'b> { }; let i = i + self.args.len(); self.ecx.expr_call_global(sp, Context::rtpath(self.ecx, "ArgumentIs"), - vec!(self.ecx.expr_uint(sp, i))) + vec!(self.ecx.expr_usize(sp, i))) } }; @@ -432,7 +432,7 @@ impl<'a, 'b> Context<'a, 'b> { } }; let align = self.ecx.expr_path(align); - let flags = self.ecx.expr_uint(sp, arg.format.flags); + let flags = self.ecx.expr_usize(sp, arg.format.flags); let prec = self.trans_count(arg.format.precision); let width = self.trans_count(arg.format.width); let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, 
"FormatSpec")); diff --git a/src/libsyntax/ext/mtwt.rs b/src/libsyntax/ext/mtwt.rs index 9bcb026a550..7adc443759f 100644 --- a/src/libsyntax/ext/mtwt.rs +++ b/src/libsyntax/ext/mtwt.rs @@ -398,7 +398,7 @@ mod tests { assert_eq! (marksof_internal (ans, stopname,&t), vec!(16));} // rename where stop doesn't match: { let chain = vec!(M(9), - R(id(name1.uint() as u32, + R(id(name1.usize() as u32, apply_mark_internal (4, EMPTY_CTXT,&mut t)), Name(100101102)), M(14)); @@ -407,7 +407,7 @@ mod tests { // rename where stop does match { let name1sc = apply_mark_internal(4, EMPTY_CTXT, &mut t); let chain = vec!(M(9), - R(id(name1.uint() as u32, name1sc), + R(id(name1.usize() as u32, name1sc), stopname), M(14)); let ans = unfold_test_sc(chain,EMPTY_CTXT,&mut t); diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs index c42b188302c..a5885ac6c5d 100644 --- a/src/libsyntax/ext/quote.rs +++ b/src/libsyntax/ext/quote.rs @@ -588,7 +588,7 @@ fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P { } token::Literal(token::StrRaw(ident, n), suf) => { - return mk_lit!("StrRaw", suf, mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n)) + return mk_lit!("StrRaw", suf, mk_name(cx, sp, ident.ident()), cx.expr_usize(sp, n)) } token::Ident(ident, style) => { diff --git a/src/libsyntax/ext/source_util.rs b/src/libsyntax/ext/source_util.rs index 31a1a838b13..a74adbf4085 100644 --- a/src/libsyntax/ext/source_util.rs +++ b/src/libsyntax/ext/source_util.rs @@ -35,7 +35,7 @@ pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) let topmost = cx.original_span_in_file(); let loc = cx.codemap().lookup_char_pos(topmost.lo); - base::MacExpr::new(cx.expr_uint(topmost, loc.line)) + base::MacExpr::new(cx.expr_usize(topmost, loc.line)) } /* column!(): expands to the current column number */ @@ -45,7 +45,7 @@ pub fn expand_column(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) let topmost = cx.original_span_in_file(); let loc = cx.codemap().lookup_char_pos(topmost.lo); - base::MacExpr::new(cx.expr_uint(topmost, loc.col.to_uint())) + base::MacExpr::new(cx.expr_usize(topmost, loc.col.to_usize())) } /// file!(): expands to the current filename */ diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index cae0cf904f6..1649075fd71 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -174,8 +174,8 @@ pub trait Folder : Sized { noop_fold_ident(i, self) } - fn fold_uint(&mut self, i: usize) -> usize { - noop_fold_uint(i, self) + fn fold_usize(&mut self, i: usize) -> usize { + noop_fold_usize(i, self) } fn fold_path(&mut self, p: Path) -> Path { @@ -505,7 +505,7 @@ pub fn noop_fold_ident(i: Ident, _: &mut T) -> Ident { i } -pub fn noop_fold_uint(i: usize, _: &mut T) -> usize { +pub fn noop_fold_usize(i: usize, _: &mut T) -> usize { i } @@ -1377,7 +1377,7 @@ pub fn noop_fold_expr(Expr {id, node, span}: Expr, folder: &mut T) -> } ExprTupField(el, ident) => { ExprTupField(folder.fold_expr(el), - respan(ident.span, folder.fold_uint(ident.node))) + respan(ident.span, folder.fold_usize(ident.node))) } ExprIndex(el, er) => { ExprIndex(folder.fold_expr(el), folder.fold_expr(er)) diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index 59a3c3bcb3e..d710c6d6c0f 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -208,7 +208,7 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool, /// whitespace. Note k may be outside bounds of s. 
fn all_whitespace(s: &str, col: CharPos) -> Option { let len = s.len(); - let mut col = col.to_uint(); + let mut col = col.to_usize(); let mut cursor: usize = 0; while col > 0 && cursor < len { let r: str::CharRange = s.char_range_at(cursor); diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index cd159f7c599..dfb0230cf34 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -212,8 +212,8 @@ impl<'a> StringReader<'a> { /// offending string to the error message fn fatal_span_verbose(&self, from_pos: BytePos, to_pos: BytePos, mut m: String) -> ! { m.push_str(": "); - let from = self.byte_offset(from_pos).to_uint(); - let to = self.byte_offset(to_pos).to_uint(); + let from = self.byte_offset(from_pos).to_usize(); + let to = self.byte_offset(to_pos).to_usize(); m.push_str(&self.filemap.src[from..to]); self.fatal_span_(from_pos, to_pos, &m[]); } @@ -272,8 +272,8 @@ impl<'a> StringReader<'a> { F: FnOnce(&str) -> T, { f(self.filemap.src.slice( - self.byte_offset(start).to_uint(), - self.byte_offset(end).to_uint())) + self.byte_offset(start).to_usize(), + self.byte_offset(end).to_usize())) } /// Converts CRLF to LF in the given string, raising an error on bare CR. @@ -321,7 +321,7 @@ impl<'a> StringReader<'a> { /// discovered, add it to the FileMap's list of line start offsets. pub fn bump(&mut self) { self.last_pos = self.pos; - let current_byte_offset = self.byte_offset(self.pos).to_uint(); + let current_byte_offset = self.byte_offset(self.pos).to_usize(); if current_byte_offset < self.filemap.src.len() { assert!(self.curr.is_some()); let last_char = self.curr.unwrap(); @@ -329,7 +329,7 @@ impl<'a> StringReader<'a> { .src .char_range_at(current_byte_offset); let byte_offset_diff = next.next - current_byte_offset; - self.pos = self.pos + Pos::from_uint(byte_offset_diff); + self.pos = self.pos + Pos::from_usize(byte_offset_diff); self.curr = Some(next.ch); self.col = self.col + CharPos(1u); if last_char == '\n' { @@ -346,7 +346,7 @@ impl<'a> StringReader<'a> { } pub fn nextch(&self) -> Option { - let offset = self.byte_offset(self.pos).to_uint(); + let offset = self.byte_offset(self.pos).to_usize(); if offset < self.filemap.src.len() { Some(self.filemap.src.char_at(offset)) } else { @@ -359,7 +359,7 @@ impl<'a> StringReader<'a> { } pub fn nextnextch(&self) -> Option { - let offset = self.byte_offset(self.pos).to_uint(); + let offset = self.byte_offset(self.pos).to_usize(); let s = self.filemap.src.as_slice(); if offset >= s.len() { return None } let str::CharRange { next, .. } = s.char_range_at(offset); diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 7bde32326fa..1a29766cee6 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -1171,9 +1171,9 @@ mod test { for &src in srcs.iter() { let spans = get_spans_of_pat_idents(src); let Span{ lo, hi, .. } = spans[0]; - assert!("self" == &src[lo.to_uint()..hi.to_uint()], + assert!("self" == &src[lo.to_usize()..hi.to_usize()], "\"{}\" != \"self\". src=\"{}\"", - &src[lo.to_uint()..hi.to_uint()], src) + &src[lo.to_usize()..hi.to_usize()], src) } } diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index aeb9599923e..08d0e5e997c 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -757,7 +757,7 @@ pub fn fresh_name(src: &ast::Ident) -> ast::Name { // create a fresh mark. 
pub fn fresh_mark() -> ast::Mrk { - gensym("mark").uint() as u32 + gensym("mark").usize() as u32 } #[cfg(test)] diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 2583c302293..83ac8432f98 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -1789,7 +1789,7 @@ impl<'a> State<'a> { ast::ExprTupField(ref expr, id) => { try!(self.print_expr(&**expr)); try!(word(&mut self.s, ".")); - try!(self.print_uint(id.node)); + try!(self.print_usize(id.node)); } ast::ExprIndex(ref expr, ref index) => { try!(self.print_expr(&**expr)); @@ -1951,7 +1951,7 @@ impl<'a> State<'a> { self.ann.post(self, NodeIdent(&ident)) } - pub fn print_uint(&mut self, i: usize) -> IoResult<()> { + pub fn print_usize(&mut self, i: usize) -> IoResult<()> { word(&mut self.s, &i.to_string()[]) } diff --git a/src/libsyntax/util/interner.rs b/src/libsyntax/util/interner.rs index 757bafaf5cb..66b225f30dd 100644 --- a/src/libsyntax/util/interner.rs +++ b/src/libsyntax/util/interner.rs @@ -70,7 +70,7 @@ impl + Clone + 'static> Interner { pub fn get(&self, idx: Name) -> T { let vect = self.vect.borrow(); - (*vect)[idx.uint()].clone() + (*vect)[idx.usize()].clone() } pub fn len(&self) -> usize { @@ -190,13 +190,13 @@ impl StrInterner { let new_idx = Name(self.len() as u32); // leave out of map to avoid colliding let mut vect = self.vect.borrow_mut(); - let existing = (*vect)[idx.uint()].clone(); + let existing = (*vect)[idx.usize()].clone(); vect.push(existing); new_idx } pub fn get(&self, idx: Name) -> RcStr { - (*self.vect.borrow())[idx.uint()].clone() + (*self.vect.borrow())[idx.usize()].clone() } pub fn len(&self) -> usize { diff --git a/src/test/auxiliary/roman_numerals.rs b/src/test/auxiliary/roman_numerals.rs index 519f32fc248..a4edc607279 100644 --- a/src/test/auxiliary/roman_numerals.rs +++ b/src/test/auxiliary/roman_numerals.rs @@ -20,7 +20,7 @@ use syntax::codemap::Span; use syntax::parse::token; use syntax::ast::{TokenTree, TtToken}; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr}; -use syntax::ext::build::AstBuilder; // trait for expr_uint +use syntax::ext::build::AstBuilder; // trait for expr_usize use rustc::plugin::Registry; // WARNING WARNING WARNING WARNING WARNING @@ -61,7 +61,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) } } - MacExpr::new(cx.expr_uint(sp, total)) + MacExpr::new(cx.expr_usize(sp, total)) } #[plugin_registrar] -- cgit 1.4.1-3-g733a5 From 7a24b3a4d7769ad9a4863a2cc61c009056459a67 Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Sat, 17 Jan 2015 23:55:21 +0000 Subject: libsyntax: int => i32 in appropriate places --- src/libsyntax/ext/deriving/generic/mod.rs | 26 ++++++++--------- src/libsyntax/ext/deriving/generic/ty.rs | 4 +-- src/libsyntax/ext/expand.rs | 46 +++++++++++++++---------------- src/libsyntax/parse/lexer/comments.rs | 4 +-- src/libsyntax/parse/mod.rs | 10 +++---- src/libsyntax/parse/parser.rs | 6 ++-- 6 files changed, 48 insertions(+), 48 deletions(-) (limited to 'src/libsyntax/parse') diff --git a/src/libsyntax/ext/deriving/generic/mod.rs b/src/libsyntax/ext/deriving/generic/mod.rs index 27199de0ea8..ca3c1d36fba 100644 --- a/src/libsyntax/ext/deriving/generic/mod.rs +++ b/src/libsyntax/ext/deriving/generic/mod.rs @@ -28,7 +28,7 @@ //! arguments: //! //! - `Struct`, when `Self` is a struct (including tuple structs, e.g -//! `struct T(int, char)`). +//! `struct T(i32, char)`). //! - `EnumMatching`, when `Self` is an enum and all the arguments are the //! same variant of the enum (e.g. 
`Some(1)`, `Some(3)` and `Some(4)`) //! - `EnumNonMatchingCollapsed` when `Self` is an enum and the arguments @@ -54,17 +54,17 @@ //! following snippet //! //! ```rust -//! struct A { x : int } +//! struct A { x : i32 } //! -//! struct B(int); +//! struct B(i32); //! //! enum C { -//! C0(int), -//! C1 { x: int } +//! C0(i32), +//! C1 { x: i32 } //! } //! ``` //! -//! The `int`s in `B` and `C0` don't have an identifier, so the +//! The `i32`s in `B` and `C0` don't have an identifier, so the //! `Option`s would be `None` for them. //! //! In the static cases, the structure is summarised, either into the just @@ -90,8 +90,8 @@ //! trait PartialEq { //! fn eq(&self, other: &Self); //! } -//! impl PartialEq for int { -//! fn eq(&self, other: &int) -> bool { +//! impl PartialEq for i32 { +//! fn eq(&self, other: &i32) -> bool { //! *self == *other //! } //! } @@ -117,7 +117,7 @@ //! //! ```{.text} //! Struct(vec![FieldInfo { -//! span: , +//! span: , //! name: None, //! self_: //! other: vec![] @@ -132,7 +132,7 @@ //! ```{.text} //! EnumMatching(0, , //! vec![FieldInfo { -//! span: +//! span: //! name: None, //! self_: , //! other: vec![] @@ -179,7 +179,7 @@ //! StaticStruct(, Unnamed(vec![])) //! //! StaticEnum(, -//! vec![(, , Unnamed(vec![])), +//! vec![(, , Unnamed(vec![])), //! (, , Named(vec![(, )]))]) //! ``` @@ -719,7 +719,7 @@ impl<'a> MethodDef<'a> { /// ``` /// #[derive(PartialEq)] - /// struct A { x: int, y: int } + /// struct A { x: i32, y: i32 } /// /// // equivalent to: /// impl PartialEq for A { @@ -825,7 +825,7 @@ impl<'a> MethodDef<'a> { /// #[derive(PartialEq)] /// enum A { /// A1, - /// A2(int) + /// A2(i32) /// } /// /// // is equivalent to diff --git a/src/libsyntax/ext/deriving/generic/ty.rs b/src/libsyntax/ext/deriving/generic/ty.rs index a236fa33eb1..e7634716ed7 100644 --- a/src/libsyntax/ext/deriving/generic/ty.rs +++ b/src/libsyntax/ext/deriving/generic/ty.rs @@ -32,7 +32,7 @@ pub enum PtrTy<'a> { Raw(ast::Mutability), } -/// A path, e.g. `::std::option::Option::` (global). Has support +/// A path, e.g. `::std::option::Option::` (global). Has support /// for type parameters and a lifetime. #[derive(Clone)] pub struct Path<'a> { @@ -91,7 +91,7 @@ pub enum Ty<'a> { /// &/Box/ Ty Ptr(Box>, PtrTy<'a>), /// mod::mod::Type<[lifetime], [Params...]>, including a plain type - /// parameter, and things like `int` + /// parameter, and things like `i32` Literal(Path<'a>), /// includes unit Tuple(Vec> ) diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 1fd0334c016..4f614c1a937 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -1507,7 +1507,7 @@ mod test { #[should_fail] #[test] fn macros_cant_escape_fns_test () { let src = "fn bogus() {macro_rules! z (() => (3+4));}\ - fn inty() -> int { z!() }".to_string(); + fn inty() -> i32 { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "".to_string(), @@ -1521,7 +1521,7 @@ mod test { #[should_fail] #[test] fn macros_cant_escape_mods_test () { let src = "mod foo {macro_rules! z (() => (3+4));}\ - fn inty() -> int { z!() }".to_string(); + fn inty() -> i32 { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "".to_string(), @@ -1533,7 +1533,7 @@ mod test { // macro_use modules should allow macros to escape #[test] fn macros_can_escape_flattened_mods_test () { let src = "#[macro_use] mod foo {macro_rules! 
z (() => (3+4));}\ - fn inty() -> int { z!() }".to_string(); + fn inty() -> i32 { z!() }".to_string(); let sess = parse::new_parse_sess(); let crate_ast = parse::parse_crate_from_source_str( "".to_string(), @@ -1564,8 +1564,8 @@ mod test { // should be able to use a bound identifier as a literal in a macro definition: #[test] fn self_macro_parsing(){ expand_crate_str( - "macro_rules! foo ((zz) => (287u;)); - fn f(zz : int) {foo!(zz);}".to_string() + "macro_rules! foo ((zz) => (287;)); + fn f(zz: i32) {foo!(zz);}".to_string() ); } @@ -1601,23 +1601,23 @@ mod test { fn automatic_renaming () { let tests: Vec = vec!(// b & c should get new names throughout, in the expr too: - ("fn a() -> int { let b = 13; let c = b; b+c }", + ("fn a() -> i32 { let b = 13; let c = b; b+c }", vec!(vec!(0,1),vec!(2)), false), // both x's should be renamed (how is this causing a bug?) - ("fn main () {let x: int = 13;x;}", + ("fn main () {let x: i32 = 13;x;}", vec!(vec!(0)), false), // the use of b after the + should be renamed, the other one not: - ("macro_rules! f (($x:ident) => (b + $x)); fn a() -> int { let b = 13; f!(b)}", + ("macro_rules! f (($x:ident) => (b + $x)); fn a() -> i32 { let b = 13; f!(b)}", vec!(vec!(1)), false), // the b before the plus should not be renamed (requires marks) - ("macro_rules! f (($x:ident) => ({let b=9; ($x + b)})); fn a() -> int { f!(b)}", + ("macro_rules! f (($x:ident) => ({let b=9; ($x + b)})); fn a() -> i32 { f!(b)}", vec!(vec!(1)), false), // the marks going in and out of letty should cancel, allowing that $x to // capture the one following the semicolon. // this was an awesome test case, and caught a *lot* of bugs. ("macro_rules! letty(($x:ident) => (let $x = 15;)); macro_rules! user(($x:ident) => ({letty!($x); $x})); - fn main() -> int {user!(z)}", + fn main() -> i32 {user!(z)}", vec!(vec!(0)), false) ); for (idx,s) in tests.iter().enumerate() { @@ -1680,13 +1680,13 @@ mod test { // can't write this test case until we have macro-generating macros. // method arg hygiene - // method expands to fn get_x(&self_0, x_1:int) {self_0 + self_2 + x_3 + x_1} + // method expands to fn get_x(&self_0, x_1: i32) {self_0 + self_2 + x_3 + x_1} #[test] fn method_arg_hygiene(){ run_renaming_test( &("macro_rules! inject_x (()=>(x)); macro_rules! inject_self (()=>(self)); struct A; - impl A{fn get_x(&self, x: int) {self + inject_self!() + inject_x!() + x;} }", + impl A{fn get_x(&self, x: i32) {self + inject_self!() + inject_x!() + x;} }", vec!(vec!(0),vec!(3)), true), 0) @@ -1706,21 +1706,21 @@ mod test { } // item fn hygiene - // expands to fn q(x_1:int){fn g(x_2:int){x_2 + x_1};} + // expands to fn q(x_1: i32){fn g(x_2: i32){x_2 + x_1};} #[test] fn issue_9383(){ run_renaming_test( - &("macro_rules! bad_macro (($ex:expr) => (fn g(x:int){ x + $ex })); - fn q(x:int) { bad_macro!(x); }", + &("macro_rules! bad_macro (($ex:expr) => (fn g(x: i32){ x + $ex })); + fn q(x: i32) { bad_macro!(x); }", vec!(vec!(1),vec!(0)),true), 0) } // closure arg hygiene (ExprClosure) - // expands to fn f(){(|x_1 : int| {(x_2 + x_1)})(3);} + // expands to fn f(){(|x_1 : i32| {(x_2 + x_1)})(3);} #[test] fn closure_arg_hygiene(){ run_renaming_test( &("macro_rules! inject_x (()=>(x)); - fn f(){(|x : int| {(inject_x!() + x)})(3);}", + fn f(){(|x : i32| {(inject_x!() + x)})(3);}", vec!(vec!(1)), true), 0) @@ -1729,7 +1729,7 @@ mod test { // macro_rules in method position. Sadly, unimplemented. #[test] fn macro_in_method_posn(){ expand_crate_str( - "macro_rules! 
my_method (() => (fn thirteen(&self) -> int {13})); + "macro_rules! my_method (() => (fn thirteen(&self) -> i32 {13})); struct A; impl A{ my_method!(); } fn f(){A.thirteen;}".to_string()); @@ -1876,7 +1876,7 @@ foo_module!(); // it's the name of a 0-ary variant, and that 'i' appears twice in succession. #[test] fn crate_bindings_test(){ - let the_crate = string_to_crate("fn main (a : int) -> int {|b| { + let the_crate = string_to_crate("fn main (a: i32) -> i32 {|b| { match 34 {None => 3, Some(i) | i => j, Foo{k:z,l:y} => \"banana\"}} }".to_string()); let idents = crate_bindings(&the_crate); assert_eq!(idents, strs_to_idents(vec!("a","b","None","i","i","z","y"))); @@ -1885,10 +1885,10 @@ foo_module!(); // test the IdentRenamer directly #[test] fn ident_renamer_test () { - let the_crate = string_to_crate("fn f(x : int){let x = x; x}".to_string()); + let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string()); let f_ident = token::str_to_ident("f"); let x_ident = token::str_to_ident("x"); - let int_ident = token::str_to_ident("int"); + let int_ident = token::str_to_ident("i32"); let renames = vec!((x_ident,Name(16))); let mut renamer = IdentRenamer{renames: &renames}; let renamed_crate = renamer.fold_crate(the_crate); @@ -1900,10 +1900,10 @@ foo_module!(); // test the PatIdentRenamer; only PatIdents get renamed #[test] fn pat_ident_renamer_test () { - let the_crate = string_to_crate("fn f(x : int){let x = x; x}".to_string()); + let the_crate = string_to_crate("fn f(x: i32){let x = x; x}".to_string()); let f_ident = token::str_to_ident("f"); let x_ident = token::str_to_ident("x"); - let int_ident = token::str_to_ident("int"); + let int_ident = token::str_to_ident("i32"); let renames = vec!((x_ident,Name(16))); let mut renamer = PatIdentRenamer{renames: &renames}; let renamed_crate = renamer.fold_crate(the_crate); diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index d710c6d6c0f..58d114fb2a7 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -399,9 +399,9 @@ mod test { } #[test] fn test_block_doc_comment_3() { - let comment = "/**\n let a: *int;\n *a = 5;\n*/"; + let comment = "/**\n let a: *i32;\n *a = 5;\n*/"; let stripped = strip_doc_comment_decoration(comment); - assert_eq!(stripped, " let a: *int;\n *a = 5;"); + assert_eq!(stripped, " let a: *i32;\n *a = 5;"); } #[test] fn test_block_doc_comment_4() { diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 1a29766cee6..33da7c160d9 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -855,7 +855,7 @@ mod test { #[test] fn string_to_tts_1 () { - let tts = string_to_tts("fn a (b : int) { b; }".to_string()); + let tts = string_to_tts("fn a (b : i32) { b; }".to_string()); assert_eq!(json::encode(&tts), "[\ {\ @@ -919,7 +919,7 @@ mod test { {\ \"variant\":\"Ident\",\ \"fields\":[\ - \"int\",\ + \"i32\",\ \"Plain\"\ ]\ }\ @@ -1031,8 +1031,8 @@ mod test { // check the contents of the tt manually: #[test] fn parse_fundecl () { - // this test depends on the intern order of "fn" and "int" - assert!(string_to_item("fn a (b : int) { b; }".to_string()) == + // this test depends on the intern order of "fn" and "i32" + assert_eq!(string_to_item("fn a (b : i32) { b; }".to_string()), Some( P(ast::Item{ident:str_to_ident("a"), attrs:Vec::new(), @@ -1046,7 +1046,7 @@ mod test { segments: vec!( ast::PathSegment { identifier: - str_to_ident("int"), + str_to_ident("i32"), parameters: ast::PathParameters::none(), } ), 
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 9822dcef0c0..920cfeb3693 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -1499,7 +1499,7 @@ impl<'a> Parser<'a> { self.expect(&token::OpenDelim(token::Bracket)); let t = self.parse_ty_sum(); - // Parse the `; e` in `[ int; e ]` + // Parse the `; e` in `[ i32; e ]` // where `e` is a const expression let t = match self.maybe_parse_fixed_length_of_vec() { None => TyVec(t), @@ -4803,7 +4803,7 @@ impl<'a> Parser<'a> { Some(attrs)) } - /// Parse a::B + /// Parse a::B fn parse_trait_ref(&mut self) -> TraitRef { ast::TraitRef { path: self.parse_path(LifetimeAndTypesWithoutColons), @@ -4822,7 +4822,7 @@ impl<'a> Parser<'a> { } } - /// Parse for<'l> a::B + /// Parse for<'l> a::B fn parse_poly_trait_ref(&mut self) -> PolyTraitRef { let lifetime_defs = self.parse_late_bound_lifetime_defs(); -- cgit 1.4.1-3-g733a5 From 591337431df612dd4e0df8d46b6291358085ac7c Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Sun, 18 Jan 2015 00:18:19 +0000 Subject: libsyntax: int types -> isize --- src/libsyntax/ext/build.rs | 4 +-- src/libsyntax/parse/lexer/comments.rs | 2 +- src/libsyntax/parse/lexer/mod.rs | 2 +- src/libsyntax/parse/parser.rs | 10 +++---- src/libsyntax/print/pp.rs | 50 +++++++++++++++++------------------ src/libsyntax/print/pprust.rs | 4 +-- src/libsyntax/util/small_vector.rs | 8 +++--- 7 files changed, 40 insertions(+), 40 deletions(-) (limited to 'src/libsyntax/parse') diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index 3bac972dbb5..8773c0f2f79 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -135,7 +135,7 @@ pub trait AstBuilder { fn expr_lit(&self, sp: Span, lit: ast::Lit_) -> P; fn expr_usize(&self, span: Span, i: usize) -> P; - fn expr_int(&self, sp: Span, i: int) -> P; + fn expr_int(&self, sp: Span, i: isize) -> P; fn expr_u8(&self, sp: Span, u: u8) -> P; fn expr_bool(&self, sp: Span, value: bool) -> P; @@ -644,7 +644,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> { fn expr_usize(&self, span: Span, i: usize) -> P { self.expr_lit(span, ast::LitInt(i as u64, ast::UnsignedIntLit(ast::TyUs(false)))) } - fn expr_int(&self, sp: Span, i: int) -> P { + fn expr_int(&self, sp: Span, i: isize) -> P { self.expr_lit(sp, ast::LitInt(i as u64, ast::SignedIntLit(ast::TyIs(false), ast::Sign::new(i)))) } diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index 58d114fb2a7..ca9091856d6 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -267,7 +267,7 @@ fn read_block_comment(rdr: &mut StringReader, assert!(!curr_line.contains_char('\n')); lines.push(curr_line); } else { - let mut level: int = 1; + let mut level: isize = 1; while level > 0 { debug!("=== block comment level {}", level); if rdr.is_eof() { diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index dfb0230cf34..2fcf43f9ead 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -519,7 +519,7 @@ impl<'a> StringReader<'a> { let is_doc_comment = self.curr_is('*') || self.curr_is('!'); let start_bpos = self.last_pos - BytePos(2); - let mut level: int = 1; + let mut level: isize = 1; let mut has_cr = false; while level > 0 { if self.is_eof() { diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 920cfeb3693..88825cb3f34 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -290,8 +290,8 @@ pub struct 
Parser<'a> { /// the previous token or None (only stashed sometimes). pub last_token: Option>, pub buffer: [TokenAndSpan; 4], - pub buffer_start: int, - pub buffer_end: int, + pub buffer_start: isize, + pub buffer_end: isize, pub tokens_consumed: usize, pub restrictions: Restrictions, pub quote_depth: usize, // not (yet) related to the quasiquoter @@ -934,7 +934,7 @@ impl<'a> Parser<'a> { // Avoid token copies with `replace`. let buffer_start = self.buffer_start as usize; let next_index = (buffer_start + 1) & 3 as usize; - self.buffer_start = next_index as int; + self.buffer_start = next_index as isize; let placeholder = TokenAndSpan { tok: token::Underscore, @@ -966,7 +966,7 @@ impl<'a> Parser<'a> { self.token = next; self.span = mk_sp(lo, hi); } - pub fn buffer_length(&mut self) -> int { + pub fn buffer_length(&mut self) -> isize { if self.buffer_start <= self.buffer_end { return self.buffer_end - self.buffer_start; } @@ -975,7 +975,7 @@ impl<'a> Parser<'a> { pub fn look_ahead(&mut self, distance: usize, f: F) -> R where F: FnOnce(&token::Token) -> R, { - let dist = distance as int; + let dist = distance as isize; while self.buffer_length() < dist { self.buffer[self.buffer_end as usize] = self.reader.real_token(); self.buffer_end = (self.buffer_end + 1) & 3; diff --git a/src/libsyntax/print/pp.rs b/src/libsyntax/print/pp.rs index 0cfc3d38e15..ca99fb2ef89 100644 --- a/src/libsyntax/print/pp.rs +++ b/src/libsyntax/print/pp.rs @@ -71,19 +71,19 @@ pub enum Breaks { #[derive(Clone, Copy)] pub struct BreakToken { - offset: int, - blank_space: int + offset: isize, + blank_space: isize } #[derive(Clone, Copy)] pub struct BeginToken { - offset: int, + offset: isize, breaks: Breaks } #[derive(Clone)] pub enum Token { - String(String, int), + String(String, isize), Break(BreakToken), Begin(BeginToken), End, @@ -122,7 +122,7 @@ pub fn tok_str(token: &Token) -> String { } pub fn buf_str(toks: &[Token], - szs: &[int], + szs: &[isize], left: usize, right: usize, lim: usize) @@ -155,11 +155,11 @@ pub enum PrintStackBreak { #[derive(Copy)] pub struct PrintStackElem { - offset: int, + offset: isize, pbreak: PrintStackBreak } -static SIZE_INFINITY: int = 0xffff; +static SIZE_INFINITY: isize = 0xffff; pub fn mk_printer(out: Box, linewidth: usize) -> Printer { // Yes 3, it makes the ring buffers big enough to never @@ -167,13 +167,13 @@ pub fn mk_printer(out: Box, linewidth: usize) -> Printer { let n: usize = 3 * linewidth; debug!("mk_printer {}", linewidth); let token: Vec = repeat(Token::Eof).take(n).collect(); - let size: Vec = repeat(0i).take(n).collect(); + let size: Vec = repeat(0i).take(n).collect(); let scan_stack: Vec = repeat(0us).take(n).collect(); Printer { out: out, buf_len: n, - margin: linewidth as int, - space: linewidth as int, + margin: linewidth as isize, + space: linewidth as isize, left: 0, right: 0, token: token, @@ -269,9 +269,9 @@ pub struct Printer { pub out: Box, buf_len: usize, /// Width of lines we're constrained to - margin: int, + margin: isize, /// Number of spaces left on line - space: int, + space: isize, /// Index of left side of input stream left: usize, /// Index of right side of input stream @@ -279,11 +279,11 @@ pub struct Printer { /// Ring-buffer stream goes through token: Vec , /// Ring-buffer of calculated sizes - size: Vec , + size: Vec , /// Running size of stream "...left" - left_total: int, + left_total: isize, /// Running size of stream "...right" - right_total: int, + right_total: isize, /// Pseudo-stack, really a ring too. 
Holds the /// primary-ring-buffers index of the Begin that started the /// current block, possibly with the most recent Break after that @@ -300,7 +300,7 @@ pub struct Printer { /// Stack of blocks-in-progress being flushed by print print_stack: Vec , /// Buffered indentation to avoid writing trailing whitespace - pending_indentation: int, + pending_indentation: isize, } impl Printer { @@ -479,7 +479,7 @@ impl Printer { Ok(()) } - pub fn check_stack(&mut self, k: int) { + pub fn check_stack(&mut self, k: isize) { if !self.scan_stack_empty { let x = self.scan_top(); match self.token[x] { @@ -506,14 +506,14 @@ impl Printer { } } } - pub fn print_newline(&mut self, amount: int) -> io::IoResult<()> { + pub fn print_newline(&mut self, amount: isize) -> io::IoResult<()> { debug!("NEWLINE {}", amount); let ret = write!(self.out, "\n"); self.pending_indentation = 0; self.indent(amount); return ret; } - pub fn indent(&mut self, amount: int) { + pub fn indent(&mut self, amount: isize) { debug!("INDENT {}", amount); self.pending_indentation += amount; } @@ -536,7 +536,7 @@ impl Printer { } write!(self.out, "{}", s) } - pub fn print(&mut self, token: Token, l: int) -> io::IoResult<()> { + pub fn print(&mut self, token: Token, l: isize) -> io::IoResult<()> { debug!("print {} {} (remaining line space={})", tok_str(&token), l, self.space); debug!("{}", buf_str(&self.token[], @@ -622,7 +622,7 @@ impl Printer { // "raw box" pub fn rbox(p: &mut Printer, indent: usize, b: Breaks) -> io::IoResult<()> { p.pretty_print(Token::Begin(BeginToken { - offset: indent as int, + offset: indent as isize, breaks: b })) } @@ -635,10 +635,10 @@ pub fn cbox(p: &mut Printer, indent: usize) -> io::IoResult<()> { rbox(p, indent, Breaks::Consistent) } -pub fn break_offset(p: &mut Printer, n: usize, off: int) -> io::IoResult<()> { +pub fn break_offset(p: &mut Printer, n: usize, off: isize) -> io::IoResult<()> { p.pretty_print(Token::Break(BreakToken { offset: off, - blank_space: n as int + blank_space: n as isize })) } @@ -651,7 +651,7 @@ pub fn eof(p: &mut Printer) -> io::IoResult<()> { } pub fn word(p: &mut Printer, wrd: &str) -> io::IoResult<()> { - p.pretty_print(Token::String(/* bad */ wrd.to_string(), wrd.len() as int)) + p.pretty_print(Token::String(/* bad */ wrd.to_string(), wrd.len() as isize)) } pub fn huge_word(p: &mut Printer, wrd: &str) -> io::IoResult<()> { @@ -678,7 +678,7 @@ pub fn hardbreak(p: &mut Printer) -> io::IoResult<()> { spaces(p, SIZE_INFINITY as usize) } -pub fn hardbreak_tok_offset(off: int) -> Token { +pub fn hardbreak_tok_offset(off: isize) -> Token { Token::Break(BreakToken {offset: off, blank_space: SIZE_INFINITY}) } diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 83ac8432f98..c537ca60b18 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -520,7 +520,7 @@ impl<'a> State<'a> { pub fn bclose_maybe_open (&mut self, span: codemap::Span, indented: usize, close_box: bool) -> IoResult<()> { try!(self.maybe_print_comment(span.hi)); - try!(self.break_offset_if_not_bol(1u, -(indented as int))); + try!(self.break_offset_if_not_bol(1u, -(indented as isize))); try!(word(&mut self.s, "}")); if close_box { try!(self.end()); // close the outer-box @@ -568,7 +568,7 @@ impl<'a> State<'a> { Ok(()) } pub fn break_offset_if_not_bol(&mut self, n: usize, - off: int) -> IoResult<()> { + off: isize) -> IoResult<()> { if !self.is_bol() { break_offset(&mut self.s, n, off) } else { diff --git a/src/libsyntax/util/small_vector.rs 
b/src/libsyntax/util/small_vector.rs index a4494a98864..da358b70225 100644 --- a/src/libsyntax/util/small_vector.rs +++ b/src/libsyntax/util/small_vector.rs @@ -191,7 +191,7 @@ mod test { #[test] fn test_len() { - let v: SmallVector = SmallVector::zero(); + let v: SmallVector = SmallVector::zero(); assert_eq!(0, v.len()); assert_eq!(1, SmallVector::one(1i).len()); @@ -214,7 +214,7 @@ mod test { #[test] fn test_from_iter() { - let v: SmallVector = (vec!(1i, 2, 3)).into_iter().collect(); + let v: SmallVector = (vec![1is, 2, 3]).into_iter().collect(); assert_eq!(3, v.len()); assert_eq!(&1, v.get(0)); assert_eq!(&2, v.get(1)); @@ -224,7 +224,7 @@ mod test { #[test] fn test_move_iter() { let v = SmallVector::zero(); - let v: Vec = v.into_iter().collect(); + let v: Vec = v.into_iter().collect(); assert_eq!(Vec::new(), v); let v = SmallVector::one(1i); @@ -237,7 +237,7 @@ mod test { #[test] #[should_fail] fn test_expect_one_zero() { - let _: int = SmallVector::zero().expect_one(""); + let _: isize = SmallVector::zero().expect_one(""); } #[test] -- cgit 1.4.1-3-g733a5 From 3c32cd1be27f321658382e39d34f5d993d99ae8b Mon Sep 17 00:00:00 2001 From: Paul Collier Date: Sun, 18 Jan 2015 00:41:56 +0000 Subject: libsyntax: 0u -> 0us, 0i -> 0is --- src/libsyntax/ast_util.rs | 18 +++++----- src/libsyntax/codemap.rs | 18 +++++----- src/libsyntax/diagnostic.rs | 32 ++++++++--------- src/libsyntax/ext/deriving/generic/mod.rs | 16 ++++----- src/libsyntax/ext/expand.rs | 4 +-- src/libsyntax/ext/quote.rs | 2 +- src/libsyntax/ext/tt/macro_parser.rs | 34 +++++++++--------- src/libsyntax/ext/tt/transcribe.rs | 2 +- src/libsyntax/parse/lexer/comments.rs | 8 ++--- src/libsyntax/parse/lexer/mod.rs | 32 ++++++++--------- src/libsyntax/parse/mod.rs | 12 +++---- src/libsyntax/parse/parser.rs | 18 +++++----- src/libsyntax/print/pp.rs | 34 +++++++++--------- src/libsyntax/print/pprust.rs | 58 +++++++++++++++---------------- src/libsyntax/test.rs | 4 +-- src/libsyntax/util/small_vector.rs | 20 +++++------ 16 files changed, 156 insertions(+), 156 deletions(-) (limited to 'src/libsyntax/parse') diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs index 73f84f4fbe7..34fd498322c 100644 --- a/src/libsyntax/ast_util.rs +++ b/src/libsyntax/ast_util.rs @@ -322,15 +322,15 @@ pub fn struct_field_visibility(field: ast::StructField) -> Visibility { pub fn operator_prec(op: ast::BinOp) -> usize { match op { // 'as' sits here with 12 - BiMul | BiDiv | BiRem => 11u, - BiAdd | BiSub => 10u, - BiShl | BiShr => 9u, - BiBitAnd => 8u, - BiBitXor => 7u, - BiBitOr => 6u, - BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3u, - BiAnd => 2u, - BiOr => 1u + BiMul | BiDiv | BiRem => 11us, + BiAdd | BiSub => 10us, + BiShl | BiShr => 9us, + BiBitAnd => 8us, + BiBitXor => 7us, + BiBitOr => 6us, + BiLt | BiLe | BiGe | BiGt | BiEq | BiNe => 3us, + BiAnd => 2us, + BiOr => 1us } } diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index 07544f35b19..a5e10f42750 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -430,7 +430,7 @@ impl CodeMap { let lo = self.lookup_char_pos(sp.lo); let hi = self.lookup_char_pos(sp.hi); let mut lines = Vec::new(); - for i in range(lo.line - 1u, hi.line as usize) { + for i in range(lo.line - 1us, hi.line as usize) { lines.push(i); }; FileLines {file: lo.file, lines: lines} @@ -498,10 +498,10 @@ impl CodeMap { let files = self.files.borrow(); let files = &*files; let len = files.len(); - let mut a = 0u; + let mut a = 0us; let mut b = len; - while b - a > 1u { - let m = (a + b) / 2u; + 
while b - a > 1us { + let m = (a + b) / 2us; if files[m].start_pos > pos { b = m; } else { @@ -537,12 +537,12 @@ impl CodeMap { let files = self.files.borrow(); let f = (*files)[idx].clone(); - let mut a = 0u; + let mut a = 0us; { let lines = f.lines.borrow(); let mut b = lines.len(); - while b - a > 1u { - let m = (a + b) / 2u; + while b - a > 1us { + let m = (a + b) / 2us; if (*lines)[m] > pos { b = m; } else { a = m; } } } @@ -551,7 +551,7 @@ impl CodeMap { fn lookup_pos(&self, pos: BytePos) -> Loc { let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos); - let line = a + 1u; // Line numbers start at 1 + let line = a + 1us; // Line numbers start at 1 let chpos = self.bytepos_to_file_charpos(pos); let linebpos = (*f.lines.borrow())[a]; let linechpos = self.bytepos_to_file_charpos(linebpos); @@ -762,7 +762,7 @@ mod test { assert_eq!(file_lines.file.name, "blork.rs"); assert_eq!(file_lines.lines.len(), 1); - assert_eq!(file_lines.lines[0], 1u); + assert_eq!(file_lines.lines[0], 1us); } #[test] diff --git a/src/libsyntax/diagnostic.rs b/src/libsyntax/diagnostic.rs index c86991b6e40..6de466ea9bd 100644 --- a/src/libsyntax/diagnostic.rs +++ b/src/libsyntax/diagnostic.rs @@ -26,7 +26,7 @@ use term::WriterWrapper; use term; /// maximum number of lines we will print for each error; arbitrary. -static MAX_LINES: usize = 6u; +static MAX_LINES: usize = 6us; #[derive(Clone, Copy)] pub enum RenderSpan { @@ -151,20 +151,20 @@ impl Handler { self.bump_err_count(); } pub fn bump_err_count(&self) { - self.err_count.set(self.err_count.get() + 1u); + self.err_count.set(self.err_count.get() + 1us); } pub fn err_count(&self) -> usize { self.err_count.get() } pub fn has_errors(&self) -> bool { - self.err_count.get()> 0u + self.err_count.get() > 0us } pub fn abort_if_errors(&self) { let s; match self.err_count.get() { - 0u => return, - 1u => s = "aborting due to previous error".to_string(), - _ => { + 0us => return, + 1us => s = "aborting due to previous error".to_string(), + _ => { s = format!("aborting due to {} previous errors", self.err_count.get()); } @@ -448,7 +448,7 @@ fn highlight_lines(err: &mut EmitterWriter, let mut elided = false; let mut display_lines = &lines.lines[]; if display_lines.len() > MAX_LINES { - display_lines = &display_lines[0u..MAX_LINES]; + display_lines = &display_lines[0us..MAX_LINES]; elided = true; } // Print the offending lines @@ -459,32 +459,32 @@ fn highlight_lines(err: &mut EmitterWriter, } } if elided { - let last_line = display_lines[display_lines.len() - 1u]; - let s = format!("{}:{} ", fm.name, last_line + 1u); + let last_line = display_lines[display_lines.len() - 1us]; + let s = format!("{}:{} ", fm.name, last_line + 1us); try!(write!(&mut err.dst, "{0:1$}...\n", "", s.len())); } // FIXME (#3260) // If there's one line at fault we can easily point to the problem - if lines.lines.len() == 1u { + if lines.lines.len() == 1us { let lo = cm.lookup_char_pos(sp.lo); - let mut digits = 0u; - let mut num = (lines.lines[0] + 1u) / 10u; + let mut digits = 0us; + let mut num = (lines.lines[0] + 1us) / 10us; // how many digits must be indent past? - while num > 0u { num /= 10u; digits += 1u; } + while num > 0us { num /= 10us; digits += 1us; } // indent past |name:## | and the 0-offset column location - let left = fm.name.len() + digits + lo.col.to_usize() + 3u; + let left = fm.name.len() + digits + lo.col.to_usize() + 3us; let mut s = String::new(); // Skip is the number of characters we need to skip because they are // part of the 'filename:line ' part of the previous line. 
- let skip = fm.name.len() + digits + 3u; + let skip = fm.name.len() + digits + 3us; for _ in range(0, skip) { s.push(' '); } if let Some(orig) = fm.get_line(lines.lines[0]) { - for pos in range(0u, left - skip) { + for pos in range(0us, left - skip) { let cur_char = orig.as_bytes()[pos] as char; // Whenever a tab occurs on the previous line, we insert one on // the error-point-squiggly-line as well (instead of a space). diff --git a/src/libsyntax/ext/deriving/generic/mod.rs b/src/libsyntax/ext/deriving/generic/mod.rs index ca3c1d36fba..b77c203acfb 100644 --- a/src/libsyntax/ext/deriving/generic/mod.rs +++ b/src/libsyntax/ext/deriving/generic/mod.rs @@ -748,7 +748,7 @@ impl<'a> MethodDef<'a> { let mut raw_fields = Vec::new(); // ~[[fields of self], // [fields of next Self arg], [etc]] let mut patterns = Vec::new(); - for i in range(0u, self_args.len()) { + for i in range(0us, self_args.len()) { let struct_path= cx.path(DUMMY_SP, vec!( type_ident )); let (pat, ident_expr) = trait_.create_struct_pattern(cx, @@ -837,8 +837,8 @@ impl<'a> MethodDef<'a> { /// (&A2(ref __self_0), /// &A2(ref __arg_1_0)) => (*__self_0).eq(&(*__arg_1_0)), /// _ => { - /// let __self_vi = match *self { A1(..) => 0u, A2(..) => 1u }; - /// let __arg_1_vi = match *__arg_1 { A1(..) => 0u, A2(..) => 1u }; + /// let __self_vi = match *self { A1(..) => 0us, A2(..) => 1us }; + /// let __arg_1_vi = match *__arg_1 { A1(..) => 0us, A2(..) => 1us }; /// false /// } /// } @@ -882,8 +882,8 @@ impl<'a> MethodDef<'a> { /// (Variant2, Variant2, Variant2) => ... // delegate Matching on Variant2 /// ... /// _ => { - /// let __this_vi = match this { Variant1 => 0u, Variant2 => 1u, ... }; - /// let __that_vi = match that { Variant1 => 0u, Variant2 => 1u, ... }; + /// let __this_vi = match this { Variant1 => 0us, Variant2 => 1us, ... }; + /// let __that_vi = match that { Variant1 => 0us, Variant2 => 1us, ... }; /// ... // catch-all remainder can inspect above variant index values. /// } /// } @@ -1045,13 +1045,13 @@ impl<'a> MethodDef<'a> { // // ``` // let __self0_vi = match self { - // A => 0u, B(..) => 1u, C(..) => 2u + // A => 0us, B(..) => 1us, C(..) => 2us // }; // let __self1_vi = match __arg1 { - // A => 0u, B(..) => 1u, C(..) => 2u + // A => 0us, B(..) => 1us, C(..) => 2us // }; // let __self2_vi = match __arg2 { - // A => 0u, B(..) => 1u, C(..) => 2u + // A => 0us, B(..) => 1us, C(..) => 2us // }; // ``` let mut index_let_stmts: Vec> = Vec::new(); diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 4f614c1a937..9aa602b39b4 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -273,7 +273,7 @@ fn expand_mac_invoc(mac: ast::Mac, span: codemap::Span, // in this file. // Token-tree macros: MacInvocTT(pth, tts, _) => { - if pth.segments.len() > 1u { + if pth.segments.len() > 1us { fld.cx.span_err(pth.span, "expected macro name without module \ separators"); @@ -844,7 +844,7 @@ fn expand_pat(p: P, fld: &mut MacroExpander) -> P { }, _ => unreachable!() }; - if pth.segments.len() > 1u { + if pth.segments.len() > 1us { fld.cx.span_err(pth.span, "expected macro name without module separators"); return DummyResult::raw_pat(span); } diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs index a5885ac6c5d..5339c3d77c6 100644 --- a/src/libsyntax/ext/quote.rs +++ b/src/libsyntax/ext/quote.rs @@ -716,7 +716,7 @@ fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree]) // try removing it when enough of them are gone. 
let mut p = cx.new_parser_from_tts(tts); - p.quote_depth += 1u; + p.quote_depth += 1us; let cx_expr = p.parse_expr(); if !p.eat(&token::Comma) { diff --git a/src/libsyntax/ext/tt/macro_parser.rs b/src/libsyntax/ext/tt/macro_parser.rs index 75d904a4632..d115f2ed620 100644 --- a/src/libsyntax/ext/tt/macro_parser.rs +++ b/src/libsyntax/ext/tt/macro_parser.rs @@ -171,11 +171,11 @@ pub fn initial_matcher_pos(ms: Rc>, sep: Option, lo: ByteP stack: vec![], top_elts: TtSeq(ms), sep: sep, - idx: 0u, + idx: 0us, up: None, matches: matches, - match_lo: 0u, - match_cur: 0u, + match_lo: 0us, + match_cur: 0us, match_hi: match_idx_hi, sp_lo: lo } @@ -238,7 +238,7 @@ pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc]) } } let mut ret_val = HashMap::new(); - let mut idx = 0u; + let mut idx = 0us; for m in ms.iter() { n_rec(p_s, m, res, &mut ret_val, &mut idx) } ret_val } @@ -383,7 +383,7 @@ pub fn parse(sess: &ParseSess, if seq.op == ast::ZeroOrMore { let mut new_ei = ei.clone(); new_ei.match_cur += seq.num_captures; - new_ei.idx += 1u; + new_ei.idx += 1us; //we specifically matched zero repeats. for idx in range(ei.match_cur, ei.match_cur + seq.num_captures) { (&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp))); @@ -398,7 +398,7 @@ pub fn parse(sess: &ParseSess, cur_eis.push(box MatcherPos { stack: vec![], sep: seq.separator.clone(), - idx: 0u, + idx: 0us, matches: matches, match_lo: ei_t.match_cur, match_cur: ei_t.match_cur, @@ -442,20 +442,20 @@ pub fn parse(sess: &ParseSess, /* error messages here could be improved with links to orig. rules */ if token_name_eq(&tok, &token::Eof) { - if eof_eis.len() == 1u { + if eof_eis.len() == 1us { let mut v = Vec::new(); for dv in (&mut eof_eis[0]).matches.iter_mut() { v.push(dv.pop().unwrap()); } return Success(nameize(sess, ms, &v[])); - } else if eof_eis.len() > 1u { + } else if eof_eis.len() > 1us { return Error(sp, "ambiguity: multiple successful parses".to_string()); } else { return Failure(sp, "unexpected end of macro invocation".to_string()); } } else { - if (bb_eis.len() > 0u && next_eis.len() > 0u) - || bb_eis.len() > 1u { + if (bb_eis.len() > 0us && next_eis.len() > 0us) + || bb_eis.len() > 1us { let nts = bb_eis.iter().map(|ei| { match ei.top_elts.get_tt(ei.idx) { TtToken(_, MatchNt(bind, name, _, _)) => { @@ -469,12 +469,12 @@ pub fn parse(sess: &ParseSess, "local ambiguity: multiple parsing options: \ built-in NTs {} or {} other options.", nts, next_eis.len()).to_string()); - } else if bb_eis.len() == 0u && next_eis.len() == 0u { + } else if bb_eis.len() == 0us && next_eis.len() == 0us { return Failure(sp, format!("no rules expected the token `{}`", pprust::token_to_string(&tok)).to_string()); - } else if next_eis.len() > 0u { + } else if next_eis.len() > 0us { /* Now process the next token */ - while next_eis.len() > 0u { + while next_eis.len() > 0us { cur_eis.push(next_eis.pop().unwrap()); } rdr.next_token(); @@ -488,7 +488,7 @@ pub fn parse(sess: &ParseSess, let match_cur = ei.match_cur; (&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal( parse_nt(&mut rust_parser, name_string.get())))); - ei.idx += 1u; + ei.idx += 1us; ei.match_cur += 1; } _ => panic!() @@ -501,16 +501,16 @@ pub fn parse(sess: &ParseSess, } } - assert!(cur_eis.len() > 0u); + assert!(cur_eis.len() > 0us); } } pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal { match name { "tt" => { - p.quote_depth += 1u; //but in theory, non-quoted tts might be useful + p.quote_depth += 1us; //but in theory, non-quoted tts might be useful let res = 
token::NtTT(P(p.parse_token_tree())); - p.quote_depth -= 1u; + p.quote_depth -= 1us; return res; } _ => {} diff --git a/src/libsyntax/ext/tt/transcribe.rs b/src/libsyntax/ext/tt/transcribe.rs index 7936b9fcfc7..0bf20b8f3e1 100644 --- a/src/libsyntax/ext/tt/transcribe.rs +++ b/src/libsyntax/ext/tt/transcribe.rs @@ -223,7 +223,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { r.repeat_len.pop(); } } else { /* repeat */ - *r.repeat_idx.last_mut().unwrap() += 1u; + *r.repeat_idx.last_mut().unwrap() += 1us; r.stack.last_mut().unwrap().idx = 0; match r.stack.last().unwrap().sep.clone() { Some(tk) => { diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index ca9091856d6..2799696e8eb 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -62,7 +62,7 @@ pub fn doc_comment_style(comment: &str) -> ast::AttrStyle { pub fn strip_doc_comment_decoration(comment: &str) -> String { /// remove whitespace-only lines from the start/end of lines fn vertical_trim(lines: Vec ) -> Vec { - let mut i = 0u; + let mut i = 0us; let mut j = lines.len(); // first line of all-stars should be omitted if lines.len() > 0 && @@ -132,7 +132,7 @@ pub fn strip_doc_comment_decoration(comment: &str) -> String { } if comment.starts_with("/*") { - let lines = comment[3u..(comment.len() - 2u)] + let lines = comment[3us..(comment.len() - 2us)] .lines_any() .map(|s| s.to_string()) .collect:: >(); @@ -158,7 +158,7 @@ fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec) { fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader, comments: &mut Vec) { while is_whitespace(rdr.curr) && !rdr.is_eof() { - if rdr.col == CharPos(0u) && rdr.curr_is('\n') { + if rdr.col == CharPos(0us) && rdr.curr_is('\n') { push_blank_line_comment(rdr, &mut *comments); } rdr.bump(); @@ -305,7 +305,7 @@ fn read_block_comment(rdr: &mut StringReader, let mut style = if code_to_the_left { Trailing } else { Isolated }; rdr.consume_non_eol_whitespace(); - if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1u { + if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1us { style = Mixed; } debug!("<<< block comment"); diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 2fcf43f9ead..d18bf554975 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -279,7 +279,7 @@ impl<'a> StringReader<'a> { /// Converts CRLF to LF in the given string, raising an error on bare CR. 
fn translate_crlf<'b>(&self, start: BytePos, s: &'b str, errmsg: &'b str) -> CowString<'b> { - let mut i = 0u; + let mut i = 0us; while i < s.len() { let str::CharRange { ch, next } = s.char_range_at(i); if ch == '\r' { @@ -331,10 +331,10 @@ impl<'a> StringReader<'a> { let byte_offset_diff = next.next - current_byte_offset; self.pos = self.pos + Pos::from_usize(byte_offset_diff); self.curr = Some(next.ch); - self.col = self.col + CharPos(1u); + self.col = self.col + CharPos(1us); if last_char == '\n' { self.filemap.next_line(self.last_pos); - self.col = CharPos(0u); + self.col = CharPos(0us); } if byte_offset_diff > 1 { @@ -472,7 +472,7 @@ impl<'a> StringReader<'a> { cmap.files.borrow_mut().push(self.filemap.clone()); let loc = cmap.lookup_char_pos_adj(self.last_pos); debug!("Skipping a shebang"); - if loc.line == 1u && loc.col == CharPos(0u) { + if loc.line == 1us && loc.col == CharPos(0us) { // FIXME: Add shebang "token", return it let start = self.last_pos; while !self.curr_is('\n') && !self.is_eof() { self.bump(); } @@ -646,7 +646,7 @@ impl<'a> StringReader<'a> { /// Scan through any digits (base `radix`) or underscores, and return how /// many digits there were. fn scan_digits(&mut self, radix: usize) -> usize { - let mut len = 0u; + let mut len = 0us; loop { let c = self.curr; if c == Some('_') { debug!("skipping a _"); self.bump(); continue; } @@ -799,14 +799,14 @@ impl<'a> StringReader<'a> { if self.curr == Some('{') { self.scan_unicode_escape(delim) } else { - let res = self.scan_hex_digits(4u, delim, false); + let res = self.scan_hex_digits(4us, delim, false); let sp = codemap::mk_sp(escaped_pos, self.last_pos); self.old_escape_warning(sp); res } } 'U' if !ascii_only => { - let res = self.scan_hex_digits(8u, delim, false); + let res = self.scan_hex_digits(8us, delim, false); let sp = codemap::mk_sp(escaped_pos, self.last_pos); self.old_escape_warning(sp); res @@ -937,11 +937,11 @@ impl<'a> StringReader<'a> { /// error if it isn't. 
fn check_float_base(&mut self, start_bpos: BytePos, last_bpos: BytePos, base: usize) { match base { - 16u => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \ - supported"), - 8u => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"), - 2u => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"), - _ => () + 16us => self.err_span_(start_bpos, last_bpos, "hexadecimal float literal is not \ + supported"), + 8us => self.err_span_(start_bpos, last_bpos, "octal float literal is not supported"), + 2us => self.err_span_(start_bpos, last_bpos, "binary float literal is not supported"), + _ => () } } @@ -1189,7 +1189,7 @@ impl<'a> StringReader<'a> { 'r' => { let start_bpos = self.last_pos; self.bump(); - let mut hash_count = 0u; + let mut hash_count = 0us; while self.curr_is('#') { self.bump(); hash_count += 1; @@ -1374,7 +1374,7 @@ impl<'a> StringReader<'a> { fn scan_raw_byte_string(&mut self) -> token::Lit { let start_bpos = self.last_pos; self.bump(); - let mut hash_count = 0u; + let mut hash_count = 0us; while self.curr_is('#') { self.bump(); hash_count += 1; @@ -1616,9 +1616,9 @@ mod test { test!("1.0", Float, "1.0"); test!("1.0e10", Float, "1.0e10"); - assert_eq!(setup(&mk_sh(), "2u".to_string()).next_token().tok, + assert_eq!(setup(&mk_sh(), "2us".to_string()).next_token().tok, token::Literal(token::Integer(token::intern("2")), - Some(token::intern("u")))); + Some(token::intern("us")))); assert_eq!(setup(&mk_sh(), "r###\"raw\"###suffix".to_string()).next_token().tok, token::Literal(token::StrRaw(token::intern("raw"), 3), Some(token::intern("suffix")))); diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 33da7c160d9..29414ef94b5 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -181,7 +181,7 @@ pub fn parse_tts_from_source_str(name: String, name, source ); - p.quote_depth += 1u; + p.quote_depth += 1us; // right now this is re-creating the token trees from ... token trees. maybe_aborted(p.parse_all_token_trees(),p) } @@ -325,7 +325,7 @@ pub mod with_hygiene { name, source ); - p.quote_depth += 1u; + p.quote_depth += 1us; // right now this is re-creating the token trees from ... token trees. maybe_aborted(p.parse_all_token_trees(),p) } @@ -574,7 +574,7 @@ pub fn byte_lit(lit: &str) -> (u8, usize) { if lit.len() == 1 { (lit.as_bytes()[0], 1) } else { - assert!(lit.as_bytes()[0] == b'\\', err(0i)); + assert!(lit.as_bytes()[0] == b'\\', err(0is)); let b = match lit.as_bytes()[1] { b'"' => b'"', b'n' => b'\n', @@ -684,9 +684,9 @@ pub fn integer_lit(s: &str, suffix: Option<&str>, sd: &SpanHandler, sp: Span) -> match suffix { Some(suf) if looks_like_width_suffix(&['f'], suf) => { match base { - 16u => sd.span_err(sp, "hexadecimal float literal is not supported"), - 8u => sd.span_err(sp, "octal float literal is not supported"), - 2u => sd.span_err(sp, "binary float literal is not supported"), + 16us => sd.span_err(sp, "hexadecimal float literal is not supported"), + 8us => sd.span_err(sp, "octal float literal is not supported"), + 2us => sd.span_err(sp, "binary float literal is not supported"), _ => () } let ident = token::intern_and_get_ident(&*s); diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 88825cb3f34..ea7c80d3fc4 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -767,7 +767,7 @@ impl<'a> Parser<'a> { // would encounter a `>` and stop. 
This lets the parser handle trailing // commas in generic parameters, because it can stop either after // parsing a type or after parsing a comma. - for i in iter::count(0u, 1) { + for i in iter::count(0us, 1) { if self.check(&token::Gt) || self.token == token::BinOp(token::Shr) || self.token == token::Ge @@ -944,7 +944,7 @@ impl<'a> Parser<'a> { }; self.span = next.sp; self.token = next.tok; - self.tokens_consumed += 1u; + self.tokens_consumed += 1us; self.expected_tokens.clear(); // check after each token self.check_unknown_macro_variable(); @@ -2638,7 +2638,7 @@ impl<'a> Parser<'a> { } pub fn check_unknown_macro_variable(&mut self) { - if self.quote_depth == 0u { + if self.quote_depth == 0us { match self.token { token::SubstNt(name, _) => self.fatal(&format!("unknown macro variable `{}`", @@ -2707,7 +2707,7 @@ impl<'a> Parser<'a> { token_str)[]) }, /* we ought to allow different depths of unquotation */ - token::Dollar | token::SubstNt(..) if p.quote_depth > 0u => { + token::Dollar | token::SubstNt(..) if p.quote_depth > 0us => { p.parse_unquoted() } _ => { @@ -5079,7 +5079,7 @@ impl<'a> Parser<'a> { } } - if first && attrs_remaining_len > 0u { + if first && attrs_remaining_len > 0us { // We parsed attributes for the first item but didn't find it let last_span = self.last_span; self.span_err(last_span, @@ -5683,7 +5683,7 @@ impl<'a> Parser<'a> { return IoviItem(item); } if self.token.is_keyword(keywords::Unsafe) && - self.look_ahead(1u, |t| t.is_keyword(keywords::Trait)) + self.look_ahead(1us, |t| t.is_keyword(keywords::Trait)) { // UNSAFE TRAIT ITEM self.expect_keyword(keywords::Unsafe); @@ -5700,7 +5700,7 @@ impl<'a> Parser<'a> { return IoviItem(item); } if self.token.is_keyword(keywords::Unsafe) && - self.look_ahead(1u, |t| t.is_keyword(keywords::Impl)) + self.look_ahead(1us, |t| t.is_keyword(keywords::Impl)) { // IMPL ITEM self.expect_keyword(keywords::Unsafe); @@ -5731,7 +5731,7 @@ impl<'a> Parser<'a> { return IoviItem(item); } if self.token.is_keyword(keywords::Unsafe) - && self.look_ahead(1u, |t| *t != token::OpenDelim(token::Brace)) { + && self.look_ahead(1us, |t| *t != token::OpenDelim(token::Brace)) { // UNSAFE FUNCTION ITEM self.bump(); let abi = if self.eat_keyword(keywords::Extern) { @@ -6035,7 +6035,7 @@ impl<'a> Parser<'a> { } } } - let mut rename_to = path[path.len() - 1u]; + let mut rename_to = path[path.len() - 1us]; let path = ast::Path { span: mk_sp(lo, self.last_span.hi), global: false, diff --git a/src/libsyntax/print/pp.rs b/src/libsyntax/print/pp.rs index ca99fb2ef89..0b1bd282941 100644 --- a/src/libsyntax/print/pp.rs +++ b/src/libsyntax/print/pp.rs @@ -132,15 +132,15 @@ pub fn buf_str(toks: &[Token], let mut i = left; let mut l = lim; let mut s = string::String::from_str("["); - while i != right && l != 0u { - l -= 1u; + while i != right && l != 0us { + l -= 1us; if i != left { s.push_str(", "); } s.push_str(&format!("{}={}", szs[i], tok_str(&toks[i]))[]); - i += 1u; + i += 1us; i %= n; } s.push(']'); @@ -167,7 +167,7 @@ pub fn mk_printer(out: Box, linewidth: usize) -> Printer { let n: usize = 3 * linewidth; debug!("mk_printer {}", linewidth); let token: Vec = repeat(Token::Eof).take(n).collect(); - let size: Vec = repeat(0i).take(n).collect(); + let size: Vec = repeat(0is).take(n).collect(); let scan_stack: Vec = repeat(0us).take(n).collect(); Printer { out: out, @@ -326,8 +326,8 @@ impl Printer { if self.scan_stack_empty { self.left_total = 1; self.right_total = 1; - self.left = 0u; - self.right = 0u; + self.left = 0us; + self.right = 0us; } else { 
self.advance_right(); } debug!("pp Begin({})/buffer ~[{},{}]", b.offset, self.left, self.right); @@ -355,8 +355,8 @@ impl Printer { if self.scan_stack_empty { self.left_total = 1; self.right_total = 1; - self.left = 0u; - self.right = 0u; + self.left = 0us; + self.right = 0us; } else { self.advance_right(); } debug!("pp Break({})/buffer ~[{},{}]", b.offset, self.left, self.right); @@ -410,7 +410,7 @@ impl Printer { if self.scan_stack_empty { self.scan_stack_empty = false; } else { - self.top += 1u; + self.top += 1us; self.top %= self.buf_len; assert!((self.top != self.bottom)); } @@ -422,7 +422,7 @@ impl Printer { if self.top == self.bottom { self.scan_stack_empty = true; } else { - self.top += self.buf_len - 1u; self.top %= self.buf_len; + self.top += self.buf_len - 1us; self.top %= self.buf_len; } return x; } @@ -436,12 +436,12 @@ impl Printer { if self.top == self.bottom { self.scan_stack_empty = true; } else { - self.bottom += 1u; self.bottom %= self.buf_len; + self.bottom += 1us; self.bottom %= self.buf_len; } return x; } pub fn advance_right(&mut self) { - self.right += 1u; + self.right += 1us; self.right %= self.buf_len; assert!((self.right != self.left)); } @@ -471,7 +471,7 @@ impl Printer { break; } - self.left += 1u; + self.left += 1us; self.left %= self.buf_len; left_size = self.size[self.left]; @@ -520,7 +520,7 @@ impl Printer { pub fn get_top(&mut self) -> PrintStackElem { let print_stack = &mut self.print_stack; let n = print_stack.len(); - if n != 0u { + if n != 0us { (*print_stack)[n - 1] } else { PrintStackElem { @@ -565,7 +565,7 @@ impl Printer { Token::End => { debug!("print End -> pop End"); let print_stack = &mut self.print_stack; - assert!((print_stack.len() != 0u)); + assert!((print_stack.len() != 0us)); print_stack.pop().unwrap(); Ok(()) } @@ -667,11 +667,11 @@ pub fn spaces(p: &mut Printer, n: usize) -> io::IoResult<()> { } pub fn zerobreak(p: &mut Printer) -> io::IoResult<()> { - spaces(p, 0u) + spaces(p, 0us) } pub fn space(p: &mut Printer) -> io::IoResult<()> { - spaces(p, 1u) + spaces(p, 1us) } pub fn hardbreak(p: &mut Printer) -> io::IoResult<()> { diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index c537ca60b18..78a97f310d2 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -381,7 +381,7 @@ pub fn block_to_string(blk: &ast::Block) -> String { // containing cbox, will be closed by print-block at } try!(s.cbox(indent_unit)); // head-ibox, will be closed by print-block after { - try!(s.ibox(0u)); + try!(s.ibox(0us)); s.print_block(blk) }) } @@ -520,7 +520,7 @@ impl<'a> State<'a> { pub fn bclose_maybe_open (&mut self, span: codemap::Span, indented: usize, close_box: bool) -> IoResult<()> { try!(self.maybe_print_comment(span.hi)); - try!(self.break_offset_if_not_bol(1u, -(indented as isize))); + try!(self.break_offset_if_not_bol(1us, -(indented as isize))); try!(word(&mut self.s, "}")); if close_box { try!(self.end()); // close the outer-box @@ -595,7 +595,7 @@ impl<'a> State<'a> { pub fn commasep(&mut self, b: Breaks, elts: &[T], mut op: F) -> IoResult<()> where F: FnMut(&mut State, &T) -> IoResult<()>, { - try!(self.rbox(0u, b)); + try!(self.rbox(0us, b)); let mut first = true; for elt in elts.iter() { if first { first = false; } else { try!(self.word_space(",")); } @@ -613,13 +613,13 @@ impl<'a> State<'a> { F: FnMut(&mut State, &T) -> IoResult<()>, G: FnMut(&T) -> codemap::Span, { - try!(self.rbox(0u, b)); + try!(self.rbox(0us, b)); let len = elts.len(); - let mut i = 0u; + let mut i = 0us; for elt in 
elts.iter() { try!(self.maybe_print_comment(get_span(elt).hi)); try!(op(self, elt)); - i += 1u; + i += 1us; if i < len { try!(word(&mut self.s, ",")); try!(self.maybe_print_trailing_comment(get_span(elt), @@ -670,7 +670,7 @@ impl<'a> State<'a> { pub fn print_type(&mut self, ty: &ast::Ty) -> IoResult<()> { try!(self.maybe_print_comment(ty.span.lo)); - try!(self.ibox(0u)); + try!(self.ibox(0us)); match ty.node { ast::TyVec(ref ty) => { try!(word(&mut self.s, "[")); @@ -871,7 +871,7 @@ impl<'a> State<'a> { } ast::ItemTy(ref ty, ref params) => { try!(self.ibox(indent_unit)); - try!(self.ibox(0u)); + try!(self.ibox(0us)); try!(self.word_nbsp(&visibility_qualified(item.vis, "type")[])); try!(self.print_ident(item.ident)); try!(self.print_generics(params)); @@ -1262,7 +1262,7 @@ impl<'a> State<'a> { pub fn print_outer_attributes(&mut self, attrs: &[ast::Attribute]) -> IoResult<()> { - let mut count = 0u; + let mut count = 0us; for attr in attrs.iter() { match attr.node.style { ast::AttrOuter => { @@ -1280,7 +1280,7 @@ impl<'a> State<'a> { pub fn print_inner_attributes(&mut self, attrs: &[ast::Attribute]) -> IoResult<()> { - let mut count = 0u; + let mut count = 0us; for attr in attrs.iter() { match attr.node.style { ast::AttrInner => { @@ -1404,8 +1404,8 @@ impl<'a> State<'a> { match _else.node { // "another else-if" ast::ExprIf(ref i, ref then, ref e) => { - try!(self.cbox(indent_unit - 1u)); - try!(self.ibox(0u)); + try!(self.cbox(indent_unit - 1us)); + try!(self.ibox(0us)); try!(word(&mut self.s, " else if ")); try!(self.print_expr(&**i)); try!(space(&mut self.s)); @@ -1414,8 +1414,8 @@ impl<'a> State<'a> { } // "another else-if-let" ast::ExprIfLet(ref pat, ref expr, ref then, ref e) => { - try!(self.cbox(indent_unit - 1u)); - try!(self.ibox(0u)); + try!(self.cbox(indent_unit - 1us)); + try!(self.ibox(0us)); try!(word(&mut self.s, " else if let ")); try!(self.print_pat(&**pat)); try!(space(&mut self.s)); @@ -1427,8 +1427,8 @@ impl<'a> State<'a> { } // "final else" ast::ExprBlock(ref b) => { - try!(self.cbox(indent_unit - 1u)); - try!(self.ibox(0u)); + try!(self.cbox(indent_unit - 1us)); + try!(self.ibox(0us)); try!(word(&mut self.s, " else ")); self.print_block(&**b) } @@ -1594,7 +1594,7 @@ impl<'a> State<'a> { try!(self.print_expr(&*args[0])); try!(word(&mut self.s, ".")); try!(self.print_ident(ident.node)); - if tys.len() > 0u { + if tys.len() > 0us { try!(word(&mut self.s, "::<")); try!(self.commasep(Inconsistent, tys, |s, ty| s.print_type(&**ty))); @@ -1765,7 +1765,7 @@ impl<'a> State<'a> { // containing cbox, will be closed by print-block at } try!(self.cbox(indent_unit)); // head-box, will be closed by print-block after { - try!(self.ibox(0u)); + try!(self.ibox(0us)); try!(self.print_block(&**blk)); } ast::ExprAssign(ref lhs, ref rhs) => { @@ -2142,7 +2142,7 @@ impl<'a> State<'a> { }, |f| f.node.pat.span)); if etc { - if fields.len() != 0u { try!(self.word_space(",")); } + if fields.len() != 0us { try!(self.word_space(",")); } try!(word(&mut self.s, "..")); } try!(space(&mut self.s)); @@ -2209,7 +2209,7 @@ impl<'a> State<'a> { try!(space(&mut self.s)); } try!(self.cbox(indent_unit)); - try!(self.ibox(0u)); + try!(self.ibox(0us)); try!(self.print_outer_attributes(&arm.attrs[])); let mut first = true; for p in arm.pats.iter() { @@ -2295,7 +2295,7 @@ impl<'a> State<'a> { -> IoResult<()> { // It is unfortunate to duplicate the commasep logic, but we want the // self type and the args all in the same box. 
- try!(self.rbox(0u, Inconsistent)); + try!(self.rbox(0us, Inconsistent)); let mut first = true; for &explicit_self in opt_explicit_self.iter() { let m = match explicit_self { @@ -2472,7 +2472,7 @@ impl<'a> State<'a> { try!(word(&mut self.s, "<")); let mut ints = Vec::new(); - for i in range(0u, total) { + for i in range(0us, total) { ints.push(i); } @@ -2794,7 +2794,7 @@ impl<'a> State<'a> { if span.hi < (*cmnt).pos && (*cmnt).pos < next && span_line.line == comment_line.line { try!(self.print_comment(cmnt)); - self.cur_cmnt_and_lit.cur_cmnt += 1u; + self.cur_cmnt_and_lit.cur_cmnt += 1us; } } _ => () @@ -2812,7 +2812,7 @@ impl<'a> State<'a> { match self.next_comment() { Some(ref cmnt) => { try!(self.print_comment(cmnt)); - self.cur_cmnt_and_lit.cur_cmnt += 1u; + self.cur_cmnt_and_lit.cur_cmnt += 1us; } _ => break } @@ -2894,7 +2894,7 @@ impl<'a> State<'a> { while self.cur_cmnt_and_lit.cur_lit < lits.len() { let ltrl = (*lits)[self.cur_cmnt_and_lit.cur_lit].clone(); if ltrl.pos > pos { return None; } - self.cur_cmnt_and_lit.cur_lit += 1u; + self.cur_cmnt_and_lit.cur_lit += 1us; if ltrl.pos == pos { return Some(ltrl); } } None @@ -2909,7 +2909,7 @@ impl<'a> State<'a> { Some(ref cmnt) => { if (*cmnt).pos < pos { try!(self.print_comment(cmnt)); - self.cur_cmnt_and_lit.cur_cmnt += 1u; + self.cur_cmnt_and_lit.cur_cmnt += 1us; } else { break; } } _ => break @@ -2922,7 +2922,7 @@ impl<'a> State<'a> { cmnt: &comments::Comment) -> IoResult<()> { match cmnt.style { comments::Mixed => { - assert_eq!(cmnt.lines.len(), 1u); + assert_eq!(cmnt.lines.len(), 1us); try!(zerobreak(&mut self.s)); try!(word(&mut self.s, &cmnt.lines[0][])); zerobreak(&mut self.s) @@ -2941,11 +2941,11 @@ impl<'a> State<'a> { } comments::Trailing => { try!(word(&mut self.s, " ")); - if cmnt.lines.len() == 1u { + if cmnt.lines.len() == 1us { try!(word(&mut self.s, &cmnt.lines[0][])); hardbreak(&mut self.s) } else { - try!(self.ibox(0u)); + try!(self.ibox(0us)); for line in cmnt.lines.iter() { if !line.is_empty() { try!(word(&mut self.s, &line[])); diff --git a/src/libsyntax/test.rs b/src/libsyntax/test.rs index 895268f96c8..ee8742825f4 100644 --- a/src/libsyntax/test.rs +++ b/src/libsyntax/test.rs @@ -345,8 +345,8 @@ fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool { let tparm_cnt = generics.ty_params.len(); // NB: inadequate check, but we're running // well before resolve, can't get too deep. 
- input_cnt == 1u - && no_output && tparm_cnt == 0u + input_cnt == 1us + && no_output && tparm_cnt == 0us } _ => false } diff --git a/src/libsyntax/util/small_vector.rs b/src/libsyntax/util/small_vector.rs index da358b70225..d8bac19805b 100644 --- a/src/libsyntax/util/small_vector.rs +++ b/src/libsyntax/util/small_vector.rs @@ -194,14 +194,14 @@ mod test { let v: SmallVector = SmallVector::zero(); assert_eq!(0, v.len()); - assert_eq!(1, SmallVector::one(1i).len()); - assert_eq!(5, SmallVector::many(vec!(1i, 2, 3, 4, 5)).len()); + assert_eq!(1, SmallVector::one(1is).len()); + assert_eq!(5, SmallVector::many(vec!(1is, 2, 3, 4, 5)).len()); } #[test] fn test_push_get() { let mut v = SmallVector::zero(); - v.push(1i); + v.push(1is); assert_eq!(1, v.len()); assert_eq!(&1, v.get(0)); v.push(2); @@ -227,11 +227,11 @@ mod test { let v: Vec = v.into_iter().collect(); assert_eq!(Vec::new(), v); - let v = SmallVector::one(1i); - assert_eq!(vec!(1i), v.into_iter().collect::>()); + let v = SmallVector::one(1is); + assert_eq!(vec!(1is), v.into_iter().collect::>()); - let v = SmallVector::many(vec!(1i, 2i, 3i)); - assert_eq!(vec!(1i, 2i, 3i), v.into_iter().collect::>()); + let v = SmallVector::many(vec!(1is, 2is, 3is)); + assert_eq!(vec!(1is, 2is, 3is), v.into_iter().collect::>()); } #[test] @@ -243,12 +243,12 @@ mod test { #[test] #[should_fail] fn test_expect_one_many() { - SmallVector::many(vec!(1i, 2)).expect_one(""); + SmallVector::many(vec!(1is, 2)).expect_one(""); } #[test] fn test_expect_one_one() { - assert_eq!(1i, SmallVector::one(1i).expect_one("")); - assert_eq!(1i, SmallVector::many(vec!(1i)).expect_one("")); + assert_eq!(1is, SmallVector::one(1is).expect_one("")); + assert_eq!(1is, SmallVector::many(vec!(1is)).expect_one("")); } } -- cgit 1.4.1-3-g733a5
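
Taken together, the three patches above are one mechanical migration: the pointer-sized integer types `uint` and `int` become `usize` and `isize`, and the matching literal suffixes `0u`/`1i` become `0us`/`1is` (the transitional January 2015 spellings, themselves later replaced by full `usize`/`isize` suffixes before Rust 1.0). The sketch below is illustrative only and is not part of the patch series; the function names are hypothetical, and it simply shows the post-migration types in use, in the spirit of signatures such as `fn scan_digits(&mut self, radix: usize) -> usize` in lexer/mod.rs or the parser's `buffer_start: isize` / `buffer_end: isize` fields.

// Hypothetical sketch, not taken from the patches: post-migration use of the
// pointer-sized integer types. `usize` replaces `uint`; `isize` replaces `int`.
fn count_digits(mut n: usize, radix: usize) -> usize {
    // Count how many base-`radix` digits `n` has.
    let mut digits: usize = 1;
    while n >= radix {
        n /= radix;
        digits += 1;
    }
    digits
}

fn signed_distance(a: usize, b: usize) -> isize {
    // The kind of signed, pointer-sized arithmetic the parser's buffer
    // bookkeeping performs (e.g. buffer_end - buffer_start).
    a as isize - b as isize
}

fn main() {
    assert_eq!(count_digits(255, 16), 2);
    assert_eq!(signed_distance(3, 7), -4);
}

The literal-suffix half of the change follows the same pattern as the type renames; in current Rust those literals would be written `0usize` and `1isize`, but the `us`/`is` suffixes used in the patches match the grammar accepted at the time these commits landed.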