about summary refs log tree commit diff
path: root/src/libsyntax/parse
diff options
context:
space:
mode:
author    bors <bors@rust-lang.org>  2014-04-18 18:46:33 -0700
committer bors <bors@rust-lang.org>  2014-04-18 18:46:33 -0700
commit    af24045ff0e17764524a9eaf243479a3260c2d8b (patch)
tree      bc160d119b2d963afa53e9bbb59aacee6bdc4ecd /src/libsyntax/parse
parent    9b7cfd3c724bbad9dd8a0115bb2619f307b73f8c (diff)
parent    919889a1d688a6bbe2edac8705f048f06b1b455c (diff)
download  rust-af24045ff0e17764524a9eaf243479a3260c2d8b.tar.gz
download  rust-af24045ff0e17764524a9eaf243479a3260c2d8b.zip
auto merge of #13607 : brson/rust/to_owned, r=brson
Continues https://github.com/mozilla/rust/pull/13548
Diffstat (limited to 'src/libsyntax/parse')
-rw-r--r--  src/libsyntax/parse/comments.rs |  26
-rw-r--r--  src/libsyntax/parse/lexer.rs    |  81
-rw-r--r--  src/libsyntax/parse/mod.rs      |  30
-rw-r--r--  src/libsyntax/parse/parser.rs   |   2
-rw-r--r--  src/libsyntax/parse/token.rs    | 106
5 files changed, 123 insertions, 122 deletions
diff --git a/src/libsyntax/parse/comments.rs b/src/libsyntax/parse/comments.rs
index 1a246eb7f2c..a96905f8597 100644
--- a/src/libsyntax/parse/comments.rs
+++ b/src/libsyntax/parse/comments.rs
@@ -238,7 +238,7 @@ fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<~str> ,
         Some(col) => {
             if col < len {
                 s.slice(col, len).to_owned()
-            } else {  ~"" }
+            } else {  "".to_owned() }
         }
         None => s,
     };
@@ -279,7 +279,7 @@ fn read_block_comment(rdr: &mut StringReader,
         while level > 0 {
             debug!("=== block comment level {}", level);
             if is_eof(rdr) {
-                rdr.fatal(~"unterminated block comment");
+                rdr.fatal("unterminated block comment".to_owned());
             }
             if rdr.curr_is('\n') {
                 trim_whitespace_prefix_and_push_line(&mut lines,
@@ -405,41 +405,41 @@ mod test {
     #[test] fn test_block_doc_comment_1() {
         let comment = "/**\n * Test \n **  Test\n *   Test\n*/";
         let stripped = strip_doc_comment_decoration(comment);
-        assert_eq!(stripped, ~" Test \n*  Test\n   Test");
+        assert_eq!(stripped, " Test \n*  Test\n   Test".to_owned());
     }
 
     #[test] fn test_block_doc_comment_2() {
         let comment = "/**\n * Test\n *  Test\n*/";
         let stripped = strip_doc_comment_decoration(comment);
-        assert_eq!(stripped, ~" Test\n  Test");
+        assert_eq!(stripped, " Test\n  Test".to_owned());
     }
 
     #[test] fn test_block_doc_comment_3() {
         let comment = "/**\n let a: *int;\n *a = 5;\n*/";
         let stripped = strip_doc_comment_decoration(comment);
-        assert_eq!(stripped, ~" let a: *int;\n *a = 5;");
+        assert_eq!(stripped, " let a: *int;\n *a = 5;".to_owned());
     }
 
     #[test] fn test_block_doc_comment_4() {
         let comment = "/*******************\n test\n *********************/";
         let stripped = strip_doc_comment_decoration(comment);
-        assert_eq!(stripped, ~" test");
+        assert_eq!(stripped, " test".to_owned());
     }
 
     #[test] fn test_line_doc_comment() {
         let stripped = strip_doc_comment_decoration("/// test");
-        assert_eq!(stripped, ~" test");
+        assert_eq!(stripped, " test".to_owned());
         let stripped = strip_doc_comment_decoration("///! test");
-        assert_eq!(stripped, ~" test");
+        assert_eq!(stripped, " test".to_owned());
         let stripped = strip_doc_comment_decoration("// test");
-        assert_eq!(stripped, ~" test");
+        assert_eq!(stripped, " test".to_owned());
         let stripped = strip_doc_comment_decoration("// test");
-        assert_eq!(stripped, ~" test");
+        assert_eq!(stripped, " test".to_owned());
         let stripped = strip_doc_comment_decoration("///test");
-        assert_eq!(stripped, ~"test");
+        assert_eq!(stripped, "test".to_owned());
         let stripped = strip_doc_comment_decoration("///!test");
-        assert_eq!(stripped, ~"test");
+        assert_eq!(stripped, "test".to_owned());
         let stripped = strip_doc_comment_decoration("//test");
-        assert_eq!(stripped, ~"test");
+        assert_eq!(stripped, "test".to_owned());
     }
 }
diff --git a/src/libsyntax/parse/lexer.rs b/src/libsyntax/parse/lexer.rs
index c1c91cb6a4f..ff087d95e50 100644
--- a/src/libsyntax/parse/lexer.rs
+++ b/src/libsyntax/parse/lexer.rs
@@ -400,9 +400,9 @@ fn consume_block_comment(rdr: &mut StringReader) -> Option<TokenAndSpan> {
     while level > 0 {
         if is_eof(rdr) {
             let msg = if is_doc_comment {
-                ~"unterminated block doc-comment"
+                "unterminated block doc-comment".to_owned()
             } else {
-                ~"unterminated block comment"
+                "unterminated block comment".to_owned()
             };
             fatal_span(rdr, start_bpos, rdr.last_pos, msg);
         } else if rdr.curr_is('/') && nextch_is(rdr, '*') {
@@ -456,7 +456,7 @@ fn scan_exponent(rdr: &mut StringReader, start_bpos: BytePos) -> Option<~str> {
             return Some(rslt.into_owned());
         } else {
             fatal_span(rdr, start_bpos, rdr.last_pos,
-                       ~"scan_exponent: bad fp literal");
+                       "scan_exponent: bad fp literal".to_owned());
         }
     } else { return None::<~str>; }
 }
@@ -480,11 +480,11 @@ fn check_float_base(rdr: &mut StringReader, start_bpos: BytePos, last_bpos: Byte
                     base: uint) {
     match base {
       16u => fatal_span(rdr, start_bpos, last_bpos,
-                      ~"hexadecimal float literal is not supported"),
+                      "hexadecimal float literal is not supported".to_owned()),
       8u => fatal_span(rdr, start_bpos, last_bpos,
-                     ~"octal float literal is not supported"),
+                     "octal float literal is not supported".to_owned()),
       2u => fatal_span(rdr, start_bpos, last_bpos,
-                     ~"binary float literal is not supported"),
+                     "binary float literal is not supported".to_owned()),
       _ => ()
     }
 }
@@ -544,13 +544,13 @@ fn scan_number(c: char, rdr: &mut StringReader) -> token::Token {
         }
         if num_str.len() == 0u {
             fatal_span(rdr, start_bpos, rdr.last_pos,
-                       ~"no valid digits found for number");
+                       "no valid digits found for number".to_owned());
         }
         let parsed = match from_str_radix::<u64>(num_str.as_slice(),
                                                  base as uint) {
             Some(p) => p,
             None => fatal_span(rdr, start_bpos, rdr.last_pos,
-                               ~"int literal is too large")
+                               "int literal is too large".to_owned())
         };
 
         match tp {
@@ -595,7 +595,7 @@ fn scan_number(c: char, rdr: &mut StringReader) -> token::Token {
             back-end.  */
         } else {
             fatal_span(rdr, start_bpos, rdr.last_pos,
-                       ~"expected `f32` or `f64` suffix");
+                       "expected `f32` or `f64` suffix".to_owned());
         }
     }
     if is_float {
@@ -605,13 +605,13 @@ fn scan_number(c: char, rdr: &mut StringReader) -> token::Token {
     } else {
         if num_str.len() == 0u {
             fatal_span(rdr, start_bpos, rdr.last_pos,
-                       ~"no valid digits found for number");
+                       "no valid digits found for number".to_owned());
         }
         let parsed = match from_str_radix::<u64>(num_str.as_slice(),
                                                  base as uint) {
             Some(p) => p,
             None => fatal_span(rdr, start_bpos, rdr.last_pos,
-                               ~"int literal is too large")
+                               "int literal is too large".to_owned())
         };
 
         debug!("lexing {} as an unsuffixed integer literal",
@@ -628,7 +628,7 @@ fn scan_numeric_escape(rdr: &mut StringReader, n_hex_digits: uint) -> char {
         let n = rdr.curr;
         if !is_hex_digit(n) {
             fatal_span_char(rdr, rdr.last_pos, rdr.pos,
-                            ~"illegal character in numeric character escape",
+                            "illegal character in numeric character escape".to_owned(),
                             n.unwrap());
         }
         bump(rdr);
@@ -638,13 +638,13 @@ fn scan_numeric_escape(rdr: &mut StringReader, n_hex_digits: uint) -> char {
     }
     if i != 0 && is_eof(rdr) {
         fatal_span(rdr, start_bpos, rdr.last_pos,
-                   ~"unterminated numeric character escape");
+                   "unterminated numeric character escape".to_owned());
     }
 
     match char::from_u32(accum_int as u32) {
         Some(x) => x,
         None => fatal_span(rdr, start_bpos, rdr.last_pos,
-                           ~"illegal numeric character escape")
+                           "illegal numeric character escape".to_owned())
     }
 }
 
@@ -813,11 +813,12 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
 
             if token::is_keyword(token::keywords::Self, tok) {
                 fatal_span(rdr, start, rdr.last_pos,
-                           ~"invalid lifetime name: 'self is no longer a special lifetime");
+                           "invalid lifetime name: 'self \
+                            is no longer a special lifetime".to_owned());
             } else if token::is_any_keyword(tok) &&
                 !token::is_keyword(token::keywords::Static, tok) {
                 fatal_span(rdr, start, rdr.last_pos,
-                           ~"invalid lifetime name");
+                           "invalid lifetime name".to_owned());
             } else {
                 return token::LIFETIME(ident);
             }
@@ -846,7 +847,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
                             'U' => scan_numeric_escape(rdr, 8u),
                             c2 => {
                                 fatal_span_char(rdr, escaped_pos, rdr.last_pos,
-                                                ~"unknown character escape", c2)
+                                                "unknown character escape".to_owned(), c2)
                             }
                         }
                     }
@@ -854,7 +855,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
             }
             '\t' | '\n' | '\r' | '\'' => {
                 fatal_span_char(rdr, start, rdr.last_pos,
-                                ~"character constant must be escaped", c2);
+                                "character constant must be escaped".to_owned(), c2);
             }
             _ => {}
         }
@@ -865,7 +866,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
                                // ascii single quote.
                                start - BytePos(1),
                                rdr.last_pos,
-                               ~"unterminated character constant");
+                               "unterminated character constant".to_owned());
         }
         bump(rdr); // advance curr past token
         return token::LIT_CHAR(c2 as u32);
@@ -877,7 +878,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
         while !rdr.curr_is('"') {
             if is_eof(rdr) {
                 fatal_span(rdr, start_bpos, rdr.last_pos,
-                           ~"unterminated double quote string");
+                           "unterminated double quote string".to_owned());
             }
 
             let ch = rdr.curr.unwrap();
@@ -886,7 +887,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
               '\\' => {
                 if is_eof(rdr) {
                     fatal_span(rdr, start_bpos, rdr.last_pos,
-                           ~"unterminated double quote string");
+                           "unterminated double quote string".to_owned());
                 }
 
                 let escaped = rdr.curr.unwrap();
@@ -912,7 +913,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
                   }
                   c2 => {
                     fatal_span_char(rdr, escaped_pos, rdr.last_pos,
-                                    ~"unknown string escape", c2);
+                                    "unknown string escape".to_owned(), c2);
                   }
                 }
               }
@@ -933,11 +934,11 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
 
         if is_eof(rdr) {
             fatal_span(rdr, start_bpos, rdr.last_pos,
-                       ~"unterminated raw string");
+                       "unterminated raw string".to_owned());
         } else if !rdr.curr_is('"') {
             fatal_span_char(rdr, start_bpos, rdr.last_pos,
-                            ~"only `#` is allowed in raw string delimitation; \
-                              found illegal character",
+                            "only `#` is allowed in raw string delimitation; \
+                             found illegal character".to_owned(),
                             rdr.curr.unwrap());
         }
         bump(rdr);
@@ -946,7 +947,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
         'outer: loop {
             if is_eof(rdr) {
                 fatal_span(rdr, start_bpos, rdr.last_pos,
-                           ~"unterminated raw string");
+                           "unterminated raw string".to_owned());
             }
             if rdr.curr_is('"') {
                 content_end_bpos = rdr.last_pos;
@@ -994,7 +995,7 @@ fn next_token_inner(rdr: &mut StringReader) -> token::Token {
       '%' => { return binop(rdr, token::PERCENT); }
       c => {
           fatal_span_char(rdr, rdr.last_pos, rdr.pos,
-                          ~"unknown start of token", c);
+                          "unknown start of token".to_owned(), c);
       }
     }
 }
@@ -1022,15 +1023,15 @@ mod test {
     // open a string reader for the given string
     fn setup<'a>(span_handler: &'a diagnostic::SpanHandler,
                  teststr: ~str) -> StringReader<'a> {
-        let fm = span_handler.cm.new_filemap(~"zebra.rs", teststr);
+        let fm = span_handler.cm.new_filemap("zebra.rs".to_owned(), teststr);
         new_string_reader(span_handler, fm)
     }
 
     #[test] fn t1 () {
         let span_handler = mk_sh();
         let mut string_reader = setup(&span_handler,
-            ~"/* my source file */ \
-              fn main() { println!(\"zebra\"); }\n");
+            "/* my source file */ \
+             fn main() { println!(\"zebra\"); }\n".to_owned());
         let id = str_to_ident("fn");
         let tok1 = string_reader.next_token();
         let tok2 = TokenAndSpan{
@@ -1063,54 +1064,54 @@ mod test {
     }
 
     #[test] fn doublecolonparsing () {
-        check_tokenization(setup(&mk_sh(), ~"a b"),
+        check_tokenization(setup(&mk_sh(), "a b".to_owned()),
                            vec!(mk_ident("a",false),
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_2 () {
-        check_tokenization(setup(&mk_sh(), ~"a::b"),
+        check_tokenization(setup(&mk_sh(), "a::b".to_owned()),
                            vec!(mk_ident("a",true),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_3 () {
-        check_tokenization(setup(&mk_sh(), ~"a ::b"),
+        check_tokenization(setup(&mk_sh(), "a ::b".to_owned()),
                            vec!(mk_ident("a",false),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn dcparsing_4 () {
-        check_tokenization(setup(&mk_sh(), ~"a:: b"),
+        check_tokenization(setup(&mk_sh(), "a:: b".to_owned()),
                            vec!(mk_ident("a",true),
                              token::MOD_SEP,
                              mk_ident("b",false)));
     }
 
     #[test] fn character_a() {
-        assert_eq!(setup(&mk_sh(), ~"'a'").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "'a'".to_owned()).next_token().tok,
                    token::LIT_CHAR('a' as u32));
     }
 
     #[test] fn character_space() {
-        assert_eq!(setup(&mk_sh(), ~"' '").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "' '".to_owned()).next_token().tok,
                    token::LIT_CHAR(' ' as u32));
     }
 
     #[test] fn character_escaped() {
-        assert_eq!(setup(&mk_sh(), ~"'\\n'").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "'\\n'".to_owned()).next_token().tok,
                    token::LIT_CHAR('\n' as u32));
     }
 
     #[test] fn lifetime_name() {
-        assert_eq!(setup(&mk_sh(), ~"'abc").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "'abc".to_owned()).next_token().tok,
                    token::LIFETIME(token::str_to_ident("abc")));
     }
 
     #[test] fn raw_string() {
-        assert_eq!(setup(&mk_sh(), ~"r###\"\"#a\\b\x00c\"\"###").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "r###\"\"#a\\b\x00c\"\"###".to_owned()).next_token().tok,
                    token::LIT_STR_RAW(token::str_to_ident("\"#a\\b\x00c\""), 3));
     }
 
@@ -1121,7 +1122,7 @@ mod test {
     }
 
     #[test] fn nested_block_comments() {
-        assert_eq!(setup(&mk_sh(), ~"/* /* */ */'a'").next_token().tok,
+        assert_eq!(setup(&mk_sh(), "/* /* */ */'a'".to_owned()).next_token().tok,
                    token::LIT_CHAR('a' as u32));
     }
 
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index eca9f955d93..4586980d893 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -302,7 +302,7 @@ mod test {
     }
 
     #[test] fn path_exprs_1() {
-        assert!(string_to_expr(~"a") ==
+        assert!(string_to_expr("a".to_owned()) ==
                    @ast::Expr{
                     id: ast::DUMMY_NODE_ID,
                     node: ast::ExprPath(ast::Path {
@@ -321,7 +321,7 @@ mod test {
     }
 
     #[test] fn path_exprs_2 () {
-        assert!(string_to_expr(~"::a::b") ==
+        assert!(string_to_expr("::a::b".to_owned()) ==
                    @ast::Expr {
                     id: ast::DUMMY_NODE_ID,
                     node: ast::ExprPath(ast::Path {
@@ -346,12 +346,12 @@ mod test {
 
     #[should_fail]
     #[test] fn bad_path_expr_1() {
-        string_to_expr(~"::abc::def::return");
+        string_to_expr("::abc::def::return".to_owned());
     }
 
     // check the token-tree-ization of macros
     #[test] fn string_to_tts_macro () {
-        let tts = string_to_tts(~"macro_rules! zip (($a)=>($a))");
+        let tts = string_to_tts("macro_rules! zip (($a)=>($a))".to_owned());
         let tts: &[ast::TokenTree] = tts.as_slice();
         match tts {
             [ast::TTTok(_,_),
@@ -404,9 +404,9 @@ mod test {
     }
 
     #[test] fn string_to_tts_1 () {
-        let tts = string_to_tts(~"fn a (b : int) { b; }");
+        let tts = string_to_tts("fn a (b : int) { b; }".to_owned());
         assert_eq!(to_json_str(&tts),
-        ~"[\
+        "[\
     {\
         \"variant\":\"TTTok\",\
         \"fields\":[\
@@ -528,12 +528,12 @@ mod test {
             ]\
         ]\
     }\
-]"
+]".to_owned()
         );
     }
 
     #[test] fn ret_expr() {
-        assert!(string_to_expr(~"return d") ==
+        assert!(string_to_expr("return d".to_owned()) ==
                    @ast::Expr{
                     id: ast::DUMMY_NODE_ID,
                     node:ast::ExprRet(Some(@ast::Expr{
@@ -556,7 +556,7 @@ mod test {
     }
 
     #[test] fn parse_stmt_1 () {
-        assert!(string_to_stmt(~"b;") ==
+        assert!(string_to_stmt("b;".to_owned()) ==
                    @Spanned{
                        node: ast::StmtExpr(@ast::Expr {
                            id: ast::DUMMY_NODE_ID,
@@ -583,7 +583,7 @@ mod test {
 
     #[test] fn parse_ident_pat () {
         let sess = new_parse_sess();
-        let mut parser = string_to_parser(&sess, ~"b");
+        let mut parser = string_to_parser(&sess, "b".to_owned());
         assert!(parser.parse_pat() ==
                    @ast::Pat{id: ast::DUMMY_NODE_ID,
                              node: ast::PatIdent(
@@ -607,7 +607,7 @@ mod test {
     // check the contents of the tt manually:
     #[test] fn parse_fundecl () {
         // this test depends on the intern order of "fn" and "int"
-        assert!(string_to_item(~"fn a (b : int) { b; }") ==
+        assert!(string_to_item("fn a (b : int) { b; }".to_owned()) ==
                   Some(
                       @ast::Item{ident:str_to_ident("a"),
                             attrs:Vec::new(),
@@ -699,12 +699,12 @@ mod test {
 
     #[test] fn parse_exprs () {
         // just make sure that they parse....
-        string_to_expr(~"3 + 4");
-        string_to_expr(~"a::z.froob(b,@(987+3))");
+        string_to_expr("3 + 4".to_owned());
+        string_to_expr("a::z.froob(b,@(987+3))".to_owned());
     }
 
     #[test] fn attrs_fix_bug () {
-        string_to_item(~"pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
+        string_to_item("pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
                    -> Result<@Writer, ~str> {
     #[cfg(windows)]
     fn wb() -> c_int {
@@ -715,7 +715,7 @@ mod test {
     fn wb() -> c_int { O_WRONLY as c_int }
 
     let mut fflags: c_int = wb();
-}");
+}".to_owned());
     }
 
 }
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index 379403e5409..58634be1995 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -384,7 +384,7 @@ impl<'a> Parser<'a> {
         fn tokens_to_str(tokens: &[token::Token]) -> ~str {
             let mut i = tokens.iter();
             // This might be a sign we need a connect method on Iterator.
-            let b = i.next().map_or(~"", |t| Parser::token_to_str(t));
+            let b = i.next().map_or("".to_owned(), |t| Parser::token_to_str(t));
             i.fold(b, |b,a| b + "`, `" + Parser::token_to_str(a))
         }
         if edible.contains(&self.token) {
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index de6dacbe766..77743cdb9df 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -141,56 +141,56 @@ impl fmt::Show for Nonterminal {
 
 pub fn binop_to_str(o: BinOp) -> ~str {
     match o {
-      PLUS => ~"+",
-      MINUS => ~"-",
-      STAR => ~"*",
-      SLASH => ~"/",
-      PERCENT => ~"%",
-      CARET => ~"^",
-      AND => ~"&",
-      OR => ~"|",
-      SHL => ~"<<",
-      SHR => ~">>"
+      PLUS => "+".to_owned(),
+      MINUS => "-".to_owned(),
+      STAR => "*".to_owned(),
+      SLASH => "/".to_owned(),
+      PERCENT => "%".to_owned(),
+      CARET => "^".to_owned(),
+      AND => "&".to_owned(),
+      OR => "|".to_owned(),
+      SHL => "<<".to_owned(),
+      SHR => ">>".to_owned()
     }
 }
 
 pub fn to_str(t: &Token) -> ~str {
     match *t {
-      EQ => ~"=",
-      LT => ~"<",
-      LE => ~"<=",
-      EQEQ => ~"==",
-      NE => ~"!=",
-      GE => ~">=",
-      GT => ~">",
-      NOT => ~"!",
-      TILDE => ~"~",
-      OROR => ~"||",
-      ANDAND => ~"&&",
+      EQ => "=".to_owned(),
+      LT => "<".to_owned(),
+      LE => "<=".to_owned(),
+      EQEQ => "==".to_owned(),
+      NE => "!=".to_owned(),
+      GE => ">=".to_owned(),
+      GT => ">".to_owned(),
+      NOT => "!".to_owned(),
+      TILDE => "~".to_owned(),
+      OROR => "||".to_owned(),
+      ANDAND => "&&".to_owned(),
       BINOP(op) => binop_to_str(op),
       BINOPEQ(op) => binop_to_str(op) + "=",
 
       /* Structural symbols */
-      AT => ~"@",
-      DOT => ~".",
-      DOTDOT => ~"..",
-      DOTDOTDOT => ~"...",
-      COMMA => ~",",
-      SEMI => ~";",
-      COLON => ~":",
-      MOD_SEP => ~"::",
-      RARROW => ~"->",
-      LARROW => ~"<-",
-      DARROW => ~"<->",
-      FAT_ARROW => ~"=>",
-      LPAREN => ~"(",
-      RPAREN => ~")",
-      LBRACKET => ~"[",
-      RBRACKET => ~"]",
-      LBRACE => ~"{",
-      RBRACE => ~"}",
-      POUND => ~"#",
-      DOLLAR => ~"$",
+      AT => "@".to_owned(),
+      DOT => ".".to_owned(),
+      DOTDOT => "..".to_owned(),
+      DOTDOTDOT => "...".to_owned(),
+      COMMA => ",".to_owned(),
+      SEMI => ";".to_owned(),
+      COLON => ":".to_owned(),
+      MOD_SEP => "::".to_owned(),
+      RARROW => "->".to_owned(),
+      LARROW => "<-".to_owned(),
+      DARROW => "<->".to_owned(),
+      FAT_ARROW => "=>".to_owned(),
+      LPAREN => "(".to_owned(),
+      RPAREN => ")".to_owned(),
+      LBRACKET => "[".to_owned(),
+      RBRACKET => "]".to_owned(),
+      LBRACE => "{".to_owned(),
+      RBRACE => "}".to_owned(),
+      POUND => "#".to_owned(),
+      DOLLAR => "$".to_owned(),
 
       /* Literals */
       LIT_CHAR(c) => {
@@ -232,29 +232,29 @@ pub fn to_str(t: &Token) -> ~str {
       LIFETIME(s) => {
           format!("'{}", get_ident(s))
       }
-      UNDERSCORE => ~"_",
+      UNDERSCORE => "_".to_owned(),
 
       /* Other */
       DOC_COMMENT(s) => get_ident(s).get().to_str(),
-      EOF => ~"<eof>",
+      EOF => "<eof>".to_owned(),
       INTERPOLATED(ref nt) => {
         match nt {
             &NtExpr(e) => ::print::pprust::expr_to_str(e),
             &NtMeta(e) => ::print::pprust::meta_item_to_str(e),
             _ => {
-                ~"an interpolated " +
+                "an interpolated ".to_owned() +
                     match *nt {
-                        NtItem(..) => ~"item",
-                        NtBlock(..) => ~"block",
-                        NtStmt(..) => ~"statement",
-                        NtPat(..) => ~"pattern",
+                        NtItem(..) => "item".to_owned(),
+                        NtBlock(..) => "block".to_owned(),
+                        NtStmt(..) => "statement".to_owned(),
+                        NtPat(..) => "pattern".to_owned(),
                         NtMeta(..) => fail!("should have been handled"),
                         NtExpr(..) => fail!("should have been handled above"),
-                        NtTy(..) => ~"type",
-                        NtIdent(..) => ~"identifier",
-                        NtPath(..) => ~"path",
-                        NtTT(..) => ~"tt",
-                        NtMatchers(..) => ~"matcher sequence"
+                        NtTy(..) => "type".to_owned(),
+                        NtIdent(..) => "identifier".to_owned(),
+                        NtPath(..) => "path".to_owned(),
+                        NtTT(..) => "tt".to_owned(),
+                        NtMatchers(..) => "matcher sequence".to_owned()
                     }
             }
         }