-rw-r--r--  src/librustc/traits/auto_trait.rs                127
-rw-r--r--  src/librustc/traits/mod.rs                         2
-rw-r--r--  src/librustc/traits/select.rs                      30
-rw-r--r--  src/librustc_codegen_llvm/debuginfo/metadata.rs    30
4 files changed, 95 insertions, 94 deletions
diff --git a/src/librustc/traits/auto_trait.rs b/src/librustc/traits/auto_trait.rs
index 0a9d126c9ae..479bffc3ea0 100644
--- a/src/librustc/traits/auto_trait.rs
+++ b/src/librustc/traits/auto_trait.rs
@@ -233,44 +233,45 @@ impl<'tcx> AutoTraitFinder<'tcx> {
 }
 
 impl AutoTraitFinder<'tcx> {
-    // The core logic responsible for computing the bounds for our synthesized impl.
-    //
-    // To calculate the bounds, we call `SelectionContext.select` in a loop. Like
-    // `FulfillmentContext`, we recursively select the nested obligations of predicates we
-    // encounter. However, whenever we encounter an `UnimplementedError` involving a type parameter,
-    // we add it to our `ParamEnv`. Since our goal is to determine when a particular type implements
-    // an auto trait, Unimplemented errors tell us what conditions need to be met.
-    //
-    // This method ends up working somewhat similarly to `FulfillmentContext`, but with a few key
-    // differences. `FulfillmentContext` works under the assumption that it's dealing with concrete
-    // user code. According, it considers all possible ways that a `Predicate` could be met, which
-    // isn't always what we want for a synthesized impl. For example, given the predicate `T:
-    // Iterator`, `FulfillmentContext` can end up reporting an Unimplemented error for `T:
-    // IntoIterator` -- since there's an implementation of `Iterator` where `T: IntoIterator`,
-    // `FulfillmentContext` will drive `SelectionContext` to consider that impl before giving up. If
-    // we were to rely on `FulfillmentContext`'s decision, we might end up synthesizing an impl like
-    // this:
-    //
-    //     impl<T> Send for Foo<T> where T: IntoIterator
-    //
-    // While it might be technically true that Foo implements Send where `T: IntoIterator`,
-    // the bound is overly restrictive - it's really only necessary that `T: Iterator`.
-    //
-    // For this reason, `evaluate_predicates` handles predicates with type variables specially. When
-    // we encounter an `Unimplemented` error for a bound such as `T: Iterator`, we immediately add
-    // it to our `ParamEnv`, and add it to our stack for recursive evaluation. When we later select
-    // it, we'll pick up any nested bounds, without ever inferring that `T: IntoIterator` needs to
-    // hold.
-    //
-    // One additional consideration is supertrait bounds. Normally, a `ParamEnv` is only ever
-    // constructed once for a given type. As part of the construction process, the `ParamEnv` will
-    // have any supertrait bounds normalized -- e.g., if we have a type `struct Foo<T: Copy>`, the
-    // `ParamEnv` will contain `T: Copy` and `T: Clone`, since `Copy: Clone`. When we construct our
-    // own `ParamEnv`, we need to do this ourselves, through `traits::elaborate_predicates`, or else
-    // `SelectionContext` will choke on the missing predicates. However, this should never show up
-    // in the final synthesized generics: we don't want our generated docs page to contain something
-    // like `T: Copy + Clone`, as that's redundant. Therefore, we keep track of a separate
-    // `user_env`, which only holds the predicates that will actually be displayed to the user.
+    /// The core logic responsible for computing the bounds for our synthesized impl.
+    ///
+    /// To calculate the bounds, we call `SelectionContext.select` in a loop. Like
+    /// `FulfillmentContext`, we recursively select the nested obligations of predicates we
+    /// encounter. However, whenever we encounter an `UnimplementedError` involving a type
+    /// parameter, we add it to our `ParamEnv`. Since our goal is to determine when a particular
+    /// type implements an auto trait, Unimplemented errors tell us what conditions need to be met.
+    ///
+    /// This method ends up working somewhat similarly to `FulfillmentContext`, but with a few key
+    /// differences. `FulfillmentContext` works under the assumption that it's dealing with concrete
+    /// user code. Accordingly, it considers all possible ways that a `Predicate` could be met, which
+    /// isn't always what we want for a synthesized impl. For example, given the predicate `T:
+    /// Iterator`, `FulfillmentContext` can end up reporting an Unimplemented error for `T:
+    /// IntoIterator` -- since there's an implementation of `Iterator` where `T: IntoIterator`,
+    /// `FulfillmentContext` will drive `SelectionContext` to consider that impl before giving up.
+    /// If we were to rely on `FulfillmentContext`'s decision, we might end up synthesizing an impl
+    /// like this:
+    ///
+    ///     impl<T> Send for Foo<T> where T: IntoIterator
+    ///
+    /// While it might be technically true that `Foo` implements `Send` where `T: IntoIterator`,
+    /// the bound is overly restrictive -- it's really only necessary that `T: Iterator`.
+    ///
+    /// For this reason, `evaluate_predicates` handles predicates with type variables specially.
+    /// When we encounter an `Unimplemented` error for a bound such as `T: Iterator`, we immediately
+    /// add it to our `ParamEnv`, and add it to our stack for recursive evaluation. When we later
+    /// select it, we'll pick up any nested bounds, without ever inferring that `T: IntoIterator`
+    /// needs to hold.
+    ///
+    /// One additional consideration is supertrait bounds. Normally, a `ParamEnv` is only ever
+    /// constructed once for a given type. As part of the construction process, the `ParamEnv` will
+    /// have any supertrait bounds normalized -- e.g., if we have a type `struct Foo<T: Copy>`, the
+    /// `ParamEnv` will contain `T: Copy` and `T: Clone`, since `Copy: Clone`. When we construct our
+    /// own `ParamEnv`, we need to do this ourselves, through `traits::elaborate_predicates`, or
+    /// else `SelectionContext` will choke on the missing predicates. However, this should never
+    /// show up in the final synthesized generics: we don't want our generated docs page to contain
+    /// something like `T: Copy + Clone`, as that's redundant. Therefore, we keep track of a
+    /// separate `user_env`, which only holds the predicates that will actually be displayed to the
+    /// user.
     fn evaluate_predicates(
         &self,
         infcx: &InferCtxt<'_, 'tcx>,
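The `T: Iterator` versus `T: IntoIterator` distinction described in the comment above is easiest to see from the user's side. Below is a minimal sketch, with a made-up `Lazy` type standing in for the kind of generic struct rustdoc synthesizes auto-trait impls for; the impl shown in the comment is the rough shape of the desired output, not something this snippet produces.

    // A wrapper whose auto-trait status depends on the iterator type `T` and
    // on what `T` yields (`T::Item` is stored directly).
    struct Lazy<T: Iterator> {
        buffered: Vec<T::Item>,
        source: T,
    }

    // The bound we would want a synthesized impl to report is the minimal one,
    // phrased in terms of `Iterator`, roughly:
    //
    //     impl<T> Send for Lazy<T>
    //     where
    //         T: Iterator + Send,
    //         T::Item: Send,
    //
    // rather than an overly restrictive bound in terms of `T: IntoIterator`
    // that `FulfillmentContext` might wander into while exploring other impls.

    fn assert_send<T: Send>(_: &T) {}

    fn main() {
        let lazy = Lazy { buffered: Vec::new(), source: vec![1, 2, 3].into_iter() };
        // `std::vec::IntoIter<i32>` and `i32` are both `Send`, so `Lazy<_>` is too.
        assert_send(&lazy);
    }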
@@ -393,29 +394,29 @@ impl AutoTraitFinder<'tcx> {
         return Some((new_env, final_user_env));
     }
 
-    // This method is designed to work around the following issue:
-    // When we compute auto trait bounds, we repeatedly call `SelectionContext.select`,
-    // progressively building a `ParamEnv` based on the results we get.
-    // However, our usage of `SelectionContext` differs from its normal use within the compiler,
-    // in that we capture and re-reprocess predicates from `Unimplemented` errors.
-    //
-    // This can lead to a corner case when dealing with region parameters.
-    // During our selection loop in `evaluate_predicates`, we might end up with
-    // two trait predicates that differ only in their region parameters:
-    // one containing a HRTB lifetime parameter, and one containing a 'normal'
-    // lifetime parameter. For example:
-    //
-    //     T as MyTrait<'a>
-    //     T as MyTrait<'static>
-    //
-    // If we put both of these predicates in our computed `ParamEnv`, we'll
-    // confuse `SelectionContext`, since it will (correctly) view both as being applicable.
-    //
-    // To solve this, we pick the 'more strict' lifetime bound -- i.e., the HRTB
-    // Our end goal is to generate a user-visible description of the conditions
-    // under which a type implements an auto trait. A trait predicate involving
-    // a HRTB means that the type needs to work with any choice of lifetime,
-    // not just one specific lifetime (e.g., `'static`).
+    /// This method is designed to work around the following issue:
+    /// When we compute auto trait bounds, we repeatedly call `SelectionContext.select`,
+    /// progressively building a `ParamEnv` based on the results we get.
+    /// However, our usage of `SelectionContext` differs from its normal use within the compiler,
+    /// in that we capture and reprocess predicates from `Unimplemented` errors.
+    ///
+    /// This can lead to a corner case when dealing with region parameters.
+    /// During our selection loop in `evaluate_predicates`, we might end up with
+    /// two trait predicates that differ only in their region parameters:
+    /// one containing a HRTB lifetime parameter, and one containing a 'normal'
+    /// lifetime parameter. For example:
+    ///
+    ///     T as MyTrait<'a>
+    ///     T as MyTrait<'static>
+    ///
+    /// If we put both of these predicates in our computed `ParamEnv`, we'll
+    /// confuse `SelectionContext`, since it will (correctly) view both as being applicable.
+    ///
+    /// To solve this, we pick the 'more strict' lifetime bound -- i.e., the HRTB.
+    /// Our end goal is to generate a user-visible description of the conditions
+    /// under which a type implements an auto trait. A trait predicate involving
+    /// a HRTB means that the type needs to work with any choice of lifetime,
+    /// not just one specific lifetime (e.g., `'static`).
     fn add_user_pred<'c>(
         &self,
         user_computed_preds: &mut FxHashSet<ty::Predicate<'c>>,
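The claim above that the HRTB is the stricter of the two bounds can be checked in ordinary Rust, independent of the selection machinery. A small sketch, with a made-up trait and type:

    // A lifetime-parameterized trait, so that `T: MyTrait<'static>` and
    // `for<'a> T: MyTrait<'a>` are distinct (but related) predicates.
    trait MyTrait<'a> {}

    struct Widget;
    impl<'a> MyTrait<'a> for Widget {}

    // Requires the weaker, single-lifetime bound.
    fn needs_static<T: MyTrait<'static>>(_: &T) {}

    // Requires the stricter HRTB bound; any particular choice of lifetime
    // (including `'static`) follows from it, so the call below type-checks.
    fn needs_hrtb<T: for<'a> MyTrait<'a>>(t: &T) {
        needs_static(t);
    }

    fn main() {
        // `Widget` satisfies the HRTB, so it satisfies both bounds.
        needs_hrtb(&Widget);
        needs_static(&Widget);
    }

Keeping only the HRTB predicate therefore loses no information: every specific-lifetime instantiation is implied by it.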
@@ -506,8 +507,8 @@ impl AutoTraitFinder<'tcx> {
         }
     }
 
-    // This is very similar to `handle_lifetimes`. However, instead of matching `ty::Region`'s
-    // to each other, we match `ty::RegionVid`'s to `ty::Region`'s.
+    /// This is very similar to `handle_lifetimes`. However, instead of matching `ty::Region`s
+    /// to each other, we match `ty::RegionVid`s to `ty::Region`s.
     fn map_vid_to_region<'cx>(
         &self,
         regions: &RegionConstraintData<'cx>,
diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs
index 64830ce1d4c..d94e004db29 100644
--- a/src/librustc/traits/mod.rs
+++ b/src/librustc/traits/mod.rs
@@ -191,7 +191,7 @@ pub enum ObligationCauseCode<'tcx> {
     /// Obligation incurred due to a coercion.
     Coercion { source: Ty<'tcx>, target: Ty<'tcx> },
 
-    // Various cases where expressions must be `Sized` / `Copy` / etc.
+    /// Various cases where expressions must be `Sized` / `Copy` / etc.
     /// `L = X` implies that `L` is `Sized`.
     AssignmentLhsSized,
     /// `(x1, .., xn)` must be `Sized`.
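The `AssignmentLhsSized` case above corresponds to an everyday check: writing through a place requires the place's type to be `Sized`. A small illustration (the functions here are made up, not part of this patch):

    fn overwrite(dst: &mut u32, src: u32) {
        *dst = src; // fine: `u32: Sized`, so the obligation is satisfied
    }

    // By contrast, something like
    //     fn overwrite_str(dst: &mut str, src: &str) { *dst = *src; }
    // is rejected, because the left-hand side `*dst` has the unsized type `str`.

    fn main() {
        let mut x = 1;
        overwrite(&mut x, 7);
        assert_eq!(x, 7);
    }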
diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs
index de490b74aed..283fa56d11f 100644
--- a/src/librustc/traits/select.rs
+++ b/src/librustc/traits/select.rs
@@ -1198,26 +1198,26 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
             .insert(trait_ref, WithDepNode::new(dep_node, result));
     }
 
-    // For various reasons, it's possible for a subobligation
-    // to have a *lower* recursion_depth than the obligation used to create it.
-    // Projection sub-obligations may be returned from the projection cache,
-    // which results in obligations with an 'old' `recursion_depth`.
-    // Additionally, methods like `ty::wf::obligations` and
-    // `InferCtxt.subtype_predicate` produce subobligations without
-    // taking in a 'parent' depth, causing the generated subobligations
-    // to have a `recursion_depth` of `0`.
-    //
-    // To ensure that obligation_depth never decreasees, we force all subobligations
-    // to have at least the depth of the original obligation.
+    /// For various reasons, it's possible for a subobligation
+    /// to have a *lower* `recursion_depth` than the obligation used to create it.
+    /// Projection sub-obligations may be returned from the projection cache,
+    /// which results in obligations with an 'old' `recursion_depth`.
+    /// Additionally, methods like `ty::wf::obligations` and
+    /// `InferCtxt.subtype_predicate` produce subobligations without
+    /// taking in a 'parent' depth, causing the generated subobligations
+    /// to have a `recursion_depth` of `0`.
+    ///
+    /// To ensure that the recursion depth never decreases, we force all subobligations
+    /// to have at least the depth of the original obligation.
     fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I,
                                                                            min_depth: usize) {
         it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
     }
 
-    // Checks that the recursion limit has not been exceeded.
-    //
-    // The weird return type of this function allows it to be used with the `try` (`?`)
-    // operator within certain functions.
+    /// Checks that the recursion limit has not been exceeded.
+    ///
+    /// The weird return type of this function allows it to be used with the `try` (`?`)
+    /// operator within certain functions.
     fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
         &self,
         obligation: &Obligation<'tcx, T>,
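The "weird return type" mentioned for `check_recursion_limit` is the usual trick of returning a `Result` whose success value is `()`. A standalone sketch of the pattern; `OverflowError` and both functions are made up for illustration, since the full signature is not shown in this hunk:

    #[derive(Debug)]
    struct OverflowError;

    fn check_limit(depth: usize, limit: usize) -> Result<(), OverflowError> {
        if depth >= limit {
            return Err(OverflowError);
        }
        Ok(())
    }

    fn evaluate(depth: usize) -> Result<&'static str, OverflowError> {
        // `?` early-returns the error, so no explicit `match` is needed at
        // each call site -- which is the whole point of the return type.
        check_limit(depth, 128)?;
        Ok("within the recursion limit")
    }

    fn main() {
        assert!(evaluate(5).is_ok());
        assert!(evaluate(500).is_err());
    }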
diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs
index 085738b4cca..2e16ae4c73f 100644
--- a/src/librustc_codegen_llvm/debuginfo/metadata.rs
+++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs
@@ -91,19 +91,19 @@ pub const NO_SCOPE_METADATA: Option<&DIScope> = None;
 #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
 pub struct UniqueTypeId(ast::Name);
 
-// The `TypeMap` is where the `CrateDebugContext` holds the type metadata nodes
-// created so far. The metadata nodes are indexed by `UniqueTypeId`, and, for
-// faster lookup, also by `Ty`. The `TypeMap` is responsible for creating
-// `UniqueTypeId`s.
+/// The `TypeMap` is where the `CrateDebugContext` holds the type metadata nodes
+/// created so far. The metadata nodes are indexed by `UniqueTypeId`, and, for
+/// faster lookup, also by `Ty`. The `TypeMap` is responsible for creating
+/// `UniqueTypeId`s.
 #[derive(Default)]
 pub struct TypeMap<'ll, 'tcx> {
-    // The `UniqueTypeId`s created so far.
+    /// The `UniqueTypeId`s created so far.
     unique_id_interner: Interner,
-    // A map from `UniqueTypeId` to debuginfo metadata for that type. This is a 1:1 mapping.
+    /// A map from `UniqueTypeId` to debuginfo metadata for that type. This is a 1:1 mapping.
     unique_id_to_metadata: FxHashMap<UniqueTypeId, &'ll DIType>,
-    // A map from types to debuginfo metadata. This is an N:1 mapping.
+    /// A map from types to debuginfo metadata. This is an N:1 mapping.
     type_to_metadata: FxHashMap<Ty<'tcx>, &'ll DIType>,
-    // A map from types to `UniqueTypeId`. This is an N:1 mapping.
+    /// A map from types to `UniqueTypeId`. This is an N:1 mapping.
     type_to_unique_id: FxHashMap<Ty<'tcx>, UniqueTypeId>
 }
 
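As a rough mental model of the structure described above: the real fields hold interned symbols and `&DIType` metadata nodes, but the shape of the maps (an interner plus a 1:1 map keyed by unique ID and two N:1 maps keyed by `Ty`) can be sketched with plain `String`/`u32` stand-ins:

    use std::collections::HashMap;

    // Simplified stand-ins; not the real compiler types.
    type UniqueTypeId = u32;
    type Ty = &'static str;
    type Metadata = String;

    #[allow(dead_code)]
    #[derive(Default)]
    struct TypeMap {
        // Interner: each distinct unique-id string gets one `UniqueTypeId`.
        unique_id_interner: HashMap<String, UniqueTypeId>,
        // 1:1 -- each unique id maps to exactly one metadata node.
        unique_id_to_metadata: HashMap<UniqueTypeId, Metadata>,
        // N:1 -- several `Ty`s may share one metadata node / unique id.
        type_to_metadata: HashMap<Ty, Metadata>,
        type_to_unique_id: HashMap<Ty, UniqueTypeId>,
    }

    impl TypeMap {
        fn intern(&mut self, key: &str) -> UniqueTypeId {
            let next = self.unique_id_interner.len() as u32;
            *self.unique_id_interner.entry(key.to_string()).or_insert(next)
        }
    }

    fn main() {
        let mut map = TypeMap::default();
        // The key format here is invented; only the interning behavior matters.
        let id_a = map.intern("Foo<u32>");
        let id_b = map.intern("Foo<u32>");
        assert_eq!(id_a, id_b); // same key, same `UniqueTypeId`
    }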
@@ -203,9 +203,9 @@ impl TypeMap<'ll, 'tcx> {
         return UniqueTypeId(key);
     }
 
-    // Get the `UniqueTypeId` for an enum variant. Enum variants are not really
-    // types of their own, so they need special handling. We still need a
-    // UniqueTypeId for them, since to debuginfo they *are* real types.
+    /// Gets the `UniqueTypeId` for an enum variant. Enum variants are not really
+    /// types of their own, so they need special handling. We still need a
+    /// `UniqueTypeId` for them, since, as far as debuginfo is concerned, they *are* real types.
     fn get_unique_type_id_of_enum_variant<'a>(&mut self,
                                               cx: &CodegenCx<'a, 'tcx>,
                                               enum_type: Ty<'tcx>,
@@ -219,9 +219,9 @@ impl TypeMap<'ll, 'tcx> {
         UniqueTypeId(interner_key)
     }
 
-    // Get the unique type ID string for an enum variant part.
-    // Variant parts are not types and shouldn't really have their own ID,
-    // but it makes `set_members_of_composite_type()` simpler.
+    /// Gets the unique type ID string for an enum variant part.
+    /// Variant parts are not types and shouldn't really have their own ID,
+    /// but it makes `set_members_of_composite_type()` simpler.
     fn get_unique_type_id_str_of_enum_variant_part(&mut self, enum_type_id: UniqueTypeId) -> &str {
         let variant_part_type_id = format!("{}_variant_part",
                                            self.get_unique_type_id_as_string(enum_type_id));
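As the `format!` call above shows, the variant-part "ID" is just the enum's unique-id string with a suffix appended, which is then interned like any other key. A tiny sketch of that idea, with a hypothetical helper name:

    // A variant part is not a type, so its "unique id" is a string derived
    // from the enum's unique-id string.
    fn variant_part_key(enum_key: &str) -> String {
        format!("{}_variant_part", enum_key)
    }

    fn main() {
        let enum_key = "Option<u32>"; // made-up unique-id string
        // The derived key is deterministic, so interning it repeatedly yields
        // the same id -- which is all `set_members_of_composite_type()` needs.
        assert_eq!(variant_part_key(enum_key), variant_part_key(enum_key));
    }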
@@ -1027,7 +1027,7 @@ impl MetadataCreationResult<'ll> {
     }
 }
 
-// Description of a type member, which can either be a regular field (as in
+/// Description of a type member, which can either be a regular field (as in
 /// structs or tuples) or an enum variant.
 #[derive(Debug)]
 struct MemberDescription<'ll> {