author    Niko Matsakis <niko@alum.mit.edu>    2015-04-01 04:41:45 -0400
committer Niko Matsakis <niko@alum.mit.edu>    2015-04-02 13:25:06 -0400
commit    38fdd50e0b16e947c5a3879f593f51fd769deae2 (patch)
tree      b1b990c5eadbb2c6d79de39e9de6a1d253cbeebb
parent    628d715ff42c83ab74bef3a5a5c5ac3c2f915c11 (diff)
Remove *most* mentions of phantom fns and variance on traits. Leave some
comments and also leave the entries in the variance tables for now.
 src/libcore/marker.rs                                            |  28
 src/librustc/middle/lang_items.rs                                |   1
 src/librustc/middle/traits/object_safety.rs                      |   9
 src/librustc/middle/traits/select.rs                             |   8
 src/librustc_typeck/check/wf.rs                                  |  42
 src/librustc_typeck/variance.rs                                  | 239
 src/test/compile-fail/variance-contravariant-arg-object.rs       |   4
 src/test/compile-fail/variance-contravariant-arg-trait-match.rs  |   3
 src/test/compile-fail/variance-contravariant-self-trait-match.rs |   3
 src/test/compile-fail/variance-covariant-arg-object.rs           |   3
 src/test/compile-fail/variance-covariant-arg-trait-match.rs      |   3
 src/test/compile-fail/variance-covariant-self-trait-match.rs     |   3
 12 files changed, 174 insertions(+), 172 deletions(-)
diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs
index e867ba02854..352f7d86977 100644
--- a/src/libcore/marker.rs
+++ b/src/libcore/marker.rs
@@ -276,12 +276,15 @@ macro_rules! impls{
 #[unstable(feature = "core", reason = "deprecated")]
 #[deprecated(since = "1.0.0", reason = "No longer needed")]
 #[allow(deprecated)]
+#[cfg(stage0)]
 pub trait MarkerTrait : PhantomFn<Self,Self> { }
-//                                    ~~~~~ <-- FIXME(#22806)?
-//
-// Marker trait has been made invariant so as to avoid inf recursion,
-// but we should ideally solve the underlying problem. That's a bit
-// complicated.
+
+/// `MarkerTrait` is deprecated and no longer needed.
+#[unstable(feature = "core", reason = "deprecated")]
+#[deprecated(since = "1.0.0", reason = "No longer needed")]
+#[allow(deprecated)]
+#[cfg(not(stage0))]
+pub trait MarkerTrait { }
 
 #[allow(deprecated)]
 impl<T:?Sized> MarkerTrait for T { }
@@ -290,7 +293,20 @@ impl<T:?Sized> MarkerTrait for T { }
 #[lang="phantom_fn"]
 #[unstable(feature = "core", reason = "deprecated")]
 #[deprecated(since = "1.0.0", reason = "No longer needed")]
-pub trait PhantomFn<A:?Sized,R:?Sized=()> { }
+#[cfg(stage0)]
+pub trait PhantomFn<A:?Sized,R:?Sized=()> {
+}
+
+/// `PhantomFn` is a deprecated marker trait that is no longer needed.
+#[unstable(feature = "core", reason = "deprecated")]
+#[deprecated(since = "1.0.0", reason = "No longer needed")]
+#[cfg(not(stage0))]
+pub trait PhantomFn<A:?Sized,R:?Sized=()> {
+}
+
+#[allow(deprecated)]
+#[cfg(not(stage0))]
+impl<A:?Sized,R:?Sized,T:?Sized> PhantomFn<A,R> for T { }
 
 /// `PhantomData<T>` allows you to describe that a type acts as if it stores a value of type `T`,
 /// even though it does not. This allows you to inform the compiler about certain safety properties
diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs
index a08de58f909..89a8625856c 100644
--- a/src/librustc/middle/lang_items.rs
+++ b/src/librustc/middle/lang_items.rs
@@ -321,7 +321,6 @@ lets_do_this! {
     ExchangeHeapLangItem,            "exchange_heap",           exchange_heap;
     OwnedBoxLangItem,                "owned_box",               owned_box;
 
-    PhantomFnItem,                   "phantom_fn",              phantom_fn;
     PhantomDataItem,                 "phantom_data",            phantom_data;
 
     // Deprecated:
diff --git a/src/librustc/middle/traits/object_safety.rs b/src/librustc/middle/traits/object_safety.rs
index af6bb4ccccd..a2ff86cd065 100644
--- a/src/librustc/middle/traits/object_safety.rs
+++ b/src/librustc/middle/traits/object_safety.rs
@@ -138,11 +138,10 @@ fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
             match predicate {
                 ty::Predicate::Trait(ref data) => {
                     // In the case of a trait predicate, we can skip the "self" type.
-                    Some(data.def_id()) != tcx.lang_items.phantom_fn() &&
-                        data.0.trait_ref.substs.types.get_slice(TypeSpace)
-                                                     .iter()
-                                                     .cloned()
-                                                     .any(is_self)
+                    data.0.trait_ref.substs.types.get_slice(TypeSpace)
+                                                 .iter()
+                                                 .cloned()
+                                                 .any(is_self)
                 }
                 ty::Predicate::Projection(..) |
                 ty::Predicate::TypeOutlives(..) |
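For context, `supertraits_reference_self` flags traits whose supertrait bounds mention `Self` among their type parameters; this hunk only drops the now-unneeded `PhantomFn` exemption. A small sketch of the pattern the check detects, with illustrative names and modern syntax:

    trait Super<T: ?Sized> {
        fn touch(&self);
    }

    // `Self` appears in the supertrait's type-parameter list, so
    // `Sub` is not object safe and cannot be used as a trait object.
    trait Sub: Super<Self> {}

    // fn use_it(obj: &dyn Sub) {}  // error: `Sub` cannot be made
    //                              // into an object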
diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs
index f7e7d071f8c..ad7d96c652d 100644
--- a/src/librustc/middle/traits/select.rs
+++ b/src/librustc/middle/traits/select.rs
@@ -836,14 +836,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
             ambiguous: false
         };
 
-        // Check for the `PhantomFn` trait. This is really just a
-        // special annotation that is *always* considered to match, no
-        // matter what the type parameters are etc.
-        if self.tcx().lang_items.phantom_fn() == Some(obligation.predicate.def_id()) {
-            candidates.vec.push(PhantomFnCandidate);
-            return Ok(candidates);
-        }
-
         // Other bounds. Consider both in-scope bounds from fn decl
         // and applicable impls. There is a certain set of precedence rules here.
 
diff --git a/src/librustc_typeck/check/wf.rs b/src/librustc_typeck/check/wf.rs
index a86e2b17c93..eb06caf7d5a 100644
--- a/src/librustc_typeck/check/wf.rs
+++ b/src/librustc_typeck/check/wf.rs
@@ -117,15 +117,10 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
 
                 self.check_variances_for_type_defn(item, ast_generics);
             }
-            ast::ItemTrait(_, ref ast_generics, _, ref items) => {
+            ast::ItemTrait(_, _, _, ref items) => {
                 let trait_predicates =
                     ty::lookup_predicates(ccx.tcx, local_def(item.id));
-                reject_non_type_param_bounds(
-                    ccx.tcx,
-                    item.span,
-                    &trait_predicates);
-                self.check_variances(item, ast_generics, &trait_predicates,
-                                     self.tcx().lang_items.phantom_fn());
+                reject_non_type_param_bounds(ccx.tcx, item.span, &trait_predicates);
                 if ty::trait_has_default_impl(ccx.tcx, local_def(item.id)) {
                     if !items.is_empty() {
                         ccx.tcx.sess.span_err(
@@ -287,30 +282,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
                                      ast_generics: &ast::Generics)
     {
         let item_def_id = local_def(item.id);
-        let predicates = ty::lookup_predicates(self.tcx(), item_def_id);
-        self.check_variances(item,
-                             ast_generics,
-                             &predicates,
-                             self.tcx().lang_items.phantom_data());
-    }
-
-    fn check_variances(&self,
-                       item: &ast::Item,
-                       ast_generics: &ast::Generics,
-                       ty_predicates: &ty::GenericPredicates<'tcx>,
-                       suggested_marker_id: Option<ast::DefId>)
-    {
-        let variance_lang_items = &[
-            self.tcx().lang_items.phantom_fn(),
-            self.tcx().lang_items.phantom_data(),
-        ];
-
-        let item_def_id = local_def(item.id);
-        let is_lang_item = variance_lang_items.iter().any(|n| *n == Some(item_def_id));
-        if is_lang_item {
-            return;
-        }
-
+        let ty_predicates = ty::lookup_predicates(self.tcx(), item_def_id);
         let variances = ty::item_variances(self.tcx(), item_def_id);
 
         let mut constrained_parameters: HashSet<_> =
@@ -331,7 +303,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
                 continue;
             }
             let span = self.ty_param_span(ast_generics, item, space, index);
-            self.report_bivariance(span, param_ty.name, suggested_marker_id);
+            self.report_bivariance(span, param_ty.name);
         }
 
         for (space, index, &variance) in variances.regions.iter_enumerated() {
@@ -342,7 +314,7 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
             assert_eq!(space, TypeSpace);
             let span = ast_generics.lifetimes[index].lifetime.span;
             let name = ast_generics.lifetimes[index].lifetime.name;
-            self.report_bivariance(span, name, suggested_marker_id);
+            self.report_bivariance(span, name);
         }
     }
 
@@ -377,14 +349,14 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
 
     fn report_bivariance(&self,
                          span: Span,
-                         param_name: ast::Name,
-                         suggested_marker_id: Option<ast::DefId>)
+                         param_name: ast::Name)
     {
         self.tcx().sess.span_err(
             span,
             &format!("parameter `{}` is never used",
                      param_name.user_string(self.tcx())));
 
+        let suggested_marker_id = self.tcx().lang_items.phantom_data();
         match suggested_marker_id {
             Some(def_id) => {
                 self.tcx().sess.fileline_help(
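With `PhantomFn` gone, `report_bivariance` can unconditionally suggest `PhantomData`. A sketch of the scenario this diagnostic covers and the fix it points at, with illustrative type names:

    use std::marker::PhantomData;

    // struct Unused<T>;  // error: parameter `T` is never used

    // The suggested fix: mention `T` through `PhantomData`, which
    // occupies no space at runtime.
    struct Used<T>(PhantomData<T>);

    fn main() {
        let _u: Used<i32> = Used(PhantomData);
    }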
diff --git a/src/librustc_typeck/variance.rs b/src/librustc_typeck/variance.rs
index a95919c20f3..da2de731d64 100644
--- a/src/librustc_typeck/variance.rs
+++ b/src/librustc_typeck/variance.rs
@@ -18,34 +18,121 @@
 //! defined on type `X`, we only consider the definition of the type `X`
 //! and the definitions of any types it references.
 //!
-//! We only infer variance for type parameters found on *types*: structs,
-//! enums, and traits. We do not infer variance for type parameters found
-//! on fns or impls. This is because those things are not type definitions
-//! and variance doesn't really make sense in that context.
-//!
-//! It is worth covering what variance means in each case. For structs and
-//! enums, I think it is fairly straightforward. The variance of the type
+//! We only infer variance for type parameters found on *data types*
+//! like structs and enums. In these cases, there is a fairly straightforward
+//! explanation for what variance means. The variance of the type
 //! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
 //! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
-//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
-//! the variances we compute for type parameters.)
+//! (resp. `'a` and `'b`).
+//!
+//! We do not infer variance for type parameters found on traits, fns,
+//! or impls. Variance on trait parameters can indeed make sense
+//! (and we used to compute it) but it is actually rather subtle in
+//! meaning and not that useful in practice, so we removed it. See the
+//! addendum for some details. Variance on fn/impl parameters, on the
+//! other hand, doesn't make sense, because these parameters are
+//! instantiated and then forgotten; they don't persist in types or
+//! compiled byproducts.
+//!
+//! ### The algorithm
+//!
+//! The basic idea is quite straightforward. We iterate over the types
+//! defined and, for each use of a type parameter X, accumulate a
+//! constraint indicating that the variance of X must be valid for the
+//! variance of that use site. We then iteratively refine the variance of
+//! X until all constraints are met. There is *always* a solution, because at
+//! the limit we can declare all type parameters to be invariant and all
+//! constraints will be satisfied.
+//!
+//! As a simple example, consider:
+//!
+//!     enum Option<A> { Some(A), None }
+//!     enum OptionalFn<B> { Some(|B|), None }
+//!     enum OptionalMap<C> { Some(|C| -> C), None }
+//!
+//! Here, we will generate the constraints:
+//!
+//!     1. V(A) <= +
+//!     2. V(B) <= -
+//!     3. V(C) <= +
+//!     4. V(C) <= -
+//!
+//! These indicate that (1) the variance of A must be at most covariant;
+//! (2) the variance of B must be at most contravariant; and (3, 4) the
+//! variance of C must be at most covariant *and* contravariant. All of these
+//! results are based on a variance lattice defined as follows:
+//!
+//!       *      Top (bivariant)
+//!    -     +
+//!       o      Bottom (invariant)
+//!
+//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
+//! optimal solution. Note that there is always a naive solution which
+//! just declares all variables to be invariant.
+//!
+//! You may be wondering why fixed-point iteration is required. The reason
+//! is that the variance of a use site may itself be a function of the
+//! variance of other type parameters. In full generality, our constraints
+//! take the form:
+//!
+//!     V(X) <= Term
+//!     Term := + | - | * | o | V(X) | Term x Term
+//!
+//! Here the notation V(X) indicates the variance of a type/region
+//! parameter `X` with respect to its defining class. `Term x Term`
+//! represents the "variance transform" as defined in the paper:
+//!
+//!   If the variance of a type variable `X` in type expression `E` is `V2`
+//!   and the definition-site variance of the [corresponding] type parameter
+//!   of a class `C` is `V1`, then the variance of `X` in the type expression
+//!   `C<E>` is `V3 = V1.xform(V2)`.
+//!
+//! ### Constraints
+//!
+//! If I have a struct or enum with where clauses:
+//!
+//!     struct Foo<T:Bar> { ... }
+//!
+//! you might wonder whether the variance of `T` with respect to `Bar`
+//! affects the variance of `T` with respect to `Foo`. I claim no. The
+//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
+//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
+//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold.  In that
+//! case, the upcast will be illegal, but not because of a variance
+//! failure, but rather because the target type `Foo<Y>` is itself just
+//! not well-formed. Basically we get to assume well-formedness of all
+//! types involved before considering variance.
 //!
-//! ### Variance on traits
+//! ### Addendum: Variance on traits
 //!
-//! The meaning of variance for trait parameters is more subtle and worth
-//! expanding upon. There are in fact two uses of the variance values we
-//! compute.
+//! As mentioned above, we used to permit variance on traits. This was
+//! computed based on the appearance of trait type parameters in
+//! method signatures and was used to represent the compatibility of
+//! vtables in trait objects (and also "virtual" vtables or dictionary
+//! in trait bounds). One complication was that variance for
+//! associated types is less obvious, since they can be projected out
+//! and put to myriad uses, so it's not clear when it is safe to allow
+//! `X<A>::Bar` to vary (or indeed just what that means). Moreover (as
+//! covered below) all inputs on any trait with an associated type had
+//! to be invariant, limiting the applicability. Finally, the
+//! annotations (`MarkerTrait`, `PhantomFn`) that were needed to ensure
+//! that all trait type parameters had a variance were confusing and
+//! annoying, for little benefit.
 //!
-//! #### Trait variance and object types
+//! Just for historical reference, I am going to preserve some text indicating
+//! how one could interpret variance and trait matching.
 //!
-//! The first is for object types. Just as with structs and enums, we can
-//! decide the subtyping relationship between two object types `&Trait<A>`
-//! and `&Trait<B>` based on the relationship of `A` and `B`. Note that
-//! for object types we ignore the `Self` type parameter -- it is unknown,
-//! and the nature of dynamic dispatch ensures that we will always call a
+//! #### Variance and object types
+//!
+//! Just as with structs and enums, we can decide the subtyping
+//! relationship between two object types `&Trait<A>` and `&Trait<B>`
+//! based on the relationship of `A` and `B`. Note that for object
+//! types we ignore the `Self` type parameter -- it is unknown, and
+//! the nature of dynamic dispatch ensures that we will always call a
 //! function that is expecting the appropriate `Self` type. However, we
-//! must be careful with the other type parameters, or else we could end
-//! up calling a function that is expecting one type but provided another.
+//! must be careful with the other type parameters, or else we could
+//! end up calling a function that is expecting one type but provided
+//! another.
 //!
 //! To see what I mean, consider a trait like so:
 //!
@@ -135,104 +222,24 @@
 //!
 //! These conditions are satisfied and so we are happy.
 //!
-//! ### The algorithm
-//!
-//! The basic idea is quite straightforward. We iterate over the types
-//! defined and, for each use of a type parameter X, accumulate a
-//! constraint indicating that the variance of X must be valid for the
-//! variance of that use site. We then iteratively refine the variance of
-//! X until all constraints are met. There is *always* a sol'n, because at
-//! the limit we can declare all type parameters to be invariant and all
-//! constraints will be satisfied.
-//!
-//! As a simple example, consider:
-//!
-//!     enum Option<A> { Some(A), None }
-//!     enum OptionalFn<B> { Some(|B|), None }
-//!     enum OptionalMap<C> { Some(|C| -> C), None }
-//!
-//! Here, we will generate the constraints:
-//!
-//!     1. V(A) <= +
-//!     2. V(B) <= -
-//!     3. V(C) <= +
-//!     4. V(C) <= -
-//!
-//! These indicate that (1) the variance of A must be at most covariant;
-//! (2) the variance of B must be at most contravariant; and (3, 4) the
-//! variance of C must be at most covariant *and* contravariant. All of these
-//! results are based on a variance lattice defined as follows:
-//!
-//!       *      Top (bivariant)
-//!    -     +
-//!       o      Bottom (invariant)
+//! #### Variance and associated types
 //!
-//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
-//! optimal solution. Note that there is always a naive solution which
-//! just declares all variables to be invariant.
-//!
-//! You may be wondering why fixed-point iteration is required. The reason
-//! is that the variance of a use site may itself be a function of the
-//! variance of other type parameters. In full generality, our constraints
-//! take the form:
-//!
-//!     V(X) <= Term
-//!     Term := + | - | * | o | V(X) | Term x Term
-//!
-//! Here the notation V(X) indicates the variance of a type/region
-//! parameter `X` with respect to its defining class. `Term x Term`
-//! represents the "variance transform" as defined in the paper:
-//!
-//!   If the variance of a type variable `X` in type expression `E` is `V2`
-//!   and the definition-site variance of the [corresponding] type parameter
-//!   of a class `C` is `V1`, then the variance of `X` in the type expression
-//!   `C<E>` is `V3 = V1.xform(V2)`.
-//!
-//! ### Constraints
-//!
-//! If I have a struct or enum with where clauses:
-//!
-//!     struct Foo<T:Bar> { ... }
-//!
-//! you might wonder whether the variance of `T` with respect to `Bar`
-//! affects the variance `T` with respect to `Foo`. I claim no.  The
-//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t
-//! `Foo`. And then we have a `Foo<X>` that is upcast to `Foo<Y>`, where
-//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold.  In that
-//! case, the upcast will be illegal, but not because of a variance
-//! failure, but rather because the target type `Foo<Y>` is itself just
-//! not well-formed. Basically we get to assume well-formedness of all
-//! types involved before considering variance.
-//!
-//! ### Associated types
-//!
-//! Any trait with an associated type is invariant with respect to all
-//! of its inputs. To see why this makes sense, consider what
-//! subtyping for a trait reference means:
+//! Traits with associated types -- or at minimum projection
+//! expressions -- must be invariant with respect to all of their
+//! inputs. To see why this makes sense, consider what subtyping for a
+//! trait reference means:
 //!
 //!    <T as Trait> <: <U as Trait>
 //!
-//! means that if I know that `T as Trait`,
-//! I also know that `U as
-//! Trait`. Moreover, if you think of it as
-//! dictionary passing style, it means that
-//! a dictionary for `<T as Trait>` is safe
-//! to use where a dictionary for `<U as
-//! Trait>` is expected.
-//!
-//! The problem is that when you can
-//! project types out from `<T as Trait>`,
-//! the relationship to types projected out
-//! of `<U as Trait>` is completely unknown
-//! unless `T==U` (see #21726 for more
-//! details). Making `Trait` invariant
-//! ensures that this is true.
+//! means that if I know that `T as Trait`, I also know that `U as
+//! Trait`. Moreover, if you think of it as dictionary passing style,
+//! it means that a dictionary for `<T as Trait>` is safe to use where
+//! a dictionary for `<U as Trait>` is expected.
 //!
-//! *Historical note: we used to preserve this invariant another way,
-//! by tweaking the subtyping rules and requiring that when a type `T`
-//! appeared as part of a projection, that was considered an invariant
-//! location, but this version does away with the need for those
-//! somewhat "special-case-feeling" rules.*
+//! The problem is that when you can project types out from `<T as
+//! Trait>`, the relationship to types projected out of `<U as Trait>`
+//! is completely unknown unless `T==U` (see #21726 for more
+//! details). Making `Trait` invariant ensures that this is true.
 //!
 //! Another related reason is that if we didn't make traits with
 //! associated types invariant, then projection is no longer a
@@ -383,7 +390,6 @@ fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
 
 fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec<ty::Variance>)> {
     let all = vec![
-        (tcx.lang_items.phantom_fn(), vec![ty::Contravariant, ty::Covariant]),
         (tcx.lang_items.phantom_data(), vec![ty::Covariant]),
         (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]),
 
@@ -520,6 +526,9 @@ impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
                 self.add_inferreds_for_item(item.id, false, generics);
             }
             ast::ItemTrait(_, ref generics, _, _) => {
+                // Note: all inputs for traits are ultimately
+                // constrained to be invariant. See `visit_item` in
+                // the impl for `ConstraintContext` below.
                 self.add_inferreds_for_item(item.id, true, generics);
                 visit::walk_item(self, item);
             }
diff --git a/src/test/compile-fail/variance-contravariant-arg-object.rs b/src/test/compile-fail/variance-contravariant-arg-object.rs
index e47d0c33d8f..1795ac95358 100644
--- a/src/test/compile-fail/variance-contravariant-arg-object.rs
+++ b/src/test/compile-fail/variance-contravariant-arg-object.rs
@@ -10,8 +10,8 @@
 
 #![allow(dead_code)]
 
-// This test was previously testing variance on traits.
-// But now that it is removed, both cases error.
+// Test that even when `T` is only used in contravariant position, it
+// is treated as invariant.
 
 trait Get<T> : 'static {
     fn get(&self, t: T);
diff --git a/src/test/compile-fail/variance-contravariant-arg-trait-match.rs b/src/test/compile-fail/variance-contravariant-arg-trait-match.rs
index b96e19220d0..9b6e3c9de3b 100644
--- a/src/test/compile-fail/variance-contravariant-arg-trait-match.rs
+++ b/src/test/compile-fail/variance-contravariant-arg-trait-match.rs
@@ -10,6 +10,9 @@
 
 #![allow(dead_code)]
 
+// Test that even when `T` is only used in contravariant position, it
+// is treated as invariant.
+
 trait Get<T> {
     fn get(&self, t: T);
 }
diff --git a/src/test/compile-fail/variance-contravariant-self-trait-match.rs b/src/test/compile-fail/variance-contravariant-self-trait-match.rs
index 9d2766d878b..6d9d1e61fed 100644
--- a/src/test/compile-fail/variance-contravariant-self-trait-match.rs
+++ b/src/test/compile-fail/variance-contravariant-self-trait-match.rs
@@ -10,6 +10,9 @@
 
 #![allow(dead_code)]
 
+// Test that even when `Self` is only used in contravariant position, it
+// is treated as invariant.
+
 trait Get {
     fn get(&self);
 }
diff --git a/src/test/compile-fail/variance-covariant-arg-object.rs b/src/test/compile-fail/variance-covariant-arg-object.rs
index 757c1c9a8a2..ad059a467f5 100644
--- a/src/test/compile-fail/variance-covariant-arg-object.rs
+++ b/src/test/compile-fail/variance-covariant-arg-object.rs
@@ -10,6 +10,9 @@
 
 #![allow(dead_code)]
 
+// Test that even when `T` is only used in covariant position, it
+// is treated as invariant.
+
 trait Get<T> : 'static {
     fn get(&self) -> T;
 }
diff --git a/src/test/compile-fail/variance-covariant-arg-trait-match.rs b/src/test/compile-fail/variance-covariant-arg-trait-match.rs
index 097f8eb9864..c42a845b3b5 100644
--- a/src/test/compile-fail/variance-covariant-arg-trait-match.rs
+++ b/src/test/compile-fail/variance-covariant-arg-trait-match.rs
@@ -10,6 +10,9 @@
 
 #![allow(dead_code)]
 
+// Test that even when `T` is only used in covariant position, it
+// is treated as invariant.
+
 trait Get<T> {
     fn get(&self) -> T;
 }
diff --git a/src/test/compile-fail/variance-covariant-self-trait-match.rs b/src/test/compile-fail/variance-covariant-self-trait-match.rs
index 57ea367b49c..25148dfc020 100644
--- a/src/test/compile-fail/variance-covariant-self-trait-match.rs
+++ b/src/test/compile-fail/variance-covariant-self-trait-match.rs
@@ -10,6 +10,9 @@
 
 #![allow(dead_code)]
 
+// Test that even when `Self` is only used in covariant position, it
+// is treated as invariant.
+
 trait Get {
     fn get() -> Self;
 }