[Git][ghc/ghc][wip/dmd-arity] Compute demand signatures assuming idArity
Sebastian Graf
gitlab at gitlab.haskell.org
Fri Apr 5 13:50:31 UTC 2019
Sebastian Graf pushed to branch wip/dmd-arity at Glasgow Haskell Compiler / GHC
Commits:
1a670dc0 by Sebastian Graf at 2019-04-05T13:50:20Z
Compute demand signatures assuming idArity
This does four things:
1. Look at `idArity` instead of manifest lambdas to decide whether to use LetUp
2. Compute the strictness signature in LetDown assuming at least `idArity`
incoming arguments
3. Remove the special case for trivial RHSs, which is subsumed by 2
4. Don't perform the W/W split when doing so would eta expand a binding.
Otherwise we would eta expand PAPs, causing unnecessary churn in the
Simplifier.
NoFib Results
--------------------------------------------------------------------------------
Program Allocs Instrs
--------------------------------------------------------------------------------
fannkuch-redux +0.3% 0.0%
gg -0.0% -0.1%
maillist +0.2% +0.2%
minimax 0.0% +0.8%
pretty 0.0% -0.1%
scc 0.0% -0.1%
--------------------------------------------------------------------------------
Min -0.0% -0.1%
Max +0.3% +0.8%
Geometric Mean +0.0% +0.0%
- - - - -
17 changed files:
- compiler/basicTypes/Demand.hs
- compiler/basicTypes/Id.hs
- compiler/basicTypes/IdInfo.hs
- compiler/basicTypes/Var.hs
- compiler/coreSyn/CoreArity.hs
- compiler/coreSyn/CoreLint.hs
- compiler/coreSyn/CoreUnfold.hs
- compiler/simplCore/SimplMonad.hs
- compiler/simplCore/SimplUtils.hs
- compiler/stranal/DmdAnal.hs
- compiler/stranal/WorkWrap.hs
- compiler/stranal/WwLib.hs
- + testsuite/tests/perf/compiler/WWRec.hs
- testsuite/tests/perf/compiler/all.T
- + testsuite/tests/stranal/sigs/NewtypeArity.hs
- + testsuite/tests/stranal/sigs/NewtypeArity.stderr
- testsuite/tests/stranal/sigs/all.T
Changes:
=====================================
compiler/basicTypes/Demand.hs
=====================================
@@ -22,7 +22,7 @@ module Demand (
DmdType(..), dmdTypeDepth, lubDmdType, bothDmdType,
nopDmdType, botDmdType, mkDmdType,
- addDemand, removeDmdTyArgs,
+ addDemand, ensureArgs,
BothDmdArg, mkBothDmdArg, toBothDmdArg,
DmdEnv, emptyDmdEnv,
@@ -34,7 +34,7 @@ module Demand (
vanillaCprProdRes, cprSumRes,
appIsBottom, isBottomingSig, pprIfaceStrictSig,
trimCPRInfo, returnsCPR_maybe,
- StrictSig(..), mkStrictSig, mkClosedStrictSig,
+ StrictSig(..), mkStrictSigForArity, mkClosedStrictSig,
nopSig, botSig, cprProdSig,
isTopSig, hasDemandEnvSig,
splitStrictSig, strictSigDmdEnv,
@@ -47,10 +47,10 @@ module Demand (
deferAfterIO,
postProcessUnsat, postProcessDmdType,
- splitProdDmd_maybe, peelCallDmd, peelManyCalls, mkCallDmd,
+ splitProdDmd_maybe, peelCallDmd, peelManyCalls, mkCallDmd, mkCallDmds,
mkWorkerDemand, dmdTransformSig, dmdTransformDataConSig,
dmdTransformDictSelSig, argOneShots, argsOneShots, saturatedByOneShots,
- trimToType, TypeShape(..),
+ TypeShape(..), peelTsFuns, trimToType,
useCount, isUsedOnce, reuseEnv,
killUsageDemand, killUsageSig, zapUsageDemand, zapUsageEnvSig,
@@ -675,10 +675,15 @@ mkProdDmd dx
= JD { sd = mkSProd $ map getStrDmd dx
, ud = mkUProd $ map getUseDmd dx }
+-- | Wraps the 'CleanDemand' with a one-shot call demand: @d@ -> @C1(d)@.
mkCallDmd :: CleanDemand -> CleanDemand
mkCallDmd (JD {sd = d, ud = u})
= JD { sd = mkSCall d, ud = mkUCall One u }
+-- | @mkCallDmds n d@ returns @C1(C1...(C1 d))@ where there are @n@ @C1@'s.
+mkCallDmds :: Arity -> CleanDemand -> CleanDemand
+mkCallDmds arity cd = iterate mkCallDmd cd !! arity
+
-- See Note [Demand on the worker] in WorkWrap
mkWorkerDemand :: Int -> Demand
mkWorkerDemand n = JD { sd = Lazy, ud = Use One (go n) }
@@ -804,6 +809,13 @@ instance Outputable TypeShape where
ppr (TsFun ts) = text "TsFun" <> parens (ppr ts)
ppr (TsProd tss) = parens (hsep $ punctuate comma $ map ppr tss)
+-- | @peelTsFuns n ts@ tries to peel off @n@ 'TsFun' constructors from @ts@ and
+-- returns 'Just' the wrapped 'TypeShape' on success, and 'Nothing' otherwise.
+peelTsFuns :: Arity -> TypeShape -> Maybe TypeShape
+peelTsFuns 0 ts = Just ts
+peelTsFuns n (TsFun ts) = peelTsFuns (n-1) ts
+peelTsFuns _ _ = Nothing
+
trimToType :: Demand -> TypeShape -> Demand
-- See Note [Trimming a demand to a type]
trimToType (JD { sd = ms, ud = mu }) ts
@@ -1207,12 +1219,8 @@ mkDmdType fv ds res = DmdType fv ds res
dmdTypeDepth :: DmdType -> Arity
dmdTypeDepth (DmdType _ ds _) = length ds
--- Remove any demand on arguments. This is used in dmdAnalRhs on the body
-removeDmdTyArgs :: DmdType -> DmdType
-removeDmdTyArgs = ensureArgs 0
-
--- This makes sure we can use the demand type with n arguments,
--- It extends the argument list with the correct resTypeArgDmd
+-- | This makes sure we can use the demand type with n arguments.
+-- It extends the argument list with the correct resTypeArgDmd.
-- It also adjusts the DmdResult: Divergence survives additional arguments,
-- CPR information does not (and definite convergence also would not).
ensureArgs :: Arity -> DmdType -> DmdType
@@ -1567,8 +1575,56 @@ and <L,U(U,U)> on the second, then returning a constructor.
If this same function is applied to one arg, all we can say is that it
uses x with <L,U>, and its arg with demand <L,U>.
+
+Note [Understanding DmdType and StrictSig]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Demand types are sound approximations of an expression's semantics relative to
+the incoming demand we put the expression under. Consider the following
+expression:
+
+ \x y -> x `seq` (y, 2*x)
+
+Here is a table with demand types resulting from different incoming demands we
+put that expression under. Note the monotonicity; a stronger incoming demand
+yields a more precise demand type:
+
+ incoming demand | demand type
+ ----------------------------------------------------
+ <S ,HU > | <L,U><L,U>{}
+ <C(C(S )),C1(C1(U ))> | <S,U><L,U>{}
+ <C(C(S(S,L))),C1(C1(U(1*U,A)))> | <S,1*HU><S,1*U>{}
+
+Note that in the first example, the depth of the demand type was *higher* than
+the arity of the incoming call demand due to the anonymous lambda.
+The converse is also possible and happens when we unleash demand signatures.
+In @f x y@, the incoming call demand on f has arity 2. But if all we have is a
+demand signature with depth 1 for @f@ (which we can safely unleash, see below),
+the demand type of @f@ under a call demand of arity 2 has a *lower* depth of 1.
+
+So: Demand types are elicited by putting an expression under an incoming (call)
+demand, the arity of which can be lower or higher than the depth of the
+resulting demand type.
+In contrast, a demand signature summarises a function's semantics *without*
+immediately specifying the incoming demand it was produced under. Despite StrictSig
+being a newtype wrapper around DmdType, it actually encodes two things:
+
+ * The threshold (i.e., minimum arity) to unleash the signature
+ * A demand type that is sound to unleash when the minimum arity requirement is
+ met.
+
+Here comes the subtle part: The threshold is encoded in the wrapped demand
+type's depth! So in mkStrictSigForArity we make sure to trim the list of
+argument demands to the given threshold arity. Call sites will make sure that
+this corresponds to the arity of the call demand that elicited the wrapped
+demand type. See also Note [What are demand signatures?] in DmdAnal.
+
+Besides trimming argument demands, mkStrictSigForArity will also trim CPR
+information if necessary.
-}
+-- | The depth of the wrapped 'DmdType' encodes the arity at which it is safe
+-- to unleash. Better construct this through 'mkStrictSigForArity'.
+-- See Note [Understanding DmdType and StrictSig]
newtype StrictSig = StrictSig DmdType
deriving( Eq )
@@ -1580,34 +1636,43 @@ pprIfaceStrictSig :: StrictSig -> SDoc
pprIfaceStrictSig (StrictSig (DmdType _ dmds res))
= hcat (map ppr dmds) <> ppr res
-mkStrictSig :: DmdType -> StrictSig
-mkStrictSig dmd_ty = StrictSig dmd_ty
+-- | Turns a 'DmdType' computed for the particular 'Arity' into a 'StrictSig'
+-- unleashable at that arity. See Note [Understanding DmdType and StrictSig]
+mkStrictSigForArity :: Arity -> DmdType -> StrictSig
+mkStrictSigForArity arity dmd_ty = StrictSig (ensureArgs arity dmd_ty)
mkClosedStrictSig :: [Demand] -> DmdResult -> StrictSig
-mkClosedStrictSig ds res = mkStrictSig (DmdType emptyDmdEnv ds res)
+mkClosedStrictSig ds res = mkStrictSigForArity (length ds) (DmdType emptyDmdEnv ds res)
splitStrictSig :: StrictSig -> ([Demand], DmdResult)
splitStrictSig (StrictSig (DmdType _ dmds res)) = (dmds, res)
increaseStrictSigArity :: Int -> StrictSig -> StrictSig
--- Add extra arguments to a strictness signature
+-- ^ Add extra arguments to a strictness signature.
+-- In contrast to 'etaExpandStrictSig', this /prepends/ additional argument
+-- demands and leaves CPR info intact.
increaseStrictSigArity arity_increase sig@(StrictSig dmd_ty@(DmdType env dmds res))
| isTopDmdType dmd_ty = sig
- | arity_increase <= 0 = sig
+ | arity_increase == 0 = sig
+ | arity_increase < 0 = WARN( True, text "increaseStrictSigArity:"
+ <+> text "negative arity increase"
+ <+> ppr arity_increase )
+ nopSig
| otherwise = StrictSig (DmdType env dmds' res)
where
dmds' = replicate arity_increase topDmd ++ dmds
etaExpandStrictSig :: Arity -> StrictSig -> StrictSig
--- We are expanding (\x y. e) to (\x y z. e z)
--- Add exta demands to the /end/ of the arg demands if necessary
-etaExpandStrictSig arity sig@(StrictSig dmd_ty@(DmdType env dmds res))
- | isTopDmdType dmd_ty = sig
- | arity_increase <= 0 = sig
- | otherwise = StrictSig (DmdType env dmds' res)
- where
- arity_increase = arity - length dmds
- dmds' = dmds ++ replicate arity_increase topDmd
+-- ^ We are expanding (\x y. e) to (\x y z. e z).
+-- In contrast to 'increaseStrictSigArity', this /appends/ extra arg demands if
+-- necessary, potentially destroying the signature's CPR property.
+etaExpandStrictSig arity (StrictSig dmd_ty)
+ | arity < dmdTypeDepth dmd_ty
+ -- an arity decrease must zap the whole signature, because it was possibly
+ -- computed for a higher incoming call demand.
+ = nopSig
+ | otherwise
+ = StrictSig $ ensureArgs arity dmd_ty
isTopSig :: StrictSig -> Bool
isTopSig (StrictSig ty) = isTopDmdType ty
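To make the two new helpers concrete, here is a minimal standalone sketch of
peelTsFuns and mkCallDmds (illustrative only: TypeShape is pared down to the
constructors the example needs, and mkCallDmds is modelled on pretty-printed
demand strings rather than the real CleanDemand):

  -- A pared-down TypeShape, just enough for peelTsFuns.
  data TypeShape = TsFun TypeShape | TsProd [TypeShape] | TsUnk
    deriving Show

  -- Same recursion as the real peelTsFuns above.
  peelTsFuns :: Int -> TypeShape -> Maybe TypeShape
  peelTsFuns 0 ts         = Just ts
  peelTsFuns n (TsFun ts) = peelTsFuns (n-1) ts
  peelTsFuns _ _          = Nothing

  -- mkCallDmds n d wraps d in n one-shot call demands, here on strings.
  mkCallDmds :: Int -> String -> String
  mkCallDmds n d = iterate (\x -> "C1(" ++ x ++ ")") d !! n

  main :: IO ()
  main = do
    -- Peel two TsFun layers off the shape of \x y -> (x, y):
    print (peelTsFuns 2 (TsFun (TsFun (TsProd [TsUnk, TsUnk]))))
    -- Just (TsProd [TsUnk,TsUnk])
    -- Wrap a product usage demand in one call demand per argument:
    putStrLn (mkCallDmds 2 "U(U,U)")
    -- C1(C1(U(U,U)))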
=====================================
compiler/basicTypes/Id.hs
=====================================
@@ -668,6 +668,7 @@ isBottomingId v
| isId v = isBottomingSig (idStrictness v)
| otherwise = False
+-- | Accesses the 'Id''s 'strictnessInfo'.
idStrictness :: Id -> StrictSig
idStrictness id = strictnessInfo (idInfo id)
=====================================
compiler/basicTypes/IdInfo.hs
=====================================
@@ -237,22 +237,34 @@ pprIdDetails other = brackets (pp other)
-- too big.
data IdInfo
= IdInfo {
- arityInfo :: !ArityInfo, -- ^ 'Id' arity
- ruleInfo :: RuleInfo, -- ^ Specialisations of the 'Id's function which exist
- -- See Note [Specialisations and RULES in IdInfo]
- unfoldingInfo :: Unfolding, -- ^ The 'Id's unfolding
- cafInfo :: CafInfo, -- ^ 'Id' CAF info
- oneShotInfo :: OneShotInfo, -- ^ Info about a lambda-bound variable, if the 'Id' is one
- inlinePragInfo :: InlinePragma, -- ^ Any inline pragma atached to the 'Id'
- occInfo :: OccInfo, -- ^ How the 'Id' occurs in the program
-
- strictnessInfo :: StrictSig, -- ^ A strictness signature
-
- demandInfo :: Demand, -- ^ ID demand information
- callArityInfo :: !ArityInfo, -- ^ How this is called.
- -- n <=> all calls have at least n arguments
-
- levityInfo :: LevityInfo -- ^ when applied, will this Id ever have a levity-polymorphic type?
+ arityInfo :: !ArityInfo,
+ -- ^ 'Id' arity, as computed by 'CoreArity'. Specifies how many
+ -- arguments this 'Id' has to be applied to before it does any
+ -- meaningful work.
+ ruleInfo :: RuleInfo,
+ -- ^ Specialisations of the 'Id's function which exist.
+ -- See Note [Specialisations and RULES in IdInfo]
+ unfoldingInfo :: Unfolding,
+ -- ^ The 'Id's unfolding
+ cafInfo :: CafInfo,
+ -- ^ 'Id' CAF info
+ oneShotInfo :: OneShotInfo,
+ -- ^ Info about a lambda-bound variable, if the 'Id' is one
+ inlinePragInfo :: InlinePragma,
+ -- ^ Any inline pragma attached to the 'Id'
+ occInfo :: OccInfo,
+ -- ^ How the 'Id' occurs in the program
+ strictnessInfo :: StrictSig,
+ -- ^ A strictness signature. Digests how a function uses its arguments
+ -- if applied to at least 'arityInfo' arguments.
+ demandInfo :: Demand,
+ -- ^ ID demand information
+ callArityInfo :: !ArityInfo,
+ -- ^ How this is called. This is the number of arguments to which a
+ -- binding can be eta-expanded without losing any sharing.
+ -- n <=> all calls have at least n arguments
+ levityInfo :: LevityInfo
+ -- ^ when applied, will this Id ever have a levity-polymorphic type?
}
-- Setters
=====================================
compiler/basicTypes/Var.hs
=====================================
@@ -700,6 +700,8 @@ setIdNotExported id = ASSERT( isLocalId id )
************************************************************************
-}
+-- | Is this a type-level (i.e., computationally irrelevant, thus erasable)
+-- variable? Satisfies @isTyVar = not . isId@.
isTyVar :: Var -> Bool -- True of both TyVar and TcTyVar
isTyVar (TyVar {}) = True
isTyVar (TcTyVar {}) = True
@@ -712,17 +714,21 @@ isTcTyVar _ = False
isTyCoVar :: Var -> Bool
isTyCoVar v = isTyVar v || isCoVar v
+-- | Is this a value-level (i.e., computationally relevant) 'Id'entifier?
+-- Satisfies @isId = not . isTyVar@.
isId :: Var -> Bool
isId (Id {}) = True
isId _ = False
+-- | Is this a coercion variable?
+-- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@.
isCoVar :: Var -> Bool
--- A coercion variable
isCoVar (Id { id_details = details }) = isCoVarDetails details
isCoVar _ = False
+-- | Is this a term variable ('Id') that is /not/ a coercion variable?
+-- Satisfies @'isId' v ==> 'isCoVar' v == not ('isNonCoVarId' v)@.
isNonCoVarId :: Var -> Bool
--- A term variable (Id) that is /not/ a coercion variable
isNonCoVarId (Id { id_details = details }) = not (isCoVarDetails details)
isNonCoVarId _ = False
=====================================
compiler/coreSyn/CoreArity.hs
=====================================
@@ -158,7 +158,7 @@ exprBotStrictness_maybe e
{-
Note [exprArity invariant]
~~~~~~~~~~~~~~~~~~~~~~~~~~
-exprArity has the following invariant:
+exprArity has the following invariants:
(1) If typeArity (exprType e) = n,
then manifestArity (etaExpand e n) = n
=====================================
compiler/coreSyn/CoreLint.hs
=====================================
@@ -570,15 +570,9 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs)
(addWarnL (text "INLINE binder is (non-rule) loop breaker:" <+> ppr binder))
-- Only non-rule loop breakers inhibit inlining
- -- Check whether arity and demand type are consistent (only if demand analysis
- -- already happened)
- --
- -- Note (Apr 2014): this is actually ok. See Note [Demand analysis for trivial right-hand sides]
- -- in DmdAnal. After eta-expansion in CorePrep the rhs is no longer trivial.
- -- ; let dmdTy = idStrictness binder
- -- ; checkL (case dmdTy of
- -- StrictSig dmd_ty -> idArity binder >= dmdTypeDepth dmd_ty || exprIsTrivial rhs)
- -- (mkArityMsg binder)
+ -- We used to check that the dmdTypeDepth of a demand signature never
+ -- exceeds idArity, but that is an unnecessary complication, see
+ -- Note [idArity varies independently of dmdTypeDepth] in DmdAnal
-- Check that the binder's arity is within the bounds imposed by
-- the type and the strictness signature. See Note [exprArity invariant]
@@ -2562,20 +2556,6 @@ mkKindErrMsg tyvar arg_ty
hang (text "Arg type:")
4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))]
-{- Not needed now
-mkArityMsg :: Id -> MsgDoc
-mkArityMsg binder
- = vcat [hsep [text "Demand type has",
- ppr (dmdTypeDepth dmd_ty),
- text "arguments, rhs has",
- ppr (idArity binder),
- text "arguments,",
- ppr binder],
- hsep [text "Binder's strictness signature:", ppr dmd_ty]
-
- ]
- where (StrictSig dmd_ty) = idStrictness binder
--}
mkCastErr :: CoreExpr -> Coercion -> Type -> Type -> MsgDoc
mkCastErr expr = mk_cast_err "expression" "type" (ppr expr)
=====================================
compiler/coreSyn/CoreUnfold.hs
=====================================
@@ -1149,15 +1149,15 @@ certainlyWillInline dflags fn_info
-- INLINABLE functions come via this path
-- See Note [certainlyWillInline: INLINABLE]
do_cunf expr (UnfIfGoodArgs { ug_size = size, ug_args = args })
- | not (null args) -- See Note [certainlyWillInline: be careful of thunks]
+ | arityInfo fn_info > 0 -- See Note [certainlyWillInline: be careful of thunks]
, not (isBottomingSig (strictnessInfo fn_info))
-- Do not unconditionally inline a bottoming functions even if
-- it seems smallish. We've carefully lifted it out to top level,
-- so we don't want to re-inline it.
- , let arity = length args
- , size - (10 * (arity + 1)) <= ufUseThreshold dflags
+ , let unf_arity = length args
+ , size - (10 * (unf_arity + 1)) <= ufUseThreshold dflags
= Just (fn_unf { uf_src = InlineStable
- , uf_guidance = UnfWhen { ug_arity = arity
+ , uf_guidance = UnfWhen { ug_arity = unf_arity
, ug_unsat_ok = unSaturatedOk
, ug_boring_ok = inlineBoringOk expr } })
-- Note the "unsaturatedOk". A function like f = \ab. a
@@ -1175,6 +1175,17 @@ found that the WorkWrap phase thought that
y = case x of F# v -> F# (v +# v)
was certainlyWillInline, so the addition got duplicated.
+Note that we check arityInfo instead of the arity of the unfolding to detect
+this case. This is so that we don't accidentally fail to inline small partial
+applications, like `f = g 42` where `g` recurses into `f` and has arity 2
+(say). Here there is no risk of work duplication, and the RHS is tiny, so
+certainlyWillInline should return True. But `unf_arity` is zero! However, f's
+arity, obtained from `arityInfo fn_info`, is 1.
+
+Failing to say that `f` will inline forces W/W to generate a potentially huge
+worker for f that will immediately cancel with `g`'s wrapper anyway, causing
+unnecessary churn in the Simplifier while arriving at the same result.
+
Note [certainlyWillInline: INLINABLE]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
certainlyWillInline /must/ return Nothing for a large INLINABLE thing,
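To picture the partial-application case that the new arityInfo check is about,
a hypothetical source-level rendering (names and bodies invented) might look
like this; f has no manifest lambdas, so its unfolding takes no arguments, yet
its idArity is 1:

  module PapSketch where

  g :: Int -> Int -> Int
  g x y = if x <= 0 then y else f (x - 1) + y   -- g recurses back into f

  f :: Int -> Int
  f = g 42   -- tiny PAP: unf_arity is 0, idArity is 1; certainlyWillInline should say yes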
=====================================
compiler/simplCore/SimplMonad.hs
=====================================
@@ -21,7 +21,7 @@ module SimplMonad (
import GhcPrelude
-import Var ( Var, isTyVar, mkLocalVar )
+import Var ( Var, isId, mkLocalVar )
import Name ( mkSystemVarName )
import Id ( Id, mkSysLocalOrCoVar )
import IdInfo ( IdDetails(..), vanillaIdInfo, setArityInfo )
@@ -187,7 +187,8 @@ newJoinId bndrs body_ty
= do { uniq <- getUniqueM
; let name = mkSystemVarName uniq (fsLit "$j")
join_id_ty = mkLamTypes bndrs body_ty -- Note [Funky mkLamTypes]
- arity = length (filter (not . isTyVar) bndrs)
+ -- Note [idArity for join points] in SimplUtils
+ arity = length (filter isId bndrs)
join_arity = length bndrs
details = JoinId join_arity
id_info = vanillaIdInfo `setArityInfo` arity
=====================================
compiler/simplCore/SimplUtils.hs
=====================================
@@ -1508,7 +1508,7 @@ tryEtaExpandRhs :: SimplMode -> OutId -> OutExpr
-> SimplM (Arity, Bool, OutExpr)
-- See Note [Eta-expanding at let bindings]
-- If tryEtaExpandRhs rhs = (n, is_bot, rhs') then
--- (a) rhs' has manifest arity
+-- (a) rhs' has manifest arity n
-- (b) if is_bot is True then rhs' applied to n args is guaranteed bottom
tryEtaExpandRhs mode bndr rhs
| Just join_arity <- isJoinId_maybe bndr
@@ -1517,6 +1517,7 @@ tryEtaExpandRhs mode bndr rhs
-- Note [Do not eta-expand join points]
-- But do return the correct arity and bottom-ness, because
-- these are used to set the bndr's IdInfo (#15517)
+ -- Note [idArity for join points]
| otherwise
= do { (new_arity, is_bot, new_rhs) <- try_expand
@@ -1610,6 +1611,13 @@ CorePrep comes around, the code is very likely to look more like this:
$j2 = if n > 0 then $j1
else (...) eta
+Note [idArity for join points]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Because of Note [Do not eta-expand join points], the idArity
+of a join point is always (less than or) equal to the join arity.
+Essentially, for join points we set `idArity $j = count isId join_lam_bndrs`.
+It really can be less if there are type-level binders in join_lam_bndrs.
+
Note [Do not eta-expand PAPs]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We used to have old_arity = manifestArity rhs, which meant that we
=====================================
compiler/stranal/DmdAnal.hs
=====================================
@@ -32,7 +32,7 @@ import Type
import Coercion ( Coercion, coVarsOfCo )
import FamInstEnv
import Util
-import Maybes ( isJust )
+import Maybes ( fromMaybe, isJust )
import TysWiredIn
import TysPrim ( realWorldStatePrimTy )
import ErrUtils ( dumpIfSet_dyn )
@@ -206,7 +206,6 @@ dmdAnal' env dmd (App fun arg)
-- , text "overall res dmd_ty =" <+> ppr (res_ty `bothDmdType` arg_ty) ])
(res_ty `bothDmdType` arg_ty, App fun' arg')
--- this is an anonymous lambda, since @dmdAnalRhsLetDown@ uses @collectBinders@
dmdAnal' env dmd (Lam var body)
| isTyVar var
= let
@@ -286,10 +285,7 @@ dmdAnal' env dmd (Case scrut case_bndr ty alts)
-- This is used for a non-recursive local let without manifest lambdas.
-- This is the LetUp rule in the paper “Higher-Order Cardinality Analysis”.
dmdAnal' env dmd (Let (NonRec id rhs) body)
- | useLetUp id rhs
- , Nothing <- unpackTrivial rhs
- -- dmdAnalRhsLetDown treats trivial right hand sides specially
- -- so if we have a trival right hand side, fall through to that.
+ | useLetUp id
= (final_ty, Let (NonRec id' rhs') body')
where
(body_ty, body') = dmdAnal env dmd body
@@ -582,25 +578,6 @@ environment, which effectively assigns them 'nopSig' (see "getStrictness")
-}
--- Trivial RHS
--- See Note [Demand analysis for trivial right-hand sides]
-dmdAnalTrivialRhs ::
- AnalEnv -> Id -> CoreExpr -> Var ->
- (DmdEnv, Id, CoreExpr)
-dmdAnalTrivialRhs env id rhs fn
- = (fn_fv, set_idStrictness env id fn_str, rhs)
- where
- fn_str = getStrictness env fn
- fn_fv | isLocalId fn = unitVarEnv fn topDmd
- | otherwise = emptyDmdEnv
- -- Note [Remember to demand the function itself]
- -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- -- fn_fv: don't forget to produce a demand for fn itself
- -- Lacking this caused #9128
- -- The demand is very conservative (topDmd), but that doesn't
- -- matter; trivial bindings are usually inlined, so it only
- -- kicks in for top-level bindings and NOINLINE bindings
-
-- Let bindings can be processed in two ways:
-- Down (RHS before body) or Up (body before RHS).
-- dmdAnalRhsLetDown implements the Down variant:
@@ -621,28 +598,23 @@ dmdAnalRhsLetDown :: TopLevelFlag
-- Process the RHS of the binding, add the strictness signature
-- to the Id, and augment the environment with the signature as well.
dmdAnalRhsLetDown top_lvl rec_flag env let_dmd id rhs
- | Just fn <- unpackTrivial rhs -- See Note [Demand analysis for trivial right-hand sides]
- = dmdAnalTrivialRhs env id rhs fn
-
- | otherwise
- = (lazy_fv, id', mkLams bndrs' body')
+ = (lazy_fv, id', rhs')
where
- (bndrs, body, body_dmd)
- = case isJoinId_maybe id of
- Just join_arity -- See Note [Demand analysis for join points]
- | (bndrs, body) <- collectNBinders join_arity rhs
- -> (bndrs, body, let_dmd)
-
- Nothing | (bndrs, body) <- collectBinders rhs
- -> (bndrs, body, mkBodyDmd env body)
-
- env_body = foldl' extendSigsWithLam env bndrs
- (body_ty, body') = dmdAnal env_body body_dmd body
- body_ty' = removeDmdTyArgs body_ty -- zap possible deep CPR info
- (DmdType rhs_fv rhs_dmds rhs_res, bndrs')
- = annotateLamBndrs env (isDFunId id) body_ty' bndrs
- sig_ty = mkStrictSig (mkDmdType sig_fv rhs_dmds rhs_res')
- id' = set_idStrictness env id sig_ty
+ rhs_arity = idArity id
+ rhs_dmd
+ -- See Note [Demand analysis for join points]
+ -- See Note [idArity for join points] in SimplUtils
+ -- rhs_arity matches the join arity of the join point
+ | isJoinId id
+ = mkCallDmds rhs_arity let_dmd
+ | otherwise
+ -- NB: rhs_arity
+ -- See Note [Demand signatures are computed for a threshold demand based on idArity]
+ = mkRhsDmd env rhs_arity rhs
+ (DmdType rhs_fv rhs_dmds rhs_res, rhs')
+ = dmdAnal env rhs_dmd rhs
+ sig = mkStrictSigForArity rhs_arity (mkDmdType sig_fv rhs_dmds rhs_res')
+ id' = set_idStrictness env id sig
-- See Note [NOINLINE and strictness]
@@ -666,36 +638,63 @@ dmdAnalRhsLetDown top_lvl rec_flag env let_dmd id rhs
|| not (isStrictDmd (idDemandInfo id) || ae_virgin env)
-- See Note [Optimistic CPR in the "virgin" case]
-mkBodyDmd :: AnalEnv -> CoreExpr -> CleanDemand
--- See Note [Product demands for function body]
-mkBodyDmd env body
- = case deepSplitProductType_maybe (ae_fam_envs env) (exprType body) of
- Nothing -> cleanEvalDmd
- Just (dc, _, _, _) -> cleanEvalProdDmd (dataConRepArity dc)
-
-unpackTrivial :: CoreExpr -> Maybe Id
--- Returns (Just v) if the arg is really equal to v, modulo
--- casts, type applications etc
--- See Note [Demand analysis for trivial right-hand sides]
-unpackTrivial (Var v) = Just v
-unpackTrivial (Cast e _) = unpackTrivial e
-unpackTrivial (Lam v e) | isTyVar v = unpackTrivial e
-unpackTrivial (App e a) | isTypeArg a = unpackTrivial e
-unpackTrivial _ = Nothing
-
--- | If given the RHS of a let-binding, this 'useLetUp' determines
--- whether we should process the binding up (body before rhs) or
--- down (rhs before body).
+-- | @mkRhsDmd env rhs_arity rhs@ creates a 'CleanDemand' for
+-- unleashing on the given function's @rhs@, by creating a call demand of
+-- @rhs_arity@ with a body demand appropriate for possible product types.
+-- See Note [Product demands for function body].
+-- For example, a call of the form @mkRhsDmd _ 2 (\x y -> (x, y))@ returns a
+-- clean usage demand of @C1(C1(U(U,U)))@.
+mkRhsDmd :: AnalEnv -> Arity -> CoreExpr -> CleanDemand
+mkRhsDmd env rhs_arity rhs = fromMaybe cleanEvalDmd $ do
+ let ts = findTypeShape (ae_fam_envs env) (exprType rhs)
+ TsProd tss <- peelTsFuns rhs_arity ts
+ pure (mkCallDmds rhs_arity (cleanEvalProdDmd (length tss)))
+
+-- | If given the let-bound 'Id', 'useLetUp' determines whether we should
+-- process the binding up (body before rhs) or down (rhs before body).
--
--- We use LetDown if there is a chance to get a useful strictness signature.
--- This is the case when there are manifest value lambdas or the binding is a
--- join point (hence always acts like a function, not a value).
-useLetUp :: Var -> CoreExpr -> Bool
-useLetUp f _ | isJoinId f = False
-useLetUp f (Lam v e) | isTyVar v = useLetUp f e
-useLetUp _ (Lam _ _) = False
-useLetUp _ _ = True
-
+-- We use LetDown if there is a chance to get a useful strictness signature to
+-- unleash at call sites. LetDown is generally more precise than LetUp if we can
+-- correctly guess how it will be used in the body, that is, for which incoming
+-- demand the strictness signature should be computed, which allows us to
+-- unleash higher-order demands on arguments at call sites. This is mostly the
+-- case when
+--
+-- * The binding takes any arguments before performing meaningful work (cf.
+-- 'idArity'), in which case we are interested to see how it uses them.
+-- * The binding is a join point, hence acting like a function, not a value.
+-- As a big plus, we know *precisely* how it will be used in the body; since
+-- it's always tail-called, we can directly unleash the incoming demand of
+-- the let binding on its RHS when computing a strictness signature. See
+-- Note [Demand analysis for join points].
+--
+-- Thus, if the binding is not a join point and its arity is 0, we have a thunk
+-- and use LetUp, implying that we have no usable demand signature available
+-- when we analyse the let body.
+--
+-- Since thunk evaluation is memoised, we want to unleash its 'DmdEnv' of free
+-- vars at most once, regardless of how many times it was forced in the body.
+-- This makes a real difference wrt. usage demands. The other reason is being
+-- able to unleash a more precise product demand on its RHS once we know how the
+-- thunk was used in the let body.
+--
+-- Characteristic examples, always assuming a single evaluation:
+--
+-- * @let x = 2*y in x + x@ => LetUp. Compared to LetDown, we find out that
+-- the expression uses @y@ at most once.
+-- * @let x = (a,b) in fst x@ => LetUp. Compared to LetDown, we find out that
+-- @b@ is absent.
+-- * @let f x = x*2 in f y@ => LetDown. Compared to LetUp, we find out that
+-- the expression uses @y@ strictly, because we have @f@'s demand signature
+-- available at the call site.
+-- * @join exit = 2*y in if a then exit else if b then exit else 3*y@ =>
+-- LetDown. Compared to LetUp, we find out that the expression uses @y@
+-- strictly, because we can unleash @exit@'s signature at each call site.
+-- * For a more convincing example with join points, see Note [Demand analysis
+-- for join points].
+--
+useLetUp :: Var -> Bool
+useLetUp f = idArity f == 0 && not (isJoinId f)
{- Note [Demand analysis for join points]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -728,22 +727,141 @@ let_dmd here).
Another win for join points! #13543.
+Note [Demand signatures are computed for a threshold demand based on idArity]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We compute demand signatures assuming idArity incoming arguments to approximate
+behavior for when we have a call site with at least that many arguments. idArity
+is /at least/ the number of manifest lambdas, but might be higher for PAPs and
+trivial RHS (see Note [Demand analysis for trivial right-hand sides]).
+
+Because idArity of a function varies independently of its cardinality properties
+(cf. Note [idArity varies independently of dmdTypeDepth]), we implicitly encode
+the arity for when a demand signature is sound to unleash in its 'dmdTypeDepth'
+(cf. Note [Understanding DmdType and StrictSig] in Demand). It is unsound to
+unleash a demand signature when the incoming number of arguments is less than
+that. See Note [What are demand signatures?] for more details on soundness.
+
+Why idArity arguments? Because that's a conservative estimate of how many
+arguments we must feed a function before it does anything interesting with them.
+Also it elegantly subsumes the trivial RHS and PAP case.
+
+There might be functions for which we would want to analyse for more incoming
+arguments than idArity. Example:
+
+ f x =
+ if expensive
+ then \y -> ... y ...
+ else \y -> ... y ...
+
+We'd analyse `f` under a unary call demand C(S), corresponding to idArity
+being 1. That's enough to look under the manifest lambda and find out how a
+unary call would use `x`, but not enough to look into the lambdas in the if
+branches.
+
+On the other hand, if we analysed for call demand C(C(S)), we'd get useful
+strictness info for `y` (and more precise info on `x`) and possibly CPR
+information, but
+
+ * We would no longer be able to unleash the signature at unary call sites
+ * Performing the worker/wrapper split based on this information would be
+ implicitly eta-expanding `f`, playing fast and loose with divergence and
+ even being unsound in the presence of newtypes, so we refrain from doing so.
+ Also see Note [Don't eta expand in w/w] in WorkWrap.
+
+Since we only compute one signature, we do so for arity 1. Computing multiple
+signatures for different arities (i.e., polyvariance) would be entirely
+possible, if it weren't for the additional runtime and implementation
+complexity.
+
+Note [idArity varies independently of dmdTypeDepth]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+We used to check in CoreLint that dmdTypeDepth <= idArity for a let-bound
+identifier. But that means we would have to zap demand signatures every time we
+reset or decrease arity. That's an unnecessary dependency, because
+
+ * The demand signature captures a semantic property that is independent of
+ what the binding's current arity is
+ * idArity is analysis information itself, thus volatile
+ * We already *have* dmdTypeDepth, so why not just use it to encode the
+ threshold for when to unleash the signature
+ (cf. Note [Understanding DmdType and StrictSig] in Demand)
+
+Consider the following expression, for example:
+
+ (let go x y = x `seq` ... in go) |> co
+
+`go` might have a strictness signature of `<S><L>`. The simplifier will identify
+`go` as a nullary join point through `joinPointBinding_maybe` and float the
+coercion into the binding, leading to an arity decrease:
+
+ join go = (\x y -> x `seq` ...) |> co in go
+
+With the CoreLint check, we would have to zap `go`'s perfectly viable strictness
+signature.
+
+Note [What are demand signatures?]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Demand analysis interprets expressions in the abstract domain of demand
+transformers. Given an incoming demand we put an expression under, its abstract
+transformer gives us back a demand type denoting how other things (like
+arguments and free vars) were used when the expression was evaluated.
+Here's an example:
+
+ f x y =
+ if x + expensive
+ then \z -> z + y * ...
+ else \z -> z * ...
+
+The abstract transformer (let's call it F_e) of the if expression (let's call it
+e) would transform an incoming head demand <S,HU> into a demand type like
+{x-><S,1*U>,y-><L,U>}<L,U>. In pictures:
+
+ Demand ---F_e---> DmdType
+ <S,HU> {x-><S,1*U>,y-><L,U>}<L,U>
+
+Let's assume that the demand transformers we compute for an expression are
+correct wrt. to some concrete semantics for Core. How do demand signatures fit
+in? They are strange beasts, given that they come with strict rules when to
+it's sound to unleash them.
+
+Fortunately, we can formalise the rules with Galois connections. Consider
+f's strictness signature, {}<S,1*U><L,U>. It's a single-point approximation of
+the actual abstract transformer of f's RHS for arity 2. So, what happens is that
+we abstract *once more* from the abstract domain we already are in, replacing
+the incoming Demand by a simple lattice with two elements denoting incoming
+arity: A_2 = {<2, >=2} (where '<2' is the top element and >=2 the bottom
+element). Here's the diagram:
+
+ A_2 -----f_f----> DmdType
+ ^ |
+ | α γ |
+ | v
+ Demand ---F_f---> DmdType
+
+With
+ α(C1(C1(_))) = >=2 -- example for usage demands, but similar for strictness
+ α(_) = <2
+ γ(ty) = ty
+and F_f being the abstract transformer of f's RHS and f_f being the abstracted
+abstract transformer computable from our demand signature simply by
+
+ f_f(>=2) = {}<S,1*U><L,U>
+ f_f(<2) = postProcessUnsat {}<S,1*U><L,U>
+
+where postProcessUnsat makes a proper top element out of the given demand type.
+
Note [Demand analysis for trivial right-hand sides]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
- foo = plusInt |> co
+ foo = plusInt |> co
where plusInt is an arity-2 function with known strictness. Clearly
we want plusInt's strictness to propagate to foo! But because it has
no manifest lambdas, it won't do so automatically, and indeed 'co' might
-have type (Int->Int->Int) ~ T, so we *can't* eta-expand. So we have a
-special case for right-hand sides that are "trivial", namely variables,
-casts, type applications, and the like.
+have type (Int->Int->Int) ~ T.
-Note that this can mean that 'foo' has an arity that is smaller than that
-indicated by its demand info. e.g. if co :: (Int->Int->Int) ~ T, then
-foo's arity will be zero (see Note [exprArity invariant] in CoreArity),
-but its demand signature will be that of plusInt. A small example is the
-test case of #8963.
+Fortunately, CoreArity gives 'foo' arity 2, which is enough for LetDown to
+forward plusInt's demand signature, and all is well (see Note [Newtype arity] in
+CoreArity)! A small example is the test case NewtypeArity.
Note [Product demands for function body]
@@ -841,13 +959,6 @@ annotateBndr env dmd_ty var
where
(dmd_ty', dmd) = findBndrDmd env False dmd_ty var
-annotateLamBndrs :: AnalEnv -> DFunFlag -> DmdType -> [Var] -> (DmdType, [Var])
-annotateLamBndrs env args_of_dfun ty bndrs = mapAccumR annotate ty bndrs
- where
- annotate dmd_ty bndr
- | isId bndr = annotateLamIdBndr env args_of_dfun dmd_ty bndr
- | otherwise = (dmd_ty, bndr)
-
annotateLamIdBndr :: AnalEnv
-> DFunFlag -- is this lambda at the top of the RHS of a dfun?
-> DmdType -- Demand type of body
@@ -1160,12 +1271,6 @@ extendSigEnv top_lvl sigs var sig = extendVarEnv sigs var (sig, top_lvl)
lookupSigEnv :: AnalEnv -> Id -> Maybe (StrictSig, TopLevelFlag)
lookupSigEnv env id = lookupVarEnv (ae_sigs env) id
-getStrictness :: AnalEnv -> Id -> StrictSig
-getStrictness env fn
- | isGlobalId fn = idStrictness fn
- | Just (sig, _) <- lookupSigEnv env fn = sig
- | otherwise = nopSig
-
nonVirgin :: AnalEnv -> AnalEnv
nonVirgin env = env { ae_virgin = False }
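The Galois-connection diagram in Note [What are demand signatures?] above can
also be rendered as a small executable toy. This is an illustrative sketch
only: demand types are plain strings, alpha abstracts a call depth rather than
a real usage demand, and postProcessUnsat is replaced by a stand-in that
returns a top element, as the Note describes:

  -- The two-point arity lattice A_2: '<2' (Lt2) is top, '>=2' (Ge2) is bottom.
  data Arity2 = Lt2 | Ge2 deriving (Eq, Show)

  -- alpha: abstract the depth of the incoming call demand into A_2.
  alpha :: Int -> Arity2
  alpha callDepth
    | callDepth >= 2 = Ge2
    | otherwise      = Lt2

  -- f_f: the abstracted abstract transformer, read off f's demand signature.
  f_f :: Arity2 -> String
  f_f Ge2 = "{}<S,1*U><L,U>"  -- enough arguments: unleash the signature as-is
  f_f Lt2 = "{}"              -- stand-in for postProcessUnsat: a proper top element

  main :: IO ()
  main = mapM_ (putStrLn . f_f . alpha) [0, 1, 2, 3]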
=====================================
compiler/stranal/WorkWrap.hs
=====================================
@@ -9,6 +9,7 @@ module WorkWrap ( wwTopBinds ) where
import GhcPrelude
+import CoreArity ( manifestArity )
import CoreSyn
import CoreUnfold ( certainlyWillInline, mkWwInlineRule, mkWorkerUnfolding )
import CoreUtils ( exprType, exprIsHNF )
@@ -457,7 +458,7 @@ tryWW dflags fam_envs is_rec fn_id rhs
-- See Note [Don't w/w INLINE things]
-- See Note [Don't w/w inline small non-loop-breaker things]
- | is_fun
+ | is_fun && is_eta_exp
= splitFun dflags fam_envs new_fn_id fn_info wrap_dmds res_info rhs
| is_thunk -- See Note [Thunk splitting]
@@ -474,9 +475,11 @@ tryWW dflags fam_envs is_rec fn_id rhs
-- See Note [Zapping DmdEnv after Demand Analyzer] and
-- See Note [Zapping Used Once info in WorkWrap]
- is_fun = notNull wrap_dmds || isJoinId fn_id
- is_thunk = not is_fun && not (exprIsHNF rhs) && not (isJoinId fn_id)
- && not (isUnliftedType (idType fn_id))
+ is_fun = notNull wrap_dmds || isJoinId fn_id
+ -- See Note [Don't eta expand in w/w]
+ is_eta_exp = length wrap_dmds == manifestArity rhs
+ is_thunk = not is_fun && not (exprIsHNF rhs) && not (isJoinId fn_id)
+ && not (isUnliftedType (idType fn_id))
{-
Note [Zapping DmdEnv after Demand Analyzer]
@@ -516,6 +519,30 @@ want to _keep_ the info for the code generator).
We do not do it in the demand analyser for the same reasons outlined in
Note [Zapping DmdEnv after Demand Analyzer] above.
+
+Note [Don't eta expand in w/w]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A binding where the manifestArity of the RHS is less than idArity of the binder
+means CoreArity didn't eta expand that binding. When this happens, it does so
+for a reason (see Note [exprArity invariant] in CoreArity) and we probably have
+a PAP, cast or trivial expression as RHS.
+
+Performing the worker/wrapper split will implicitly eta-expand the binding to
+idArity, overriding CoreArity's decision. Other than playing fast and loose with
+divergence, it's also broken for newtypes:
+
+ f = (\xy.blah) |> co
+ where
+ co :: (Int -> Int -> Char) ~ T
+
+Then idArity is 2 (despite the type T), and it can have a StrictSig based on a
+threshold of 2. But we can't w/w it without a type error.
+
+The situation is less grave for PAPs, but the implicit eta expansion caused a
+compiler allocation regression in T15164, where huge recursive instance method
+groups, mostly consisting of PAPs, got w/w'd. This caused great churn in the
+simplifier, whereas simply waiting for the PAPs to inline would have arrived at
+the same output program.
-}
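The newtype situation described in Note [Don't eta expand in w/w] typically
arises from a source program of roughly the following shape (compare the
NewtypeArity test added by this patch; the concrete definitions here are only
an illustration):

  module WwNewtypeSketch where

  newtype T = MkT (Int -> Int -> Char)

  -- In Core the RHS becomes (\x y -> ...) |> co with
  -- co :: (Int -> Int -> Char) ~ T, so the binding has manifest arity 0
  -- but idArity 2, and w/w must not eta expand it to two arguments.
  f :: T
  f = MkT (\x y -> if x > y then 'a' else 'b')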
=====================================
compiler/stranal/WwLib.hs
=====================================
@@ -134,7 +134,7 @@ mkWwBodies :: DynFlags
-- wrap_fn_str E = case x of { (a,b) ->
-- case a of { (a1,a2) ->
-- E a1 a2 b y }}
--- work_fn_str E = \a2 a2 b y ->
+-- work_fn_str E = \a1 a2 b y ->
-- let a = (a1,a2) in
-- let x = (a,b) in
-- E
=====================================
testsuite/tests/perf/compiler/WWRec.hs
=====================================
@@ -0,0 +1,73 @@
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE UndecidableInstances #-}
+
+module WWRec where
+
+class Rule f a where
+ get :: Decorator f => f a
+class Monad f => Decorator f where
+ foo :: Rule f a => f a
+
+data A1 = MkA1 A2
+data A2 = MkA2 A3
+data A3 = MkA3 A4
+data A4 = MkA4 A5
+data A5 = MkA5 A6
+data A6 = MkA6 A7
+data A7 = MkA7 A8
+data A8 = MkA8 A9
+data A9 = MkA9 A10
+data A10 = MkA10 A11
+data A11 = MkA11 A12
+data A12 = MkA12 A13
+data A13 = MkA13 A14
+data A14 = MkA14 A15
+data A15 = MkA15 A16
+data A16 = MkA16 A17
+data A17 = MkA17 A18
+data A18 = MkA18 A19
+data A19 = MkA19 A20
+data A20 = MkA20 A21
+data A21 = MkA21 A22
+data A22 = MkA22 A23
+data A23 = MkA23 A24
+data A24 = MkA24 A25
+data A25 = MkA25 A26
+data A26 = MkA26 A27
+data A27 = MkA27 A28
+data A28 = MkA28 A29
+data A29 = MkA29 A30
+data A30 = MkA30 A1
+
+instance Rule f A2 => Rule f A1 where get = MkA1 <$> foo
+instance Rule f A3 => Rule f A2 where get = MkA2 <$> foo
+instance Rule f A4 => Rule f A3 where get = MkA3 <$> foo
+instance Rule f A5 => Rule f A4 where get = MkA4 <$> foo
+instance Rule f A6 => Rule f A5 where get = MkA5 <$> foo
+instance Rule f A7 => Rule f A6 where get = MkA6 <$> foo
+instance Rule f A8 => Rule f A7 where get = MkA7 <$> foo
+instance Rule f A9 => Rule f A8 where get = MkA8 <$> foo
+instance Rule f A10 => Rule f A9 where get = MkA9 <$> foo
+instance Rule f A11 => Rule f A10 where get = MkA10 <$> foo
+instance Rule f A12 => Rule f A11 where get = MkA11 <$> foo
+instance Rule f A13 => Rule f A12 where get = MkA12 <$> foo
+instance Rule f A14 => Rule f A13 where get = MkA13 <$> foo
+instance Rule f A15 => Rule f A14 where get = MkA14 <$> foo
+instance Rule f A16 => Rule f A15 where get = MkA15 <$> foo
+instance Rule f A17 => Rule f A16 where get = MkA16 <$> foo
+instance Rule f A18 => Rule f A17 where get = MkA17 <$> foo
+instance Rule f A19 => Rule f A18 where get = MkA18 <$> foo
+instance Rule f A20 => Rule f A19 where get = MkA19 <$> foo
+instance Rule f A21 => Rule f A20 where get = MkA20 <$> foo
+instance Rule f A22 => Rule f A21 where get = MkA21 <$> foo
+instance Rule f A23 => Rule f A22 where get = MkA22 <$> foo
+instance Rule f A24 => Rule f A23 where get = MkA23 <$> foo
+instance Rule f A25 => Rule f A24 where get = MkA24 <$> foo
+instance Rule f A26 => Rule f A25 where get = MkA25 <$> foo
+instance Rule f A27 => Rule f A26 where get = MkA26 <$> foo
+instance Rule f A28 => Rule f A27 where get = MkA27 <$> foo
+instance Rule f A29 => Rule f A28 where get = MkA28 <$> foo
+instance Rule f A30 => Rule f A29 where get = MkA29 <$> foo
+instance Rule f A1 => Rule f A30 where get = MkA30 <$> foo
=====================================
testsuite/tests/perf/compiler/all.T
=====================================
@@ -393,6 +393,13 @@ test ('T15164',
compile,
['-v0 -O'])
+# See https://gitlab.haskell.org/ghc/ghc/merge_requests/312#note_186960
+test ('WWRec',
+ [ collect_compiler_stats('bytes allocated',10)
+ ],
+ compile,
+ ['-v0 -O'])
+
test('T16190',
[ collect_stats(),
when(opsys('mingw32'), expect_broken(16389))
=====================================
testsuite/tests/stranal/sigs/NewtypeArity.hs
=====================================
@@ -0,0 +1,10 @@
+-- | 't' and 't2' should have a strictness signature for arity 2 here.
+module Test where
+
+newtype T = MkT (Int -> Int -> Int)
+
+t :: T
+t = MkT (\a b -> a + b)
+
+t2 :: T
+t2 = MkT (+)
=====================================
testsuite/tests/stranal/sigs/NewtypeArity.stderr
=====================================
@@ -0,0 +1,18 @@
+
+==================== Strictness signatures ====================
+Test.$tc'MkT: m
+Test.$tcT: m
+Test.$trModule: m
+Test.t: <S,1*U(U)><S,1*U(U)>m
+Test.t2: <S,1*U(U)><S,1*U(U)>m
+
+
+
+==================== Strictness signatures ====================
+Test.$tc'MkT: m
+Test.$tcT: m
+Test.$trModule: m
+Test.t: <S,1*U(U)><S,1*U(U)>m
+Test.t2: <S,1*U(U)><S,1*U(U)>m
+
+
=====================================
testsuite/tests/stranal/sigs/all.T
=====================================
@@ -17,3 +17,4 @@ test('BottomFromInnerLambda', normal, compile, [''])
test('DmdAnalGADTs', normal, compile, [''])
test('T12370', normal, compile, [''])
test('CaseBinderCPR', normal, compile, [''])
+test('NewtypeArity', normal, compile, [''])
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/commit/1a670dc0196aeeb37e7b452e6f1b0e28380f032f