[Git][ghc/ghc][wip/andreask/arm_immediates] Aarch64 NCG: Don't emit overflowed literals
Andreas Klebinger (@AndreasK)
gitlab at gitlab.haskell.org
Mon Jul 24 12:57:16 UTC 2023
Andreas Klebinger pushed to branch wip/andreask/arm_immediates at Glasgow Haskell Compiler / GHC
Commits:
a3860418 by Andreas Klebinger at 2023-07-24T14:47:37+02:00
Aarch64 NCG: Don't emit overflowed literals
Rather than emitting overflowed literals, we now truncate them.
- - - - -
2 changed files:
- compiler/GHC/CmmToAsm/AArch64/CodeGen.hs
- compiler/GHC/CmmToAsm/AArch64/Regs.hs
Changes:
=====================================
compiler/GHC/CmmToAsm/AArch64/CodeGen.hs
=====================================
@@ -397,55 +397,57 @@ For example mov x1, #0x10000 is allowed but will be assembled to movz x1, #0x1,
-- Allows for a 16-bit immediate which can be shifted by 0/16/32/48 bits.
-- Used with MOVZ,MOVN, MOVK
-- See Note [Aarch64 immediates]
-getMovWideImm :: Integer -> Maybe Operand
-getMovWideImm n
- -- TODO: Handle sign extension
+getMovWideImm :: Integer -> Width -> Maybe Operand
+getMovWideImm n w
+ -- TODO: Handle sign extension/negatives
| n <= 0
= Nothing
-- Fits in 16 bits
| sized_n < 2^(16 :: Int)
- = Just $ OpImm (ImmInteger n)
+ = Just $ OpImm (ImmInteger truncated)
-- 0x 0000 0000 xxxx 0000
| trailing_zeros >= 16 && sized_n < 2^(32 :: Int)
- = Just $ OpImmShift (ImmInteger $ n `shiftR` 16) SLSL 16
+ = Just $ OpImmShift (ImmInteger $ truncated `shiftR` 16) SLSL 16
-- 0x 0000 xxxx 0000 0000
| trailing_zeros >= 32 && sized_n < 2^(48 :: Int)
- = Just $ OpImmShift (ImmInteger $ n `shiftR` 32) SLSL 32
+ = Just $ OpImmShift (ImmInteger $ truncated `shiftR` 32) SLSL 32
-- 0x xxxx 0000 0000 0000
| trailing_zeros >= 48
- = Just $ OpImmShift (ImmInteger $ n `shiftR` 48) SLSL 48
+ = Just $ OpImmShift (ImmInteger $ truncated `shiftR` 48) SLSL 48
| otherwise
= Nothing
where
- sized_n = fromIntegral n :: Word64
+ truncated = narrowU w n
+ sized_n = fromIntegral truncated :: Word64
trailing_zeros = countTrailingZeros sized_n
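The truncation goes through narrowU. For reference, its semantics are roughly the following paraphrase (the real definition lives in GHC.Cmm.Utils and may differ in details):

    import Data.Word (Word8, Word16, Word32, Word64)

    data Width = W8 | W16 | W32 | W64  -- stand-in for GHC.Cmm.Type.Width

    -- Rough paraphrase of narrowU: keep only the low 'w' bits of
    -- the integer and read them back as an unsigned value.
    narrowU :: Width -> Integer -> Integer
    narrowU W8  x = fromIntegral (fromIntegral x :: Word8)
    narrowU W16 x = fromIntegral (fromIntegral x :: Word16)
    narrowU W32 x = fromIntegral (fromIntegral x :: Word32)
    narrowU W64 x = fromIntegral (fromIntegral x :: Word64)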
-- | Arithmetic (immediate)
-- Allows for 12-bit immediates which can be shifted by 0 or 12 bits.
-- Used with ADD, ADDS, SUB, SUBS, CMP, CMN
-- See Note [Aarch64 immediates]
-getArithImm :: Integer -> Maybe Operand
-getArithImm n
+getArithImm :: Integer -> Width -> Maybe Operand
+getArithImm n w
-- TODO: Handle sign extension
| n <= 0
= Nothing
-- Fits in 12 bits
| sized_n < 2^(12::Int)
- = Just $ OpImm (ImmInteger n)
+ = Just $ OpImm (ImmInteger truncated)
-- 12 bits shifted by 12 places.
| trailing_zeros >= 12 && sized_n < 2^(24::Int)
- = Just $ OpImmShift (ImmInteger $ n `shiftR` 12) SLSL 12
+ = Just $ OpImmShift (ImmInteger $ truncated `shiftR` 12) SLSL 12
| otherwise
= Nothing
where
- sized_n = fromIntegral n :: Word64
+ sized_n = fromIntegral truncated :: Word64
+ truncated = narrowU w n
trailing_zeros = countTrailingZeros sized_n
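To see the new behaviour in isolation, here is a self-contained sketch of the arithmetic-immediate check with the truncation inlined as a plain mask (names and the masking shortcut are illustrative, not GHC's exact code):

    import Data.Bits (shiftL, shiftR, (.&.), countTrailingZeros)
    import Data.Word (Word64)

    -- Returns (immediate, left-shift amount); 'bits' stands in for
    -- widthInBits of the operation's width.
    arithImm :: Integer -> Int -> Maybe (Integer, Int)
    arithImm n bits
      | n <= 0                            = Nothing
      | sized < 2^(12 :: Int)             = Just (truncated, 0)
      | tz >= 12 && sized < 2^(24 :: Int) = Just (truncated `shiftR` 12, 12)
      | otherwise                         = Nothing
      where
        truncated = n .&. ((1 `shiftL` bits) - 1)  -- unsigned narrowing
        sized     = fromIntegral truncated :: Word64
        tz        = countTrailingZeros sized

For example, arithImm 0x10FFF 16 yields Just (0xFFF, 0): the literal overflows 16 bits, so only its low bits survive, and those happen to fit the 12-bit immediate form, whereas the untruncated value would have been rejected.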
-- | Logical (immediate)
@@ -453,10 +455,12 @@ getArithImm n
-- Used with AND, ANDS, EOR, ORR, TST
-- and their aliases which includes at least MOV (bitmask immediate)
-- See Note [Aarch64 immediates]
-getBitmaskImm :: Integer -> Maybe Operand
-getBitmaskImm n
- | isAArch64Bitmask n = Just $ OpImm (ImmInteger n)
+getBitmaskImm :: Integer -> Width -> Maybe Operand
+getBitmaskImm n w
+ | isAArch64Bitmask truncated = Just $ OpImm (ImmInteger truncated)
| otherwise = Nothing
+ where
+ truncated = narrowU w n
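isAArch64Bitmask itself is unchanged by this patch and not shown in the diff. For intuition: a logical immediate is a run of ones, rotated within an element of 2, 4, 8, 16, 32 or 64 bits and replicated across the register. A brute-force sketch of that acceptance test (illustrative only, not GHC's implementation):

    import Data.Bits
    import Data.Word (Word64)

    -- Accept n iff for some element size e dividing 64, the low e
    -- bits are a rotated run of k ones (0 < k < e) and replicating
    -- that element reproduces all of n.
    isBitmaskImm64 :: Word64 -> Bool
    isBitmaskImm64 n = any ok [2, 4, 8, 16, 32, 64]
      where
        ok e =
          let mask | e == 64   = maxBound
                   | otherwise = (1 `shiftL` e) - 1
              elt   = n .&. mask
              rep   = foldl (.|.) 0 [ elt `shiftL` (i * e)
                                    | i <- [0 .. 64 `div` e - 1] ]
              k     = popCount elt
              rot r = ((elt `shiftR` r) .|. (elt `shiftL` (e - r))) .&. mask
              run   = any (\r -> rot r == (1 `shiftL` k) - 1) [0 .. e - 1]
          in rep == n && k > 0 && k < e && run

Note that all-zeros and all-ones fall out as unencodable (k must lie strictly between 0 and e).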
-- TODO OPT: we might be able to give getRegister
@@ -604,7 +608,7 @@ getRegister' config plat expr
-- or figure out something smarter. Lowered to
-- `MOV dst XZR`
CmmInt i w | i >= 0
- , Just imm_op <- getMovWideImm i -> do
+ , Just imm_op <- getMovWideImm i w -> do
return (Any (intFormat w) (\dst -> unitOL $ annExpr expr (MOVZ (OpReg w dst) imm_op)))
CmmInt i w | isNbitEncodeable 16 i, i >= 0 -> do
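As a concrete example of what the threaded-through width buys here: getMovWideImm 0x1FFFF previously returned Nothing (the value fits neither 16 bits nor any shifted form), whereas now, for a 16-bit operation, narrowU W16 0x1FFFF = 0xFFFF fits the plain 16-bit form and is emitted directly as MOVZ dst, #0xFFFF.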
@@ -895,7 +899,7 @@ getRegister' config plat expr
(reg_x, format_x, code_x) <- getSomeReg x
(op_y, format_y, code_y) <- case y of
CmmLit (CmmInt n w)
- | Just imm_operand_y <- encode_imm n
+ | Just imm_operand_y <- encode_imm n w
-> return (imm_operand_y, intFormat w, nilOL)
_ -> do
(reg_y, format_y, code_y) <- getSomeReg y
@@ -910,7 +914,7 @@ getRegister' config plat expr
-- In the case of 8- and 16-bit signed arithmetic we must first
-- sign-extend both arguments to 32-bits.
-- See Note [Signed arithmetic on AArch64].
- intOpImm :: Bool -> Width -> (Operand -> Operand -> Operand -> OrdList Instr) -> (Integer -> Maybe Operand) -> NatM (Register)
+ intOpImm :: Bool -> Width -> (Operand -> Operand -> Operand -> OrdList Instr) -> (Integer -> Width -> Maybe Operand) -> NatM (Register)
intOpImm {- is signed -} True w op _encode_imm = intOp True w op
intOpImm False w op encode_imm = do
-- compute x<m> <- x
@@ -919,7 +923,7 @@ getRegister' config plat expr
(reg_x, format_x, code_x) <- getSomeReg x
(op_y, format_y, code_y) <- case y of
CmmLit (CmmInt n w)
- | Just imm_operand_y <- encode_imm n
+ | Just imm_operand_y <- encode_imm n w
-> return (imm_operand_y, intFormat w, nilOL)
_ -> do
(reg_y, format_y, code_y) <- getSomeReg y
=====================================
compiler/GHC/CmmToAsm/AArch64/Regs.hs
=====================================
@@ -77,6 +77,8 @@ litToImm (CmmInt i w) = ImmInteger (narrowS w i)
-- narrow to the width: a CmmInt might be out of
-- range, but we assume that ImmInteger only contains
-- in-range values. A signed value should be fine here.
+ -- AK: We do call this with out-of-range values; however,
+ -- it just truncates as we would expect.
litToImm (CmmFloat f W32) = ImmFloat f
litToImm (CmmFloat f W64) = ImmDouble f
litToImm (CmmLabel l) = ImmCLbl l
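For comparison with narrowU above, narrowS truncates and then sign-extends; roughly (again a paraphrase of GHC.Cmm.Utils, using the Width stand-in from the narrowU sketch, not the exact source):

    import Data.Int (Int8, Int16, Int32, Int64)

    -- Rough paraphrase of narrowS: keep the low 'w' bits and
    -- sign-extend the result back to Integer.
    narrowS :: Width -> Integer -> Integer
    narrowS W8  x = fromIntegral (fromIntegral x :: Int8)
    narrowS W16 x = fromIntegral (fromIntegral x :: Int16)
    narrowS W32 x = fromIntegral (fromIntegral x :: Int32)
    narrowS W64 x = fromIntegral (fromIntegral x :: Int64)

So litToImm (CmmInt 0x1FFFF W16) produces ImmInteger (-1), i.e. the same low 16 bits viewed as a signed value.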
View it on GitLab: https://gitlab.haskell.org/ghc/ghc/-/commit/a386041869261daf9aeff0a31eff8b1f375a37b8