From 02ab54fbb8722be8f3a91f1ed6e80cfe017ab8ee Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Wed, 22 May 2024 12:58:48 +1200 Subject: [PATCH 01/19] Initial bitwise primitives --- plutus-core/plutus-core.cabal | 1 + .../src/PlutusCore/Bitwise/Other.hs | 510 ++++++++++++++++++ .../src/PlutusCore/Default/Builtins.hs | 47 ++ .../RewriteRules/CommuteFnWithConst.hs | 4 + 4 files changed, 562 insertions(+) create mode 100644 plutus-core/plutus-core/src/PlutusCore/Bitwise/Other.hs diff --git a/plutus-core/plutus-core.cabal b/plutus-core/plutus-core.cabal index d854c5c895e..8ee595f2b31 100644 --- a/plutus-core/plutus-core.cabal +++ b/plutus-core/plutus-core.cabal @@ -89,6 +89,7 @@ library PlutusCore.Annotation PlutusCore.Arity PlutusCore.Bitwise.Convert + PlutusCore.Bitwise.Other PlutusCore.Builtin PlutusCore.Builtin.Debug PlutusCore.Builtin.Elaborate diff --git a/plutus-core/plutus-core/src/PlutusCore/Bitwise/Other.hs b/plutus-core/plutus-core/src/PlutusCore/Bitwise/Other.hs new file mode 100644 index 00000000000..032e468f262 --- /dev/null +++ b/plutus-core/plutus-core/src/PlutusCore/Bitwise/Other.hs @@ -0,0 +1,510 @@ +-- editorconfig-checker-disable-file +{-# LANGUAGE BangPatterns #-} + +-- | Implementation for shifts, rotation, popcount and find-first-set +module PlutusCore.Bitwise.Other ( + bitwiseShift, + bitwiseRotate, + countSetBits, + findFirstSetBit + ) where + +import Control.Monad (unless, when) +import Data.Bits qualified as Bits +import Data.ByteString (ByteString) +import Data.ByteString qualified as BS +import Data.ByteString.Internal qualified as BSI +import Data.Foldable (for_) +import Data.Word (Word64, Word8) +import Foreign.Marshal.Utils (copyBytes, fillBytes) +import Foreign.Ptr (Ptr, castPtr, plusPtr) +import Foreign.Storable (peekByteOff, peekElemOff, pokeByteOff) +import System.IO.Unsafe (unsafeDupablePerformIO) + +{- Note [Shift and rotation implementation] + +Both shifts and rotations work similarly: they effectively impose a 'write +offset' to bits in the data argument, then write those bits to the result +with this offset applied. The difference between them is in what should be +done if the resulting offset index would fall out of bounds: shifts just +discard the data (and fill whatever remains with zeroes), while rotations +'wrap around' modularly. This operation is bit parallel by definition, thus +theoretically making it amenable to the techniques described in Note [Bit +parallelism and loop sectioning]. + +However, the naive way of doing this runs into a problem: the byte ordering +on Tier 1 platforms inside `Word64` means that consecutive bit indexes +according to CIP-122 don't remain that way. We could avoid this by using a +byte flip followed by an adjustment in the opposite direction, then a byte flip +back again. However, this is a costly operation, and would also be extremely +fiddly across stride boundaries, making both performance and implementation +clarity suffer. Instead, we use a different observation, namely that both +shifts and rotations on the same input are monoidally homomorphic into +natural number addition (assuming the same 'direction' for shifts). Using +this, combined with Euclidean division, we can decompose any shift or +rotation by `i` into two consecutive shifts and rotations: + +1. A 'large' shift or rotation, by `div i 8`; and +2. A 'small' shift or rotation, by `rem i 8`. 
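(Editorial aside, not part of the original note: a minimal sketch of this decomposition,
shown on Integer shifts from Data.Bits, where there is no width to truncate against and
the additive homomorphism is easy to check. The name `shiftViaDecomposition` is purely
illustrative.)

  import Data.Bits (shiftL)

  -- Shifting by i is the same as shifting by the 'large' part (whole bytes)
  -- and then by the 'small' part (leftover bits), for non-negative i.
  shiftViaDecomposition :: Integer -> Int -> Integer
  shiftViaDecomposition x i =
    let (bigShift, smallShift) = i `quotRem` 8
    in (x `shiftL` (8 * bigShift)) `shiftL` smallShift
  -- shiftViaDecomposition x i == x `shiftL` i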
+ +While on paper, this seems much less efficient (as our stride is smaller), +we also observe that the 'large' shift moves around whole bytes, while also +keeping consecutive bytes consecutive, assuming their bit indices remain +in-bounds. This means that we can implement step 1 both simply and efficiently: + +* For shifts, we perform a partial copy of all the bytes whose bits remain + in-bounds, followed by clearing of whatever remains. +* For rotations, we perform two partial copies: first of all the bytes whose + bits remain in-bounds, followed by whatever remains, at the 'opposite end'. + +These can make use of the bulk copying and clearing operations provided by the +GHC runtime. Not only are these shorter and more readable, they are also _far_ +more efficient than anything we could do, as they rely on optimized C called +via the runtime (meaning no FFI penalty). From our experiments, both with +these operations, and others from CIP-122, we note that the cost of these is +essentially constant up to about the size of 1-2 cache lines (64-128 bytes): +since we anticipate smaller inputs as far more likely, this actually runs +_faster_ than our proposed sectioning approach, while being easier to read +and write. + +It is arguable that our approach forces 'double writing', as Step 2 has to +possibly overwrite our work in Step 1. However, by avoiding the need to +perform byte flips, as well as benefitting from the huge speedups gained +from our split approach, this cost is essentially negligible, especially +given that we can operate mutably throughout. We also have an additional +benefit: if the requested rotation or shift happens to be an exact multiple +of 8, we can be _much_ faster, as Step 2 becomes unnecessary in that case. +-} + +-- | Shifts, as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +bitwiseShift :: ByteString -> Int -> ByteString +bitwiseShift bs bitMove + | BS.null bs = bs + | bitMove == 0 = bs + | otherwise = unsafeDupablePerformIO . BS.useAsCString bs $ \srcPtr -> + BSI.create len $ \dstPtr -> do + -- To simplify our calculations, we work only with absolute values, + -- letting different functions control for direction, instead of + -- trying to unify the scheme for both positive and negative shifts. + let magnitude = abs bitMove + -- Instead of worrying about partial clearing, we just zero the entire + -- block of memory, as the cost is essentially negligible and saves us + -- a bunch of offset arithmetic. + fillBytes dstPtr 0x00 len + unless (magnitude >= bitLen) $ do + let (bigShift, smallShift) = magnitude `quotRem` 8 + case signum bitMove of + (-1) -> negativeShift (castPtr srcPtr) dstPtr bigShift smallShift + _ -> positiveShift (castPtr srcPtr) dstPtr bigShift smallShift + where + len :: Int + !len = BS.length bs + bitLen :: Int + !bitLen = len * 8 + negativeShift :: Ptr Word8 -> Ptr Word8 -> Int -> Int -> IO () + negativeShift srcPtr dstPtr bigShift smallShift = do + let copyDstPtr = plusPtr dstPtr bigShift + let copyLen = len - bigShift + -- Since we already zeroed everything, we only do the partial copy. + copyBytes copyDstPtr srcPtr copyLen + when (smallShift > 0) $ do + -- When working with the small shift, we have to shift bits across + -- byte boundaries. Thus, we have to make sure that: + -- + -- 1. We 'save' our first byte from being processed. + -- 2. We can 'select' the bits that would be shifted over the + -- boundary and apply them. 
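        -- Editorial aside (not part of the original patch): a worked example
        -- of the recombination below. Assume smallShift = 3, a previous byte
        -- of 0xAB and a current byte of 0xCD. Then:
        --
        --   invSmallShift    = 5
        --   mask             = 0xFF `shiftR` 5   = 0x07
        --   prevOverflowBits = 0xAB .&. 0x07     = 0x03
        --   newCurrentByte   = (0xCD `shiftR` 3) .|. (0x03 `shiftL` 5)
        --                    = 0x19 .|. 0x60     = 0x79
        --
        -- so the three low bits of the previous byte become the three high
        -- bits of the shifted current byte.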
+ let !invSmallShift = 8 - smallShift + let !mask = 0xFF `Bits.unsafeShiftR` invSmallShift + for_ [len - 1, len - 2 .. len - copyLen] $ \byteIx -> do + -- To handle shifts across byte boundaries, we have to 'read + -- backwards', mask off the relevant part, then recombine. + !(currentByte :: Word8) <- peekByteOff dstPtr byteIx + !(prevByte :: Word8) <- peekByteOff dstPtr (byteIx - 1) + let !prevOverflowBits = prevByte Bits..&. mask + let !newCurrentByte = + (currentByte `Bits.unsafeShiftR` smallShift) + Bits..|. (prevOverflowBits `Bits.unsafeShiftL` invSmallShift) + pokeByteOff dstPtr byteIx newCurrentByte + !(firstByte :: Word8) <- peekByteOff dstPtr (len - copyLen - 1) + pokeByteOff dstPtr (len - copyLen - 1) (firstByte `Bits.unsafeShiftR` smallShift) + -- This works similarly to `negativeShift` above, but in the opposite direction. + positiveShift :: Ptr Word8 -> Ptr Word8 -> Int -> Int -> IO () + positiveShift srcPtr dstPtr bigShift smallShift = do + let copySrcPtr = plusPtr srcPtr bigShift + let copyLen = len - bigShift + copyBytes dstPtr copySrcPtr copyLen + when (smallShift > 0) $ do + let !invSmallShift = 8 - smallShift + let !mask = 0xFF `Bits.unsafeShiftL` invSmallShift + for_ [0, 1 .. copyLen - 2] $ \byteIx -> do + !(currentByte :: Word8) <- peekByteOff dstPtr byteIx + !(nextByte :: Word8) <- peekByteOff dstPtr (byteIx + 1) + let !nextOverflowBits = nextByte Bits..&. mask + let !newCurrentByte = + (currentByte `Bits.unsafeShiftL` smallShift) + Bits..|. (nextOverflowBits `Bits.unsafeShiftR` invSmallShift) + pokeByteOff dstPtr byteIx newCurrentByte + !(lastByte :: Word8) <- peekByteOff dstPtr (copyLen - 1) + pokeByteOff dstPtr (copyLen - 1) (lastByte `Bits.unsafeShiftL` smallShift) + +-- | Rotations, as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +bitwiseRotate :: ByteString -> Int -> ByteString +bitwiseRotate bs bitMove + | BS.null bs = bs + | otherwise = + -- To save ourselves some trouble, we work only with absolute shifts + -- (letting argument sign handle dispatch to dedicated 'directional' + -- functions, like for shifts), and also simplify rotations larger than + -- the bit length to the equivalent value modulo the bit length, as + -- they're equivalent. + let !magnitude = abs bitMove + !reducedMagnitude = magnitude `rem` bitLen + in if reducedMagnitude == 0 + then bs + else unsafeDupablePerformIO . BS.useAsCString bs $ \srcPtr -> + BSI.create len $ \dstPtr -> do + let (bigRotation, smallRotation) = reducedMagnitude `quotRem` 8 + case signum bitMove of + (-1) -> negativeRotate (castPtr srcPtr) dstPtr bigRotation smallRotation + _ -> positiveRotate (castPtr srcPtr) dstPtr bigRotation smallRotation + where + len :: Int + !len = BS.length bs + bitLen :: Int + !bitLen = len * 8 + negativeRotate :: Ptr Word8 -> Ptr Word8 -> Int -> Int -> IO () + negativeRotate srcPtr dstPtr bigRotate smallRotate = do + -- Two partial copies are needed here, unlike with shifts, because + -- there's no point zeroing our data, since it'll all be overwritten + -- with stuff from the input anyway. + let copyStartDstPtr = plusPtr dstPtr bigRotate + let copyStartLen = len - bigRotate + copyBytes copyStartDstPtr srcPtr copyStartLen + let copyEndSrcPtr = plusPtr srcPtr copyStartLen + copyBytes dstPtr copyEndSrcPtr bigRotate + when (smallRotate > 0) $ do + -- This works similarly as for shifts. 
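        -- Editorial aside (not part of the original patch): for the byte-level
        -- step above, a negative rotation of [ b0, b1, b2, b3, b4 ] with
        -- bigRotate = 2 lays the destination out as [ b3, b4, b0, b1, b2 ].
        -- The code below then fixes up the remaining smallRotate bits across
        -- the whole result, using cloneLastByte (saved before the loop mutates
        -- it) so that its low bits can wrap around into the top of byte 0.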
+ let invSmallRotate = 8 - smallRotate + let !mask = 0xFF `Bits.unsafeShiftR` invSmallRotate + !(cloneLastByte :: Word8) <- peekByteOff dstPtr (len - 1) + for_ [len - 1, len - 2 .. 1] $ \byteIx -> do + !(currentByte :: Word8) <- peekByteOff dstPtr byteIx + !(prevByte :: Word8) <- peekByteOff dstPtr (byteIx - 1) + let !prevOverflowBits = prevByte Bits..&. mask + let !newCurrentByte = + (currentByte `Bits.unsafeShiftR` smallRotate) + Bits..|. (prevOverflowBits `Bits.unsafeShiftL` invSmallRotate) + pokeByteOff dstPtr byteIx newCurrentByte + !(firstByte :: Word8) <- peekByteOff dstPtr 0 + let !lastByteOverflow = cloneLastByte Bits..&. mask + let !newLastByte = + (firstByte `Bits.unsafeShiftR` smallRotate) + Bits..|. (lastByteOverflow `Bits.unsafeShiftL` invSmallRotate) + pokeByteOff dstPtr 0 newLastByte + positiveRotate :: Ptr Word8 -> Ptr Word8 -> Int -> Int -> IO () + positiveRotate srcPtr dstPtr bigRotate smallRotate = do + let copyStartSrcPtr = plusPtr srcPtr bigRotate + let copyStartLen = len - bigRotate + copyBytes dstPtr copyStartSrcPtr copyStartLen + let copyEndDstPtr = plusPtr dstPtr copyStartLen + copyBytes copyEndDstPtr srcPtr bigRotate + when (smallRotate > 0) $ do + let !invSmallRotate = 8 - smallRotate + let !mask = 0xFF `Bits.unsafeShiftL` invSmallRotate + !(cloneFirstByte :: Word8) <- peekByteOff dstPtr 0 + for_ [0, 1 .. len - 2] $ \byteIx -> do + !(currentByte :: Word8) <- peekByteOff dstPtr byteIx + !(nextByte :: Word8) <- peekByteOff dstPtr (byteIx + 1) + let !nextOverflowBits = nextByte Bits..&. mask + let !newCurrentByte = + (currentByte `Bits.unsafeShiftL` smallRotate) + Bits..|. (nextOverflowBits `Bits.unsafeShiftR` invSmallRotate) + pokeByteOff dstPtr byteIx newCurrentByte + !(lastByte :: Word8) <- peekByteOff dstPtr (len - 1) + let !firstOverflowBits = cloneFirstByte Bits..&. mask + let !newLastByte = + (lastByte `Bits.unsafeShiftL` smallRotate) + Bits..|. (firstOverflowBits `Bits.unsafeShiftR` invSmallRotate) + pokeByteOff dstPtr (len - 1) newLastByte + +-- | Counting the number of set bits, as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +countSetBits :: ByteString -> Int +countSetBits bs = unsafeDupablePerformIO . BS.useAsCString bs $ \srcPtr -> do + -- See Note [Bit parallelism and loop sectioning] for details of why we + -- define this function the way it is. We make use of the fact that `popCount` + -- is bit-parallel, and has a constant-time implementation for `Word64` and `Word8`. + let bigSrcPtr :: Ptr Word64 = castPtr srcPtr + let smallSrcPtr :: Ptr Word8 = plusPtr srcPtr offset + goBig bigSrcPtr smallSrcPtr 0 0 + where + len :: Int + !len = BS.length bs + bigStrides :: Int + !bigStrides = len `quot` 8 + smallStrides :: Int + !smallStrides = len `rem` 8 + offset :: Int + !offset = bigStrides * 8 + goBig :: Ptr Word64 -> Ptr Word8 -> Int -> Int -> IO Int + goBig !bigSrcPtr !smallSrcPtr !acc !bigIx + | bigIx == bigStrides = goSmall smallSrcPtr acc 0 + | otherwise = do + !w64 <- peekElemOff bigSrcPtr bigIx + goBig bigSrcPtr smallSrcPtr (acc + Bits.popCount w64) (bigIx + 1) + goSmall :: Ptr Word8 -> Int -> Int -> IO Int + goSmall !smallSrcPtr !acc !smallIx + | smallIx == smallStrides = pure acc + | otherwise = do + !w8 <- peekElemOff smallSrcPtr smallIx + goSmall smallSrcPtr (acc + Bits.popCount w8) (smallIx + 1) + +-- | Finding the first set bit's index, as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). 
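-- Editorial aside (not part of the original patch): under the CIP's indexing
-- scheme (bit 0 is the least significant bit of the last byte), we would
-- expect, for example:
--
--   findFirstSetBit (BS.pack [0x00, 0x00, 0x02]) == 1
--   findFirstSetBit (BS.pack [0x00, 0x00])       == -1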
+findFirstSetBit :: ByteString -> Int +findFirstSetBit bs = unsafeDupablePerformIO . BS.useAsCString bs $ \srcPtr -> do + let bigSrcPtr :: Ptr Word64 = castPtr srcPtr + goBig bigSrcPtr 0 (len - 8) + where + -- We implement this operation in a somewhat unusual way, to try and + -- benefit from bit paralellism, thus allowing loop sectioning as well: + -- see Note [Bit parallelism and loop sectioning] as to why we choose to + -- do this. + -- + -- Finding the first set bit is not (inherently) bit parallel, as there is + -- a clear 'horizontal dependency'. Thus, we instead 'localize' this + -- 'horizontal dependency' by noting that the following operations _are_ + -- bit-parallel: + -- + -- 1. Checking if all bits are zero + -- 2. Keeping an additive accumulator + -- + -- Essentially, we begin by taking large steps through our data, checking + -- whether we only have zeroes. This can be done in strides of 64 bits at a + -- time, and every time we find that many zeroes, we keep track. After we + -- encounter a nonzero `Word64`, we 'step down' to `Word8`-sized steps, + -- continuing to count zero blocks the same way. Once we encounter a + -- non-zero `Word8`, we can resort to the specialized operation for + -- counting trailing zeroes from `Data.Bits`, and 'top up' our accumulated + -- count to produce the index we want. If we ever 'walk off the end', we + -- know that there's no way we could find any set bits and return -1. + -- + -- This is complicated slightly by us having to walk the input backwards + -- instead of forwards, but due to the requirements of the CIP-122 bit + -- indexing scheme, we don't really have a choice here. This doesn't + -- affect the description above however: it just complicates the indexing + -- maths required. + goBig :: Ptr Word64 -> Int -> Int -> IO Int + goBig !bigSrcPtr !acc !byteIx + | byteIx >= 0 = do + !(w64 :: Word64) <- peekByteOff bigSrcPtr byteIx + -- In theory, we could use the same technique here as we do in + -- `goSmall`, namely count speculatively and then compare to 64. + -- However this is not possible for us, as the native byte ordering + -- on Tier 1 platforms does not keep consecutive bits _across_ bytes + -- consecutive, which would make this result unreliable. While we + -- _could_ do a byte order flip before counting (from the opposite + -- end) to avoid this, the cost of this operation is much larger + -- than a comparison to zero, and would only benefit us _once_, + -- instead of once-per-stride. Thus, we instead use the approach + -- here. + if w64 == 0x0 + then goBig bigSrcPtr (acc + 64) (byteIx - 8) + else goSmall (castPtr bigSrcPtr) acc (byteIx + 7) + | byteIx <= (-8) = pure (-1) + | otherwise = goSmall (castPtr bigSrcPtr) 0 (8 + byteIx - 1) + goSmall :: Ptr Word8 -> Int -> Int -> IO Int + goSmall !smallSrcPtr !acc !byteIx + | byteIx < 0 = pure (-1) + | otherwise = do + !(w8 :: Word8) <- peekByteOff smallSrcPtr byteIx + -- Instead of redundantly first checking for a zero byte, + -- then counting, we speculatively count, relying on the behaviour of + -- `countTrailingZeros` that, on a zero byte, we get 8. + let !counted = Bits.countTrailingZeros w8 + let !newAcc = acc + counted + if counted == 8 + then goSmall smallSrcPtr newAcc (byteIx - 1) + else pure newAcc + len :: Int + !len = BS.length bs + +{- Note [Bit parallelism and loop sectioning] +All of the operations defined in this module effectively function as loops +over bits, which have to be read and/or written. In particular, this +involves two operations: + +1. 
Trafficking data between memory and machine registers; and
+2. Extraction of individual bits from larger chunks (either bytes or whole
+   machine words).
+
+There are also looping overheads, which involve comparisons and branches.
+
+On all architectures of interest (essentially, 64-bit Tier 1), general-purpose
+registers (GPRs henceforth) are 64 bits (or 8 bytes). Furthermore, the primary
+cost of moving data between memory and registers is having to overcome the
+'memory wall': the exact amount of data being moved doesn't affect this very
+much. In addition to this, when we operate on single bits, the remaining 63
+bits of the GPR holding that data are essentially 'wasted'. In the situation
+we have (namely, operating over `ByteString`s, which are packed arrays), we
+get two sources of inefficiency:
+
+* Despite paying the cost for a memory transfer, we transfer only
+  one-sixty-fourth the data we could; and
+* Despite transferring data from memory to registers, we utilize the register
+  at only one-sixty-fourth capacity.
+
+This essentially means we perform _sixty-four times_ more rotations of the
+loop, and memory moves, than we need to!
+
+We can reduce this inefficiency considerably by using a combination of
+techniques. The first of these is _bit parallelism_, which performs operations
+on single bits in parallel across larger sections of data. This can only be
+done if the operation in question has no 'horizontal dependencies': namely,
+that the operation is a homomorphism into a monoid from single bits. To see
+why this is possible, consider the following byte, denoted as an array of its
+bits:
+
+[1, 0, 1, 1, 0, 1, 0, 1]
+
+Suppose we wanted to count the number of set bits in that byte: this is a
+homomorphism into natural numbers under addition, and any given bit value
+doesn't change what it is 'morphed' into. Thus, we can count all the bits
+in parallel:
+
+1. ((1 + 0) + (1 + 1)) + ((0 + 1) + (0 + 1))
+2. (1 + 2) + (1 + 1)
+3. (3 + 2)
+4. 5
+
+This can be done across any span, whether it is one bit, eight bits, or
+sixty-four bits. The largest such span GHC allows us to work on portably is
+`Word64` (that is, 64 bits), but our input data is a `ByteString`, which stores
+bytes. In theory, this would force us to use at most 8-way bit parallelism,
+which would reduce our penalty from a factor of 64 to a factor of 8.
+
+However, we can remove even that inefficiency using an additional technique:
+_loop sectioning_. This would turn our homogeneous loop (that always operates
+one byte at a time) into a heterogeneous loop: first, we operate on a larger
+section (or _stride_) until we can no longer do this, and then finish up using
+byte-at-a-time processing. Essentially, given an input like this:
+
+[ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10 ]
+
+the homogeneous byte-at-a-time approach would process it like so:
+
+  _   _   _   _   _   _   _   _   _   _
+[ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10 ]
+
+for a total of 10 memory transfers and 10 loop spins, whereas a loop-sectioned
+approach with a stride of 8 would instead process like so:
+
+  ______________________________  _   _
+[ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10 ]
+
+Giving us only _three_ memory transfers and _three_ loop spins instead. This
+effectively reduces our work by a further factor of 8. Since our operations
+(reads, writes and counts of bits primarily, with some others) are all
+bit-parallel, this is almost free.
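As an editorial illustration (not part of the original note), here is a
minimal, self-contained sketch of the sectioning idea in pure Haskell,
counting set bits in 8-byte strides before finishing byte-at-a-time; the name
`popCountSectioned` is purely illustrative:

  import Data.Bits (popCount, shiftL, (.|.))
  import Data.ByteString (ByteString)
  import Data.ByteString qualified as BS
  import Data.Word (Word64)

  popCountSectioned :: ByteString -> Int
  popCountSectioned bs
    | BS.length bs >= 8 =
        let (stride, rest) = BS.splitAt 8 bs
            -- Pack the 8-byte stride into a Word64; byte order is irrelevant
            -- when all we do is count set bits.
            asWord64 = BS.foldl'
              (\acc b -> (acc `shiftL` 8) .|. fromIntegral b)
              (0 :: Word64)
              stride
        in popCount asWord64 + popCountSectioned rest
    | otherwise = BS.foldl' (\acc b -> acc + popCount b) 0 bs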
+ +This technique only benefits us because counted arrays are cache-friendly: see +Note [Superscalarity and caching] for a longer explanation of this and why it +matters. + +Further information: + +- Tier 1 GHC platform list: + https://gitlab.haskell.org/ghc/ghc/-/wikis/platforms#tier-1-platforms +- Memory wall: + https://link.springer.com/referenceworkentry/10.1007/978-0-387-09766-4_234 +- Loop sectioning in more detail: + http://physics.ujep.cz/~zmoravec/prga/main_for/mergedProjects/optaps_for/common/optaps_vec_mine.htm +-} + +{- Note [Superscalarity and caching] +On modern architectures, in order to process data, it must first be moved from +memory into a register. This operation has some cost (known as the 'memory wall'), +which is largely independent of how much data gets moved (assuming the register +can hold it): moving one byte, or a whole register's worth, costs about the same. +To reduce this cost, CPU manufacturers have introduced _cache hierarchies_, +which are designed to limit the cost of the wall, as long as the data access +matches the cache's optimal usage pattern. Thus, while an idealized view of +the memory hierachy is this: + +Registers +--------- +Memory + +in reality, the view is more like this: + +Registers +--------- +L1 cache +--------- +L2 cache +--------- +L3 cache (on some platforms) +--------- +Memory + +Each 'higher' cache in the hierarchy is smaller, but faster, and when a memory +fetch is requested in code, in addition to moving the requested data to a +register, that data (plus some more) is moved into caches as well. The amount +of data moved into cache (a _cache line_) is typically eight machine words on +modern architectures (and definitely is the case for all Tier 1 GHC platforms): +for the cases concerning Plutus, that is 64 bytes. Therefore, if data we need +soon after a fetch is _physically_ nearby, it won't need to be fetched from +memory: instead, it would come from a cache, which is faster (by a considerable +margin). + +To see how this can matter, consider the following ByteString: + +[ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11 ] + +The ByteString (being a counted array) has all of its data physically adjacent +to each other. Suppose we wanted to fetch the byte at index 1 (second position). +The naive view of what happens is like this: + +Registers: [b2] [ ] [ ] .... [ ] +Memory: [ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11 ] + +Thus, it would appear that, if we wanted a different position's value, we would +need to fetch from memory again. However, what _actually_ happens is more like this: + +Registers: [b2] [ ] [ ] .... [ ] +L1 cache: [ b2, b3, b4, b5, b6, b7, b8, b9, b10, b11 ] +Memory: [ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11 ] + +We note that b2, as well as its adjacent elements, were _all_ pulled into the L1 +cache. This can only work because all these elements are physically adjacent in +memory. The improvement in performance from this cache use is _very_ non-trivial: +an L1 cache is about 200 times faster than a memory access, and an L2 cache about +20 times faster. + +To take further advantage of this, modern CPUs (and all Tier 1 GHC platforms have +this capability) are _superscalar_. To explain what this means, let's consider the +naive view of how CPUs execute instructions: namely, it is one-at-a-time, and +synchronous. 
While CPUs must give the _appearance_ that they behave this way, in +practice, CPU execution is very much asynchronous: due to the proliferation of ALUs +on a single chip, having twice as many processing units is much cheaper than having +processing units run twice as fast. Thus, if there are no data dependencies +between instructions, CPUs can (and do!) execute them simultaneously, stalling to +await results if a data dependency is detected. This can be done automatically +using Tomasulo's algorithm, which ensures no conflicts with maximum throughput. + +Superscalarity interacts well with the cache hierarchy, as it makes data more +easily available for processing, provided there is enough 'work to do', and no +data dependencies. In our situation, most of what we do is data _movement_ from +one memory location to another, which by its very nature lacks any data +dependencies. + +Further references: + +- Numbers for cache and memory transfers: https://gist.github.com/jboner/2841832 +- Superscalarity: https://en.wikipedia.org/wiki/Superscalar_processor +- Tomasulo's algorithm: https://en.wikipedia.org/wiki/Tomasulo%27s_algorithm +-} diff --git a/plutus-core/plutus-core/src/PlutusCore/Default/Builtins.hs b/plutus-core/plutus-core/src/PlutusCore/Default/Builtins.hs index 33c46234256..8b5d9781ad0 100644 --- a/plutus-core/plutus-core/src/PlutusCore/Default/Builtins.hs +++ b/plutus-core/plutus-core/src/PlutusCore/Default/Builtins.hs @@ -27,6 +27,7 @@ import PlutusCore.Evaluation.Result (EvaluationResult (..)) import PlutusCore.Pretty (PrettyConfigPlc) import PlutusCore.Bitwise.Convert as Convert +import PlutusCore.Bitwise.Other as Other import PlutusCore.Crypto.BLS12_381.G1 qualified as BLS12_381.G1 import PlutusCore.Crypto.BLS12_381.G2 qualified as BLS12_381.G2 import PlutusCore.Crypto.BLS12_381.Pairing qualified as BLS12_381.Pairing @@ -152,6 +153,11 @@ data DefaultFun -- Conversions | IntegerToByteString | ByteStringToInteger + -- Bitwise + | BitwiseShift + | BitwiseRotate + | CountSetBits + | FindFirstSetBit deriving stock (Show, Eq, Ord, Enum, Bounded, Generic, Ix) deriving anyclass (NFData, Hashable, PrettyBy PrettyConfigPlc) @@ -1820,6 +1826,38 @@ instance uni ~ DefaultUni => ToBuiltinMeaning uni DefaultFun where in makeBuiltinMeaning byteStringToIntegerDenotation (runCostingFunTwoArguments . paramByteStringToInteger) + + toBuiltinMeaning _semvar BitwiseShift = + let bitwiseShiftDenotation :: BS.ByteString -> Int -> BS.ByteString + bitwiseShiftDenotation = Other.bitwiseShift + {-# INLINE bitwiseShiftDenotation #-} + in makeBuiltinMeaning + bitwiseShiftDenotation + (runCostingFunTwoArguments . unimplementedCostingFun) + + toBuiltinMeaning _semvar BitwiseRotate = + let bitwiseRotateDenotation :: BS.ByteString -> Int -> BS.ByteString + bitwiseRotateDenotation = Other.bitwiseRotate + {-# INLINE bitwiseRotateDenotation #-} + in makeBuiltinMeaning + bitwiseRotateDenotation + (runCostingFunTwoArguments . unimplementedCostingFun) + + toBuiltinMeaning _semvar CountSetBits = + let countSetBitsDenotation :: BS.ByteString -> Int + countSetBitsDenotation = Other.countSetBits + {-# INLINE countSetBitsDenotation #-} + in makeBuiltinMeaning + countSetBitsDenotation + (runCostingFunOneArgument . unimplementedCostingFun) + + toBuiltinMeaning _semvar FindFirstSetBit = + let findFirstSetBitDenotation :: BS.ByteString -> Int + findFirstSetBitDenotation = Other.findFirstSetBit + {-# INLINE findFirstSetBitDenotation #-} + in makeBuiltinMeaning + findFirstSetBitDenotation + (runCostingFunOneArgument . 
unimplementedCostingFun) -- See Note [Inlining meanings of builtins]. {-# INLINE toBuiltinMeaning #-} @@ -1947,6 +1985,11 @@ instance Flat DefaultFun where IntegerToByteString -> 73 ByteStringToInteger -> 74 + BitwiseShift -> 82 + BitwiseRotate -> 83 + CountSetBits -> 84 + FindFirstSetBit -> 85 + decode = go =<< decodeBuiltin where go 0 = pure AddInteger go 1 = pure SubtractInteger @@ -2023,6 +2066,10 @@ instance Flat DefaultFun where go 72 = pure Blake2b_224 go 73 = pure IntegerToByteString go 74 = pure ByteStringToInteger + go 82 = pure BitwiseShift + go 83 = pure BitwiseRotate + go 84 = pure CountSetBits + go 85 = pure FindFirstSetBit go t = fail $ "Failed to decode builtin tag, got: " ++ show t size _ n = n + builtinTagWidth diff --git a/plutus-core/plutus-ir/src/PlutusIR/Transform/RewriteRules/CommuteFnWithConst.hs b/plutus-core/plutus-ir/src/PlutusIR/Transform/RewriteRules/CommuteFnWithConst.hs index 1f52b55900a..f1837512a34 100644 --- a/plutus-core/plutus-ir/src/PlutusIR/Transform/RewriteRules/CommuteFnWithConst.hs +++ b/plutus-core/plutus-ir/src/PlutusIR/Transform/RewriteRules/CommuteFnWithConst.hs @@ -129,3 +129,7 @@ isCommutative = \case MkNilPairData -> False IntegerToByteString -> False ByteStringToInteger -> False + BitwiseShift -> False + BitwiseRotate -> False + CountSetBits -> False + FindFirstSetBit -> False From 1a1ce7002ba688ee9a30bbb83914a577b49b405e Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Wed, 22 May 2024 13:58:16 +1200 Subject: [PATCH 02/19] Wire up new builtins --- .../src/PlutusLedgerApi/Common/Versions.hs | 3 +- .../src/PlutusTx/Compiler/Builtins.hs | 11 ++++++ plutus-tx/src/PlutusTx/Builtins.hs | 35 +++++++++++++++++++ plutus-tx/src/PlutusTx/Builtins/Internal.hs | 34 ++++++++++++++++++ 4 files changed, 82 insertions(+), 1 deletion(-) diff --git a/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs b/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs index 24a553dd651..41799d1b3df 100644 --- a/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs +++ b/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs @@ -117,7 +117,8 @@ builtinsIntroducedIn = Map.fromList [ Bls12_381_G2_equal, Bls12_381_G2_hashToGroup, Bls12_381_G2_compress, Bls12_381_G2_uncompress, Bls12_381_millerLoop, Bls12_381_mulMlResult, Bls12_381_finalVerify, - Keccak_256, Blake2b_224, IntegerToByteString, ByteStringToInteger + Keccak_256, Blake2b_224, IntegerToByteString, ByteStringToInteger, + BitwiseShift, BitwiseRotate, CountSetBits, FindFirstSetBit ]) ] diff --git a/plutus-tx-plugin/src/PlutusTx/Compiler/Builtins.hs b/plutus-tx-plugin/src/PlutusTx/Compiler/Builtins.hs index b0d8e6b15aa..5719bc725a2 100644 --- a/plutus-tx-plugin/src/PlutusTx/Compiler/Builtins.hs +++ b/plutus-tx-plugin/src/PlutusTx/Compiler/Builtins.hs @@ -277,6 +277,11 @@ builtinNames = [ , 'Builtins.integerToByteString , 'Builtins.byteStringToInteger + + , 'Builtins.bitwiseShift + , 'Builtins.bitwiseRotate + , 'Builtins.countSetBits + , 'Builtins.findFirstSetBit ] defineBuiltinTerm :: CompilingDefault uni fun m ann => Ann -> TH.Name -> PIRTerm uni fun -> m () @@ -434,6 +439,12 @@ defineBuiltinTerms = do PLC.IntegerToByteString -> defineBuiltinInl 'Builtins.integerToByteString PLC.ByteStringToInteger -> defineBuiltinInl 'Builtins.byteStringToInteger + -- Other bitwise ops + PLC.BitwiseShift -> defineBuiltinInl 'Builtins.bitwiseShift + PLC.BitwiseRotate -> defineBuiltinInl 'Builtins.bitwiseRotate + PLC.CountSetBits -> defineBuiltinInl 'Builtins.countSetBits + PLC.FindFirstSetBit -> defineBuiltinInl 
'Builtins.findFirstSetBit + defineBuiltinTypes :: CompilingDefault uni fun m ann => m () diff --git a/plutus-tx/src/PlutusTx/Builtins.hs b/plutus-tx/src/PlutusTx/Builtins.hs index 5e9550d9bb6..43ebb5ec153 100644 --- a/plutus-tx/src/PlutusTx/Builtins.hs +++ b/plutus-tx/src/PlutusTx/Builtins.hs @@ -110,6 +110,11 @@ module PlutusTx.Builtins ( , toBuiltin , integerToByteString , byteStringToInteger + -- * Bitwise + , bitwiseShift + , bitwiseRotate + , countSetBits + , findFirstSetBit ) where import Data.Maybe @@ -652,3 +657,33 @@ integerToByteString endianness = BI.integerToByteString (toOpaque (byteOrderToBo byteStringToInteger :: ByteOrder -> BuiltinByteString -> Integer byteStringToInteger endianness = BI.byteStringToInteger (toOpaque (byteOrderToBool endianness)) + +-- Bitwise operations + +-- | Shift a 'BuiltinByteString', as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +{-# INLINEABLE bitwiseShift #-} +bitwiseShift :: BuiltinByteString -> Integer -> BuiltinByteString +bitwiseShift = BI.bitwiseShift + +-- | Rotate a 'BuiltinByteString', as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +{-# INLINEABLE bitwiseRotate #-} +bitwiseRotate :: BuiltinByteString -> Integer -> BuiltinByteString +bitwiseRotate = BI.bitwiseRotate + +-- | Count the set bits in a 'BuiltinByteString', as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +{-# INLINEABLE countSetBits #-} +countSetBits :: BuiltinByteString -> Integer +countSetBits = BI.countSetBits + +-- | Find the lowest index of a set bit in a 'BuiltinByteString', as per [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md). +-- +-- If given a 'BuiltinByteString' which consists only of zero bytes (including the empty +-- 'BuiltinByteString', this returns @-1@. +{-# INLINEABLE findFirstSetBit #-} +findFirstSetBit :: BuiltinByteString -> Integer +findFirstSetBit = BI.findFirstSetBit + diff --git a/plutus-tx/src/PlutusTx/Builtins/Internal.hs b/plutus-tx/src/PlutusTx/Builtins/Internal.hs index 843b63ccc4c..710db769f4a 100644 --- a/plutus-tx/src/PlutusTx/Builtins/Internal.hs +++ b/plutus-tx/src/PlutusTx/Builtins/Internal.hs @@ -33,6 +33,7 @@ import Data.Text as Text (Text, empty) import Data.Text.Encoding as Text (decodeUtf8, encodeUtf8) import GHC.Generics (Generic) import PlutusCore.Bitwise.Convert qualified as Convert +import PlutusCore.Bitwise.Other qualified as Other import PlutusCore.Builtin (BuiltinResult (..)) import PlutusCore.Crypto.BLS12_381.G1 qualified as BLS12_381.G1 import PlutusCore.Crypto.BLS12_381.G2 qualified as BLS12_381.G2 @@ -706,3 +707,36 @@ byteStringToInteger -> BuiltinInteger byteStringToInteger (BuiltinBool statedEndianness) (BuiltinByteString input) = Convert.byteStringToIntegerWrapper statedEndianness input + +{- +BITWISE +-} + +{-# NOINLINE bitwiseShift #-} +bitwiseShift :: + BuiltinByteString -> + BuiltinInteger -> + BuiltinByteString +bitwiseShift (BuiltinByteString bs) = + BuiltinByteString . Other.bitwiseShift bs . fromIntegral + +{-# NOINLINE bitwiseRotate #-} +bitwiseRotate :: + BuiltinByteString -> + BuiltinInteger -> + BuiltinByteString +bitwiseRotate (BuiltinByteString bs) = + BuiltinByteString . Other.bitwiseRotate bs . fromIntegral + +{-# NOINLINE countSetBits #-} +countSetBits :: + BuiltinByteString -> + BuiltinInteger +countSetBits (BuiltinByteString bs) = fromIntegral . 
Other.countSetBits $ bs + +{-# NOINLINE findFirstSetBit #-} +findFirstSetBit :: + BuiltinByteString -> + BuiltinInteger +findFirstSetBit (BuiltinByteString bs) = + fromIntegral . Other.findFirstSetBit $ bs From 3556946b370c1a861c68c9ae7a6a65404f14a399 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Thu, 23 May 2024 12:39:02 +1200 Subject: [PATCH 03/19] Tests --- plutus-core/plutus-core.cabal | 1 + .../test/Evaluation/Builtins/Bitwise.hs | 274 ++++++++++++++++++ .../test/Evaluation/Builtins/Definition.hs | 27 ++ 3 files changed, 302 insertions(+) create mode 100644 plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs diff --git a/plutus-core/plutus-core.cabal b/plutus-core/plutus-core.cabal index 8ee595f2b31..b365adbcfda 100644 --- a/plutus-core/plutus-core.cabal +++ b/plutus-core/plutus-core.cabal @@ -412,6 +412,7 @@ test-suite untyped-plutus-core-test DeBruijn.Spec DeBruijn.UnDeBruijnify Evaluation.Builtins + Evaluation.Builtins.Bitwise Evaluation.Builtins.BLS12_381 Evaluation.Builtins.BLS12_381.TestClasses Evaluation.Builtins.BLS12_381.Utils diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs new file mode 100644 index 00000000000..f3b1e59d84e --- /dev/null +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs @@ -0,0 +1,274 @@ +-- editorconfig-checker-disable-file + +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE TypeApplications #-} + +-- | Tests for [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md) +module Evaluation.Builtins.Bitwise ( + shiftHomomorphism, + rotateHomomorphism, + csbHomomorphism, + shiftClear, + rotateRollover, + csbRotate + ) where + +import Data.ByteString (ByteString) +import Data.ByteString qualified as BS +import Evaluation.Builtins.Common (typecheckEvaluateCek) +import Hedgehog (Property, PropertyT, annotateShow, failure, forAll, forAllWith, property, (===)) +import Hedgehog.Gen qualified as Gen +import Hedgehog.Range qualified as Range +import Numeric (showHex) +import PlutusCore qualified as PLC +import PlutusCore.Evaluation.Machine.ExBudgetingDefaults (defaultBuiltinCostModel) +import PlutusCore.MkPlc (builtin, mkConstant, mkIterAppNoAnn) +import PlutusPrelude (Word8, def) +import Test.Tasty (TestTree) +import Test.Tasty.Hedgehog (testPropertyNamed) +import Test.Tasty.HUnit (assertEqual, assertFailure, testCase) +import UntypedPlutusCore qualified as UPLC + +shiftHomomorphism :: [TestTree] +shiftHomomorphism = [ + testPropertyNamed "zero shift is identity" "zero_shift_id" idProp, + testPropertyNamed "non-negative addition of shifts is composition" "plus_shift_pos_comp" plusCompProp, + testPropertyNamed "non-positive addition of shifts is composition" "plus_shift_neg_comp" minusCompProp + ] + where + idProp :: Property + idProp = property $ do + bs <- forAllByteString + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () 0 + ] + evaluateAndVerify (mkConstant @ByteString () bs) lhs + plusCompProp :: Property + plusCompProp = property $ do + bs <- forAllByteString + i <- forAll . Gen.integral . Range.linear 0 $ 512 + j <- forAll . Gen.integral . 
Range.linear 0 $ 512 + let lhsInner = mkIterAppNoAnn (builtin () PLC.AddInteger) [ + mkConstant @Integer () i, + mkConstant @Integer () j + ] + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + lhsInner + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + rhsInner, + mkConstant @Integer () j + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + minusCompProp :: Property + minusCompProp = property $ do + bs <- forAllByteString + i <- forAll . Gen.integral . Range.linear 0 $ negate 512 + j <- forAll . Gen.integral . Range.linear 0 $ negate 512 + let lhsInner = mkIterAppNoAnn (builtin () PLC.AddInteger) [ + mkConstant @Integer () i, + mkConstant @Integer () j + ] + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + lhsInner + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + rhsInner, + mkConstant @Integer () j + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +rotateHomomorphism :: [TestTree] +rotateHomomorphism = [ + testPropertyNamed "zero rotation is identity" "zero_rotate_id" idProp, + testPropertyNamed "addition of rotations is composition" "plus_rotate_comp" compProp + ] + where + idProp :: Property + idProp = property $ do + bs <- forAllByteString + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () 0 + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + mkConstant @ByteString () bs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + compProp :: Property + compProp = property $ do + bs <- forAllByteString + i <- forAll . Gen.integral . Range.linear (negate 512) $ 512 + j <- forAll . Gen.integral . Range.linear (negate 512) $ 512 + let lhsInner = mkIterAppNoAnn (builtin () PLC.AddInteger) [ + mkConstant @Integer () i, + mkConstant @Integer () j + ] + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + lhsInner + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + rhsInner, + mkConstant @Integer () j + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +csbHomomorphism :: [TestTree] +csbHomomorphism = [ + testCase "count of empty is zero" $ do + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () "" + ] + case typecheckEvaluateCek def defaultBuiltinCostModel lhs of + Left x -> assertFailure . show $ x + Right (res, logs) -> case res of + PLC.EvaluationFailure -> assertFailure . 
show $ logs + PLC.EvaluationSuccess r -> assertEqual "" r (mkConstant @Integer () 0), + testPropertyNamed "count of concat is addition" "concat_count_plus" compProp + ] + where + compProp :: Property + compProp = property $ do + bs1 <- forAllByteString + bs2 <- forAllByteString + let lhsInner = mkIterAppNoAnn (builtin () PLC.AppendByteString) [ + mkConstant @ByteString () bs1, + mkConstant @ByteString () bs2 + ] + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + lhsInner + ] + let rhsLeft = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs1 + ] + let rhsRight = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs2 + ] + let rhs = mkIterAppNoAnn (builtin () PLC.AddInteger) [ + rhsLeft, + rhsRight + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsInteger) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +-- Currently, this test only verifies that shifting either positively or +-- negatively by more than the bit length has the same result. We need to +-- add an additional check that we also get all-zero, but until CIP-122 +-- operations merge, we can't. +shiftClear :: Property +shiftClear = property $ do + bs <- forAllByteString + let bitLen = 8 * BS.length bs + i <- forAll . Gen.integral . Range.linear 0 $ 512 + j <- forAll . Gen.integral . Range.linear 0 $ negate 512 + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (fromIntegral bitLen + i) + ] + let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () ((negate . fromIntegral $ bitLen) + j) + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +rotateRollover :: Property +rotateRollover = property $ do + bs <- forAllByteString + let bitLen = 8 * BS.length bs + i <- forAll . Gen.integral . Range.linear (negate 512) $ 512 + let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (case signum i of + (-1) -> (negate . fromIntegral $ bitLen) + i + _ -> fromIntegral bitLen + i) + ] + let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +csbRotate :: Property +csbRotate = property $ do + bs <- forAllByteString + i <- forAll . Gen.integral . Range.linear (negate 512) $ 512 + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + let rhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsInner + ] + let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsInteger) [ + lhs, + rhs + ] + evaluateAndVerify (mkConstant @Bool () True) compareExp + +-- Helpers + +forAllByteString :: PropertyT IO ByteString +forAllByteString = forAllWith hexShow . Gen.bytes . 
Range.linear 0 $ 1024 + +evaluateAndVerify :: + UPLC.Term UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> + PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> + PropertyT IO () +evaluateAndVerify expected actual = + case typecheckEvaluateCek def defaultBuiltinCostModel actual of + Left x -> annotateShow x >> failure + Right (res, logs) -> case res of + PLC.EvaluationFailure -> annotateShow logs >> failure + PLC.EvaluationSuccess r -> r === expected + +hexShow :: ByteString -> String +hexShow = ("0x" <>) . BS.foldl' (\acc w8 -> acc <> byteToHex w8) "" + where + byteToHex :: Word8 -> String + byteToHex w8 + | w8 < 128 = "0" <> showHex w8 "" + | otherwise = showHex w8 "" diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs index c7d6eb915e8..e02b7378488 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs @@ -39,6 +39,7 @@ import PlutusCore.StdLib.Data.ScottList qualified as Scott import PlutusCore.StdLib.Data.ScottUnit qualified as Scott import PlutusCore.StdLib.Data.Unit +import Evaluation.Builtins.Bitwise qualified as Bitwise import Evaluation.Builtins.BLS12_381 (test_BLS12_381) import Evaluation.Builtins.Common import Evaluation.Builtins.Conversion qualified as Conversion @@ -874,6 +875,31 @@ test_Conversion = ] ] +-- Tests of the laws from [this +-- CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md) +test_Bitwise :: TestTree +test_Bitwise = + adjustOption (\x -> max x . HedgehogTestLimit . Just $ 8000) . + testGroup "Bitwise" $ [ + testGroup "bitwiseShift" [ + testGroup "homomorphism" Bitwise.shiftHomomorphism, + testPropertyNamed "shifts over bit length clear input" "shift_too_much" + Bitwise.shiftClear + ], + testGroup "bitwiseRotate" [ + testGroup "homomorphism" Bitwise.rotateHomomorphism, + testPropertyNamed "rotations over bit length roll over" "rotate_too_much" + Bitwise.rotateRollover + ], + testGroup "countSetBits" [ + testGroup "homomorphism" Bitwise.csbHomomorphism, + testPropertyNamed "rotation preserves count" "popcount_rotate" + Bitwise.csbRotate + ], + testGroup "findFirstSetBit" [ + ] + ] + test_definition :: TestTree test_definition = testGroup "definition" @@ -909,4 +935,5 @@ test_definition = , test_Version , test_ConsByteString , test_Conversion + , test_Bitwise ] From 249489a0bf46cba4de42aa7b4e727b55e5c93155 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Thu, 23 May 2024 12:41:46 +1200 Subject: [PATCH 04/19] Changelogs --- .../20240523_124004_koz.ross_bitwise_2.md | 41 +++++++++++++++++++ .../20240523_124052_koz.ross_bitwise_2.md | 41 +++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md create mode 100644 plutus-tx/changelog.d/20240523_124052_koz.ross_bitwise_2.md diff --git a/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md b/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md new file mode 100644 index 00000000000..2d3610f34aa --- /dev/null +++ b/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md @@ -0,0 +1,41 @@ + + + +### Added + +- Implementation and tests for primitive operations in [this + CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md) + + + + + diff --git a/plutus-tx/changelog.d/20240523_124052_koz.ross_bitwise_2.md 
b/plutus-tx/changelog.d/20240523_124052_koz.ross_bitwise_2.md new file mode 100644 index 00000000000..31b38adcfc1 --- /dev/null +++ b/plutus-tx/changelog.d/20240523_124052_koz.ross_bitwise_2.md @@ -0,0 +1,41 @@ + + + +### Added + +- Builtin wrappers for operations from [this + CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md(. + + + + + From ad5cc7b5c8231f8a631e388f37dc8bc65762c1f7 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Fri, 24 May 2024 14:02:42 +1200 Subject: [PATCH 05/19] Fix failing goldens --- plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden | 4 ++-- plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden b/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden index f1bf99b0f21..e2e0f98905e 100644 --- a/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden +++ b/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden @@ -212,8 +212,8 @@ program [ (addInteger 7 n) , #534556454e ]) , (constr 0 []) ]) ]) ]) ]))) - (addInteger 4 n)) - (addInteger 3 n)) + (addInteger 3 n)) + (addInteger 4 n)) (\`$dToData` `$dToData` -> (\go eta -> goList (go eta)) (fix1 diff --git a/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden b/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden index f1bf99b0f21..e2e0f98905e 100644 --- a/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden +++ b/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden @@ -212,8 +212,8 @@ program [ (addInteger 7 n) , #534556454e ]) , (constr 0 []) ]) ]) ]) ]))) - (addInteger 4 n)) - (addInteger 3 n)) + (addInteger 3 n)) + (addInteger 4 n)) (\`$dToData` `$dToData` -> (\go eta -> goList (go eta)) (fix1 From 9520bc3e1f490ad81123e7a226aae155cda9ae93 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Fri, 7 Jun 2024 09:38:07 +1200 Subject: [PATCH 06/19] Fix cost model for tests --- .../untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs index f3b1e59d84e..2cc88946a57 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs @@ -22,7 +22,7 @@ import Hedgehog.Gen qualified as Gen import Hedgehog.Range qualified as Range import Numeric (showHex) import PlutusCore qualified as PLC -import PlutusCore.Evaluation.Machine.ExBudgetingDefaults (defaultBuiltinCostModel) +import PlutusCore.Evaluation.Machine.ExBudgetingDefaults (defaultBuiltinCostModelForTesting) import PlutusCore.MkPlc (builtin, mkConstant, mkIterAppNoAnn) import PlutusPrelude (Word8, def) import Test.Tasty (TestTree) @@ -149,7 +149,7 @@ csbHomomorphism = [ let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ mkConstant @ByteString () "" ] - case typecheckEvaluateCek def defaultBuiltinCostModel lhs of + case typecheckEvaluateCek def defaultBuiltinCostModelForTesting lhs of Left x -> assertFailure . show $ x Right (res, logs) -> case res of PLC.EvaluationFailure -> assertFailure . 
show $ logs @@ -259,7 +259,7 @@ evaluateAndVerify :: PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> PropertyT IO () evaluateAndVerify expected actual = - case typecheckEvaluateCek def defaultBuiltinCostModel actual of + case typecheckEvaluateCek def defaultBuiltinCostModelForTesting actual of Left x -> annotateShow x >> failure Right (res, logs) -> case res of PLC.EvaluationFailure -> annotateShow logs >> failure From aa5f83d155b8254d6b65c9df0f091ce19db70652 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Fri, 7 Jun 2024 09:40:40 +1200 Subject: [PATCH 07/19] Bitwise primitives are not in Conway --- plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs b/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs index 41799d1b3df..b9698bcca06 100644 --- a/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs +++ b/plutus-ledger-api/src/PlutusLedgerApi/Common/Versions.hs @@ -117,7 +117,9 @@ builtinsIntroducedIn = Map.fromList [ Bls12_381_G2_equal, Bls12_381_G2_hashToGroup, Bls12_381_G2_compress, Bls12_381_G2_uncompress, Bls12_381_millerLoop, Bls12_381_mulMlResult, Bls12_381_finalVerify, - Keccak_256, Blake2b_224, IntegerToByteString, ByteStringToInteger, + Keccak_256, Blake2b_224, IntegerToByteString, ByteStringToInteger + ]), + ((PlutusV3, futurePV), Set.fromList [ BitwiseShift, BitwiseRotate, CountSetBits, FindFirstSetBit ]) ] From a01acc0052ee5b276ff488e85914c71788bf8181 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Mon, 10 Jun 2024 10:01:47 +1200 Subject: [PATCH 08/19] Finish shift tests --- .../test/Evaluation/Builtins/Bitwise.hs | 62 +++++++++++++++---- .../test/Evaluation/Builtins/Definition.hs | 6 +- 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs index 2cc88946a57..94d425c5b84 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs @@ -11,7 +11,9 @@ module Evaluation.Builtins.Bitwise ( csbHomomorphism, shiftClear, rotateRollover, - csbRotate + csbRotate, + shiftPosClearLow, + shiftNegClearHigh, ) where import Data.ByteString (ByteString) @@ -184,23 +186,26 @@ csbHomomorphism = [ ] evaluateAndVerify (mkConstant @Bool () True) compareExp --- Currently, this test only verifies that shifting either positively or --- negatively by more than the bit length has the same result. We need to --- add an additional check that we also get all-zero, but until CIP-122 --- operations merge, we can't. shiftClear :: Property shiftClear = property $ do bs <- forAllByteString let bitLen = 8 * BS.length bs - i <- forAll . Gen.integral . Range.linear 0 $ 512 - j <- forAll . Gen.integral . Range.linear 0 $ negate 512 + i <- forAll . Gen.integral . Range.linear (negate 256) $ 256 + adjustment <- case signum i of + (-1) -> pure $ negate bitLen + i + -- Here, we shift by the length exactly, so we randomly pick negative or positive + 0 -> forAll . 
Gen.element $ [bitLen, negate bitLen] + _ -> pure $ bitLen + i let lhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ mkConstant @ByteString () bs, - mkConstant @Integer () (fromIntegral bitLen + i) + mkConstant @Integer () (fromIntegral adjustment) ] - let rhs = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ - mkConstant @ByteString () bs, - mkConstant @Integer () ((negate . fromIntegral $ bitLen) + j) + let rhsInner = mkIterAppNoAnn (builtin () PLC.LengthOfByteString) [ + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.ReplicateByteString) [ + rhsInner, + mkConstant @Integer () 0 ] let compareExp = mkIterAppNoAnn (builtin () PLC.EqualsByteString) [ lhs, @@ -208,6 +213,38 @@ shiftClear = property $ do ] evaluateAndVerify (mkConstant @Bool () True) compareExp +shiftPosClearLow :: Property +shiftPosClearLow = property $ do + bs <- forAllByteString1 + let bitLen = 8 * BS.length bs + n <- forAll . Gen.integral . Range.linear 1 $ bitLen - 1 + i <- forAll . Gen.integral . Range.linear 0 $ n - 1 + let lhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (fromIntegral n) + ] + let lhs = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + lhsInner, + mkConstant @Integer () (fromIntegral i) + ] + evaluateAndVerify (mkConstant @Bool () False) lhs + +shiftNegClearHigh :: Property +shiftNegClearHigh = property $ do + bs <- forAllByteString1 + let bitLen = 8 * BS.length bs + n <- forAll . Gen.integral . Range.linear 1 $ bitLen - 1 + i <- forAll . Gen.integral . Range.linear 0 $ n - 1 + let lhsInner = mkIterAppNoAnn (builtin () PLC.BitwiseShift) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (fromIntegral . negate $ n) + ] + let lhs = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + lhsInner, + mkConstant @Integer () (fromIntegral $ bitLen - i - 1) + ] + evaluateAndVerify (mkConstant @Bool () False) lhs + rotateRollover :: Property rotateRollover = property $ do bs <- forAllByteString @@ -254,6 +291,9 @@ csbRotate = property $ do forAllByteString :: PropertyT IO ByteString forAllByteString = forAllWith hexShow . Gen.bytes . Range.linear 0 $ 1024 +forAllByteString1 :: PropertyT IO ByteString +forAllByteString1 = forAllWith hexShow . Gen.bytes . 
Range.linear 1 $ 1024 + evaluateAndVerify :: UPLC.Term UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs index 534907f016a..ec6360ce663 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs @@ -912,7 +912,11 @@ test_Bitwise = testGroup "bitwiseShift" [ testGroup "homomorphism" Bitwise.shiftHomomorphism, testPropertyNamed "shifts over bit length clear input" "shift_too_much" - Bitwise.shiftClear + Bitwise.shiftClear, + testPropertyNamed "positive shifts clear low indexes" "shift_pos_low" + Bitwise.shiftPosClearLow, + testPropertyNamed "negative shifts clear high indexes" "shift_neg_high" + Bitwise.shiftNegClearHigh ], testGroup "bitwiseRotate" [ testGroup "homomorphism" Bitwise.rotateHomomorphism, From 2fc1e8d93a44e94a5f570d7444a8f19a7d73e39b Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Mon, 10 Jun 2024 10:05:31 +1200 Subject: [PATCH 09/19] Fix goldens --- plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden | 4 ++-- plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden b/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden index e2e0f98905e..f1bf99b0f21 100644 --- a/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden +++ b/plutus-tx-plugin/test/Budget/9.6/map2.uplc.golden @@ -212,8 +212,8 @@ program [ (addInteger 7 n) , #534556454e ]) , (constr 0 []) ]) ]) ]) ]))) - (addInteger 3 n)) - (addInteger 4 n)) + (addInteger 4 n)) + (addInteger 3 n)) (\`$dToData` `$dToData` -> (\go eta -> goList (go eta)) (fix1 diff --git a/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden b/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden index e2e0f98905e..f1bf99b0f21 100644 --- a/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden +++ b/plutus-tx-plugin/test/Budget/9.6/map3.uplc.golden @@ -212,8 +212,8 @@ program [ (addInteger 7 n) , #534556454e ]) , (constr 0 []) ]) ]) ]) ]))) - (addInteger 3 n)) - (addInteger 4 n)) + (addInteger 4 n)) + (addInteger 3 n)) (\`$dToData` `$dToData` -> (\go eta -> goList (go eta)) (fix1 From d23d03b34d70d47ea00b2822db0456f41185d7da Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Mon, 10 Jun 2024 12:56:17 +1200 Subject: [PATCH 10/19] Rest of tests --- .../test/Evaluation/Builtins/Bitwise.hs | 252 +++++++++++++++++- .../test/Evaluation/Builtins/Definition.hs | 26 +- 2 files changed, 275 insertions(+), 3 deletions(-) diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs index 94d425c5b84..1ef9fa49977 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Bitwise.hs @@ -14,11 +14,23 @@ module Evaluation.Builtins.Bitwise ( csbRotate, shiftPosClearLow, shiftNegClearHigh, + rotateMoveBits, + csbComplement, + csbInclusionExclusion, + csbAnd, + csbOr, + csbXor, + ffsReplicate, + ffsAnd, + ffsOr, + ffsXor, + ffsIndex ) where +import Control.Monad (unless) import Data.ByteString (ByteString) import Data.ByteString qualified as BS -import Evaluation.Builtins.Common (typecheckEvaluateCek) +import Evaluation.Builtins.Common (typecheckEvaluateCek, typecheckReadKnownCek) import 
Hedgehog (Property, PropertyT, annotateShow, failure, forAll, forAllWith, property, (===)) import Hedgehog.Gen qualified as Gen import Hedgehog.Range qualified as Range @@ -32,6 +44,198 @@ import Test.Tasty.Hedgehog (testPropertyNamed) import Test.Tasty.HUnit (assertEqual, assertFailure, testCase) import UntypedPlutusCore qualified as UPLC +ffsIndex :: Property +ffsIndex = property $ do + bs <- forAllNonZeroByteString + let ffsExp = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + mkConstant @ByteString () bs + ] + case typecheckReadKnownCek def defaultBuiltinCostModelForTesting ffsExp of + Left err -> annotateShow err >> failure + Right (Left err) -> annotateShow err >> failure + Right (Right ix) -> do + let hitIxExp = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + mkConstant @ByteString () bs, + mkConstant @Integer () ix + ] + evaluateAndVerify (mkConstant @Bool () True) hitIxExp + unless (ix == 0) $ do + i <- forAll . Gen.integral . Range.linear 0 $ ix - 1 + let missIxExp = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + mkConstant @ByteString () bs, + mkConstant @Integer () i + ] + evaluateAndVerify (mkConstant @Bool () False) missIxExp + +ffsAnd :: Property +ffsAnd = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let lhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + mkConstant @ByteString () bs + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.AndByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + rhsInner + ] + evaluateTheSame lhs rhs + +ffsOr :: Property +ffsOr = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let lhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + mkConstant @ByteString () bs + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.OrByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + rhsInner + ] + evaluateTheSame lhs rhs + +ffsXor :: Property +ffsXor = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let rhsInner = mkIterAppNoAnn (builtin () PLC.XorByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + rhsInner + ] + evaluateAndVerify (mkConstant @Integer () (negate 1)) rhs + +ffsReplicate :: Property +ffsReplicate = property $ do + n <- forAll . Gen.integral . Range.linear 1 $ 512 + w8 <- forAll . Gen.integral . 
Range.linear 0 $ 255 + let lhsInner = mkIterAppNoAnn (builtin () PLC.ReplicateByteString) [ + mkConstant @Integer () n, + mkConstant @Integer () w8 + ] + let lhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + lhsInner + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.ReplicateByteString) [ + mkConstant @Integer () 1, + mkConstant @Integer () w8 + ] + let rhs = mkIterAppNoAnn (builtin () PLC.FindFirstSetBit) [ + rhsInner + ] + evaluateTheSame lhs rhs + +csbComplement :: Property +csbComplement = property $ do + bs <- forAllByteString + let bitLen = BS.length bs * 8 + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs + ] + let rhsComplement = mkIterAppNoAnn (builtin () PLC.ComplementByteString) [ + mkConstant @ByteString () bs + ] + let rhsCount = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsComplement + ] + let rhs = mkIterAppNoAnn (builtin () PLC.SubtractInteger) [ + mkConstant @Integer () (fromIntegral bitLen), + rhsCount + ] + evaluateTheSame lhs rhs + +csbInclusionExclusion :: Property +csbInclusionExclusion = property $ do + x <- forAllByteString + y <- forAllByteString + let lhsInner = mkIterAppNoAnn (builtin () PLC.XorByteString) [ + mkConstant @Bool () False, + mkConstant @ByteString () x, + mkConstant @ByteString () y + ] + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + lhsInner + ] + let rhsOr = mkIterAppNoAnn (builtin () PLC.OrByteString) [ + mkConstant @Bool () False, + mkConstant @ByteString () x, + mkConstant @ByteString () y + ] + let rhsAnd = mkIterAppNoAnn (builtin () PLC.AndByteString) [ + mkConstant @Bool () False, + mkConstant @ByteString () x, + mkConstant @ByteString () y + ] + let rhsCountOr = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsOr + ] + let rhsCountAnd = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsAnd + ] + let rhs = mkIterAppNoAnn (builtin () PLC.SubtractInteger) [ + rhsCountOr, + rhsCountAnd + ] + evaluateTheSame lhs rhs + +csbAnd :: Property +csbAnd = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.AndByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsInner + ] + evaluateTheSame lhs rhs + +csbOr :: Property +csbOr = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let lhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + mkConstant @ByteString () bs + ] + let rhsInner = mkIterAppNoAnn (builtin () PLC.OrByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsInner + ] + evaluateTheSame lhs rhs + +csbXor :: Property +csbXor = property $ do + bs <- forAllByteString + semantics <- forAll Gen.bool + let rhsInner = mkIterAppNoAnn (builtin () PLC.XorByteString) [ + mkConstant @Bool () semantics, + mkConstant @ByteString () bs, + mkConstant @ByteString () bs + ] + let rhs = mkIterAppNoAnn (builtin () PLC.CountSetBits) [ + rhsInner + ] + evaluateAndVerify (mkConstant @Integer () 0) rhs + shiftHomomorphism :: [TestTree] shiftHomomorphism = [ testPropertyNamed "zero shift is identity" "zero_shift_id" idProp, @@ -266,6 +470,30 @@ rotateRollover = property $ do ] evaluateAndVerify (mkConstant @Bool () True) compareExp +rotateMoveBits :: Property 
+rotateMoveBits = property $ do + bs <- forAllByteString1 + let bitLen = 8 * BS.length bs + i <- forAll . Gen.integral . Range.linear 0 $ bitLen - 1 + j <- forAll . Gen.integral . Range.linear (negate 256) $ 256 + let lhs = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (fromIntegral i) + ] + let rhsRotation = mkIterAppNoAnn (builtin () PLC.BitwiseRotate) [ + mkConstant @ByteString () bs, + mkConstant @Integer () (fromIntegral j) + ] + let rhsIndex = mkIterAppNoAnn (builtin () PLC.ModInteger) [ + mkConstant @Integer () (fromIntegral $ i + j), + mkConstant @Integer () (fromIntegral bitLen) + ] + let rhs = mkIterAppNoAnn (builtin () PLC.ReadBit) [ + rhsRotation, + rhsIndex + ] + evaluateTheSame lhs rhs + csbRotate :: Property csbRotate = property $ do bs <- forAllByteString @@ -294,6 +522,10 @@ forAllByteString = forAllWith hexShow . Gen.bytes . Range.linear 0 $ 1024 forAllByteString1 :: PropertyT IO ByteString forAllByteString1 = forAllWith hexShow . Gen.bytes . Range.linear 1 $ 1024 +forAllNonZeroByteString :: PropertyT IO ByteString +forAllNonZeroByteString = + forAllWith hexShow . Gen.filterT (BS.any (/= 0x00)) . Gen.bytes . Range.linear 0 $ 1024 + evaluateAndVerify :: UPLC.Term UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> @@ -305,6 +537,24 @@ evaluateAndVerify expected actual = PLC.EvaluationFailure -> annotateShow logs >> failure PLC.EvaluationSuccess r -> r === expected +evaluateTheSame :: + PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> + PLC.Term UPLC.TyName UPLC.Name UPLC.DefaultUni UPLC.DefaultFun () -> + PropertyT IO () +evaluateTheSame lhs rhs = + case typecheckEvaluateCek def defaultBuiltinCostModelForTesting lhs of + Left x -> annotateShow x >> failure + Right (resLhs, logsLhs) -> case typecheckEvaluateCek def defaultBuiltinCostModelForTesting rhs of + Left x -> annotateShow x >> failure + Right (resRhs, logsRhs) -> case (resLhs, resRhs) of + (PLC.EvaluationFailure, PLC.EvaluationFailure) -> do + annotateShow logsLhs + annotateShow logsRhs + failure + (PLC.EvaluationSuccess rLhs, PLC.EvaluationSuccess rRhs) -> rLhs === rRhs + (PLC.EvaluationFailure, _) -> annotateShow logsLhs >> failure + (_, PLC.EvaluationFailure) -> annotateShow logsRhs >> failure + hexShow :: ByteString -> String hexShow = ("0x" <>) . 
BS.foldl' (\acc w8 -> acc <> byteToHex w8) "" where diff --git a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs index ec6360ce663..f174b7f1d8d 100644 --- a/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs +++ b/plutus-core/untyped-plutus-core/test/Evaluation/Builtins/Definition.hs @@ -921,14 +921,36 @@ test_Bitwise = testGroup "bitwiseRotate" [ testGroup "homomorphism" Bitwise.rotateHomomorphism, testPropertyNamed "rotations over bit length roll over" "rotate_too_much" - Bitwise.rotateRollover + Bitwise.rotateRollover, + testPropertyNamed "rotations move bits but don't change them" "rotate_move" + Bitwise.rotateMoveBits ], testGroup "countSetBits" [ testGroup "homomorphism" Bitwise.csbHomomorphism, testPropertyNamed "rotation preserves count" "popcount_rotate" - Bitwise.csbRotate + Bitwise.csbRotate, + testPropertyNamed "count of the complement" "popcount_complement" + Bitwise.csbComplement, + testPropertyNamed "inclusion-exclusion" "popcount_inclusion_exclusion" + Bitwise.csbInclusionExclusion, + testPropertyNamed "count of self-AND" "popcount_self_and" + Bitwise.csbAnd, + testPropertyNamed "count of self-OR" "popcount_self_or" + Bitwise.csbOr, + testPropertyNamed "count of self-XOR" "popcount_self_xor" + Bitwise.csbXor ], testGroup "findFirstSetBit" [ + testPropertyNamed "find first in replicated" "ffs_replicate" + Bitwise.ffsReplicate, + testPropertyNamed "find first of self-AND" "ffs_and" + Bitwise.ffsAnd, + testPropertyNamed "find first of self-OR" "ffs_or" + Bitwise.ffsOr, + testPropertyNamed "find first of self-XOR" "ffs_xor" + Bitwise.ffsXor, + testPropertyNamed "found index set, lower indices clear" "ffs_index" + Bitwise.ffsIndex ] ] From 50b9171216594a7d6331a4ccc0a50879a21364a8 Mon Sep 17 00:00:00 2001 From: Koz Ross Date: Wed, 12 Jun 2024 09:24:25 +1200 Subject: [PATCH 11/19] Rename operations --- .../20240523_124004_koz.ross_bitwise_2.md | 4 +- .../src/PlutusCore/Bitwise/Logical.hs | 8 ++-- .../src/PlutusCore/Bitwise/Other.hs | 12 ++--- .../src/PlutusCore/Default/Builtins.hs | 48 +++++++++---------- .../RewriteRules/CommuteFnWithConst.hs | 6 +-- .../test/Evaluation/Builtins/Bitwise.hs | 42 ++++++++-------- .../test/Evaluation/Builtins/Laws.hs | 10 ++-- .../src/PlutusLedgerApi/Common/Versions.hs | 4 +- .../src/PlutusTx/Compiler/Builtins.hs | 12 ++--- .../20240523_124052_koz.ross_bitwise_2.md | 4 +- plutus-tx/src/PlutusTx/Builtins.hs | 24 +++++----- plutus-tx/src/PlutusTx/Builtins/Internal.hs | 24 +++++----- 12 files changed, 97 insertions(+), 101 deletions(-) diff --git a/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md b/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md index 2d3610f34aa..d4cded39098 100644 --- a/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md +++ b/plutus-core/changelog.d/20240523_124004_koz.ross_bitwise_2.md @@ -15,12 +15,10 @@ Uncomment the section that is right (remove the HTML comment wrapper). - Implementation and tests for primitive operations in [this CIP](https://github.com/mlabs-haskell/CIPs/blob/koz/bitwise/CIP-XXXX/CIP-XXXX.md) -
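The popcount properties added above (`csbComplement`, `csbInclusionExclusion`) check two standard bit-counting identities via the `CountSetBits`, `ComplementByteString` and `AndByteString`/`OrByteString`/`XorByteString` builtins. For reference, a minimal plain-Haskell sketch of the same identities over `ByteString` follows; it is not part of the patch, the helper names are illustrative only, and the binary law assumes equal-length inputs (the padding-versus-truncation flag taken by the builtins does not affect the identity itself).

```haskell
-- Illustrative reference sketch (not part of the patch): the two popcount
-- identities exercised by csbComplement and csbInclusionExclusion, restated
-- over plain ByteStrings. Helper names are ours; the binary law assumes
-- equal-length inputs, since BS.zipWith truncates to the shorter argument.
{-# LANGUAGE ImportQualifiedPost #-}
module PopcountLaws (countSetBits, complementLaw, inclusionExclusionLaw) where

import Data.Bits (complement, popCount, xor, (.&.), (.|.))
import Data.ByteString (ByteString)
import Data.ByteString qualified as BS

-- Total number of set bits, summed byte by byte.
countSetBits :: ByteString -> Int
countSetBits = BS.foldl' (\acc w8 -> acc + popCount w8) 0

-- Counting the complement: every bit position is set in exactly one of
-- bs and its complement, so the two counts sum to the bit length.
complementLaw :: ByteString -> Bool
complementLaw bs =
  countSetBits (BS.map complement bs) == 8 * BS.length bs - countSetBits bs

-- Inclusion-exclusion: countSetBits (x `xor` y)
--   = countSetBits (x .|. y) - countSetBits (x .&. y)
inclusionExclusionLaw :: ByteString -> ByteString -> Bool
inclusionExclusionLaw x y =
  countSetBits (zipBS xor x y)
    == countSetBits (zipBS (.|.) x y) - countSetBits (zipBS (.&.) x y)
  where
    zipBS f a b = BS.pack (BS.zipWith f a b)
```

These are the same equations the Hedgehog properties assert, only stated against GHC's `Data.Bits` primitives rather than through the CEK evaluator, which makes the intended semantics easy to cross-check by hand.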