Byte-wise SIMD addition with one signed and one unsigned addend, result clamped to [0,255]

With the Kepler architecture, NVIDIA introduced a fairly wide selection of so-called video instructions for “in-register SIMD” arithmetic, operating either on a pair of half words or on four bytes. These ran at one quarter of the full integer instruction throughput.

One area where these instructions proved useful was bioinformatics, specifically sequence alignment with the Smith-Waterman algorithm. Among other such efforts, a team of researchers produced the CUDASW++ software package, which was updated through 2014 or thereabouts. [Later:] I noticed belatedly that the package is apparently still in active development, with the researchers now exploring the utility of half2 and s16x2 representations:

B. Schmidt, F. Kallenborn, A. Chacon, C. Hundt, “CUDASW++ 4.0: ultra-fast GPU-based Smith-Waterman protein sequence database search.” BMC Bioinformatics, Vol. 25, article 342 (2024) (DOI).

Later GPU architectures removed most of the video instructions from hardware, retaining only the most complex ones. CUDA currently exposes a subset of this functionality via device function intrinsics that are optimized across all supported GPU platforms. The full functionality of the Kepler video instructions remains exposed at the PTX level, but the emulation sequences generated for it are not always well optimized.
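
For the flavors that are exposed, the intrinsic route is straightforward. Below is a minimal sketch (the helper name is mine, assuming device code compiled with nvcc): __vaddus4 covers the unsigned/unsigned case with unsigned saturation, but there is no counterpart for the mixed signed/unsigned case discussed next.

__device__ unsigned int bytewise_add_clamp_u8 (unsigned int a, unsigned int b)
{
    return __vaddus4 (a, b); // per-byte unsigned add, each byte clamped to [0,255]
}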

One flavor of byte-wise addition that is useful in implementations of Smith-Waterman, but that is not supported via a device function intrinsic and whose PTX-level emulation is quite slow, is the addition of a signed addend and an unsigned addend, with the unsigned result clamped to [0,255], i.e. the value range of an unsigned byte.
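
To make the required semantics concrete, a straightforward scalar reference could look as follows (an illustrative sketch of my own, not part of the optimized code below): each byte of 'a' is interpreted as a signed byte, each byte of 'b' as an unsigned byte, and the per-byte sum is clamped to the unsigned byte range.

#include <cstdint>

uint32_t vadd_su_us4_ref (uint32_t a, uint32_t b)
{
    uint32_t r = 0;
    for (int i = 0; i < 4; i++) {
        int sa = (int8_t)(a >> (8 * i));          // byte of 'a', sign extended
        int ub = (uint8_t)(b >> (8 * i));         // byte of 'b', zero extended
        int s = sa + ub;                          // exact sum, in [-128, 382]
        s = (s < 0) ? 0 : ((s > 255) ? 255 : s);  // clamp to [0, 255]
        r |= (uint32_t)s << (8 * i);
    }
    return r;
}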

The function vadd_su_us4() below provides a high-performance implementation of this functionality that compiles to 10 simple integer instructions on all GPU architectures currently supported by CUDA; I tested the GPU targets sm_30 and sm_75 as well as the host target. While the CUDA 11 and CUDA 12 compilers make excellent use of LOP3, I hand-coded all instances of this operation here to achieve the best result.

[Code below updated 1/23/2025]

/*
  Copyright 2025, Norbert Juffa

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

  1. Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <cstdint>

#if (__CUDACC__)
#define __HOST__ __host__
#define __DEVICE__ __device__
#else // __CUDACC__
#define __HOST__
#define __DEVICE__
#endif // __CUDACC__

__HOST__ __DEVICE__ uint32_t lop3_08 (uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t r;
#if (__CUDA_ARCH__ >= 500)
    asm ("lop3.b32 %0,%1,%2,%3,0x08;\n\t" : "=r"(r) : "r"(a), "r"(b), "r"(c));
#else // __CUDA_ARCH__
    r = ~a & b & c;
#endif // __CUDA_ARCH__
    return r;
}

__HOST__ __DEVICE__ uint32_t lop3_28 (uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t r;
#if (__CUDA_ARCH__ >= 500)
    asm ("lop3.b32 %0,%1,%2,%3,0x28;\n\t" : "=r"(r) : "r"(a), "r"(b), "r"(c));
#else // __CUDA_ARCH__
    r = (a ^ b) & c;
#endif // __CUDA_ARCH__
    return r;
}

__HOST__ __DEVICE__ uint32_t lop3_a8 (uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t r;
#if (__CUDA_ARCH__ >= 500)
    asm ("lop3.b32 %0,%1,%2,%3,0xa8;\n\t" : "=r"(r) : "r"(a), "r"(b), "r"(c));
#else // __CUDA_ARCH__
    r = (a | b) & c;
#endif // __CUDA_ARCH__
    return r;
}

__HOST__ __DEVICE__ uint32_t lop3_ef (uint32_t a, uint32_t b, uint32_t c)
{
    uint32_t r;
#if (__CUDA_ARCH__ >= 500)
    asm ("lop3.b32 %0,%1,%2,%3,0xef;\n\t" : "=r"(r) : "r"(a), "r"(b), "r"(c));
#else // __CUDA_ARCH__
    r = ~a | b | c;
#endif // __CUDA_ARCH__
    return r;
}

#define UINT32_H4  0x80808080U   // byte-wise sign bits (MSBs)

/* expand msb of each byte (with other bits zero!) into a byte mask */
__HOST__ __DEVICE__ uint32_t msb_to_byte_mask (uint32_t a)
{
#if (__CUDA_ARCH__ >= 200)
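    /* selector 0xba98: bit 3 set in each nibble selects prmt's sign-replication
       mode, so the msb of source byte i is copied into all bits of result byte i */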
    asm ("prmt.b32 %0,%0,0,0xba98;" : "+r"(a));
#else // __CUDA_ARCH__
    a = a + a - (a >> 7);
#endif // __CUDA_ARCH__
    return a;
}
/* expand msb of each byte (other bits don't matter) into a byte mask */
__HOST__ __DEVICE__ uint32_t masked_msb_to_byte_mask (uint32_t a)
{
#if (__CUDA_ARCH__ >= 200)
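    /* same prmt as above: sign replication only examines the msb of each byte,
       so no pre-masking is needed on the GPU path */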
    asm ("prmt.b32 %0,%0,0,0xba98;" : "+r"(a));
#else // __CUDA_ARCH__
    a = a & UINT32_H4;
    a = a + a - (a >> 7);
#endif // __CUDA_ARCH__
    return a;
}

/* byte-wise addition of a signed addend 'a' and an unsigned addend 'b', with
   the unsigned result clamped to [0,255]
*/
__HOST__ __DEVICE__ uint32_t vadd_su_us4 (uint32_t a, uint32_t b)
{
#if (__CUDA_ARCH__ >= 300) && (__CUDA_ARCH__ < 500)
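    /* Kepler (sm_3x) provides the vadd4 video instruction in hardware */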
    uint32_t d, c = 0;
    asm ("vadd4.u32.s32.u32.sat %0,%1,%2,%3;" : "=r"(d):"r"(a),"r"(b),"r"(c));
    return d;
#else // __CUDA_ARCH__
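    /* emulation: add the low seven bits of each byte so carries cannot cross
       byte boundaries, fold in the msb via XOR, and derive per-byte overflow /
       underflow flags from the msb carry-out and the sign bit of 'a' */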
    uint32_t res, sum, ofl, nufl;
    /* msb sum bit */
    sum = lop3_28 (a, b, UINT32_H4);
    /* add the seven low-order bits; capture the carry into the msb */
    res = (a & ~UINT32_H4) + (b & ~UINT32_H4);
    /* compute clamping flags in msb:
       ofl =       (carry-out from msb == 1) AND NOT (sign bit of 'a' == 1)
       nufl = NOT ((sign bit of 'a' == 1) AND NOT (carry-out from msb == 1))
    */
    ofl = lop3_08 (a, sum, res); // flag in msb, other bits cleared
    nufl = lop3_ef (a, b, res);  // flag in msb, other bits NOT cleared
    /* complete addition for msb */
    res = res ^ sum;
    /* apply clamping */
    res = lop3_a8 (res, msb_to_byte_mask (ofl), masked_msb_to_byte_mask (nufl));
    return res;
#endif // __CUDA_ARCH__
}
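
A small host-compiled check along the following lines (a sketch, not the exact harness used for the testing mentioned above; it assumes the code above is in the same translation unit) exhaustively compares one byte lane against a scalar reference:

#include <cstdio>
#include <cstdlib>

int main (void)
{
    /* exhaustive per-lane test, with the byte replicated across all four lanes */
    for (int i = 0; i < 256; i++) {
        for (int j = 0; j < 256; j++) {
            int s = (int8_t)i + j;                          // signed + unsigned
            uint32_t refb = (uint32_t)((s < 0) ? 0 : ((s > 255) ? 255 : s));
            uint32_t a = (uint32_t)i * 0x01010101U;         // replicate byte
            uint32_t b = (uint32_t)j * 0x01010101U;
            uint32_t res = vadd_su_us4 (a, b);
            uint32_t ref = refb * 0x01010101U;
            if (res != ref) {
                printf ("a=%08x b=%08x res=%08x ref=%08x\n", a, b, res, ref);
                return EXIT_FAILURE;
            }
        }
    }
    printf ("byte-lane test passed\n");
    return EXIT_SUCCESS;
}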

I managed to eliminate one instruction from the emulation sequence (which now runs to ten instructions) by manually controlling the use of LOP3.
