Spaces:
Running
Running
a3sh
committed on
Commit
·
a18cd16
1
Parent(s):
06ec111
ggml : faster ssm scan (llama/10558)
Browse files* faster ssm_scan
* delete unused comment
* clang format
* add space
* modify unnecessary calculations
* faster ssm conv implementation
* modify file name with dash
- ggml/src/ggml-cuda/ggml-cuda.cu +10 -0
- ggml/src/ggml-cuda/ssm-conv.cu +151 -0
- ggml/src/ggml-cuda/ssm-conv.cuh +3 -0
- ggml/src/ggml-cuda/ssm-scan.cu +155 -0
- ggml/src/ggml-cuda/ssm-scan.cuh +3 -0
ggml/src/ggml-cuda/ggml-cuda.cu
CHANGED
|
@@ -31,6 +31,8 @@
|
|
| 31 |
#include "ggml-cuda/rope.cuh"
|
| 32 |
#include "ggml-cuda/scale.cuh"
|
| 33 |
#include "ggml-cuda/softmax.cuh"
|
|
|
|
|
|
|
| 34 |
#include "ggml-cuda/sum.cuh"
|
| 35 |
#include "ggml-cuda/sumrows.cuh"
|
| 36 |
#include "ggml-cuda/tsembd.cuh"
|
|
@@ -2296,6 +2298,12 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
|
|
| 2296 |
case GGML_OP_SUM_ROWS:
|
| 2297 |
ggml_cuda_op_sum_rows(ctx, dst);
|
| 2298 |
break;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2299 |
case GGML_OP_ARGSORT:
|
| 2300 |
ggml_cuda_op_argsort(ctx, dst);
|
| 2301 |
break;
|
|
@@ -3193,6 +3201,8 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
|
|
| 3193 |
case GGML_OP_COS:
|
| 3194 |
case GGML_OP_CLAMP:
|
| 3195 |
case GGML_OP_LOG:
|
|
|
|
|
|
|
| 3196 |
return true;
|
| 3197 |
case GGML_OP_CONT:
|
| 3198 |
return op->src[0]->type != GGML_TYPE_BF16;
|
|
|
|
| 31 |
#include "ggml-cuda/rope.cuh"
|
| 32 |
#include "ggml-cuda/scale.cuh"
|
| 33 |
#include "ggml-cuda/softmax.cuh"
|
| 34 |
+
#include "ggml-cuda/ssm-conv.cuh"
|
| 35 |
+
#include "ggml-cuda/ssm-scan.cuh"
|
| 36 |
#include "ggml-cuda/sum.cuh"
|
| 37 |
#include "ggml-cuda/sumrows.cuh"
|
| 38 |
#include "ggml-cuda/tsembd.cuh"
|
|
|
|
| 2298 |
case GGML_OP_SUM_ROWS:
|
| 2299 |
ggml_cuda_op_sum_rows(ctx, dst);
|
| 2300 |
break;
|
| 2301 |
+
case GGML_OP_SSM_CONV:
|
| 2302 |
+
ggml_cuda_op_ssm_conv(ctx, dst);
|
| 2303 |
+
break;
|
| 2304 |
+
case GGML_OP_SSM_SCAN:
|
| 2305 |
+
ggml_cuda_op_ssm_scan(ctx, dst);
|
| 2306 |
+
break;
|
| 2307 |
case GGML_OP_ARGSORT:
|
| 2308 |
ggml_cuda_op_argsort(ctx, dst);
|
| 2309 |
break;
|
|
|
|
| 3201 |
case GGML_OP_COS:
|
| 3202 |
case GGML_OP_CLAMP:
|
| 3203 |
case GGML_OP_LOG:
|
| 3204 |
+
case GGML_OP_SSM_SCAN:
|
| 3205 |
+
case GGML_OP_SSM_CONV:
|
| 3206 |
return true;
|
| 3207 |
case GGML_OP_CONT:
|
| 3208 |
return op->src[0]->type != GGML_TYPE_BF16;
|
ggml/src/ggml-cuda/ssm-conv.cu
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ssm-conv.cuh"
|
| 2 |
+
|
| 3 |
+
// Depth-wise 1D convolution for the SSM (Mamba) conv step, short-sequence path.
// Grid layout (see ssm_conv_f32_cuda): grid = (n_s, nr / split_d_inner),
// block = split_d_inner threads; each thread produces one inner channel (row)
// of the output for all n_t tokens of one sequence.
// nc must equal d_conv; src0_nb0/nc/ncs/nr/n_s are unused here but kept so the
// signature matches the long-token variant.
template <size_t split_d_inner, size_t d_conv>
static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1,
                                    const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1,
                                    float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2,
                                    const int nc, const int ncs, const int nr, const int n_t, const int n_s) {
    const int tid  = threadIdx.x;
    const int bidx = blockIdx.x;  // sequence index
    const int bidy = blockIdx.y;  // which split_d_inner-sized chunk of d_inner

    // byte strides are applied through (const) char * so constness is preserved
    const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1);
    const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1);
    float *       y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidy * split_d_inner * dst_nb0);

    const int stride_x = src0_nb1 / sizeof(float);
    const int stride_w = src1_nb1 / sizeof(float);
    const int stride_y = dst_nb1 / sizeof(float);

    // per-thread circular window over the last d_conv inputs, plus the weights
    float x[d_conv] = { 0.0f };
    float w[d_conv] = { 0.0f };

#pragma unroll
    for (int j = 0; j < d_conv; j++) {
        w[j] = w_block[tid * stride_w + j];
    }

    for (int i = 0; i < n_t; i++) {
        float sumf = 0.0f;

        if (i == 0) {
            // first token: fill the whole window
            for (int j = 0; j < d_conv; j++) {
                x[j] = x_block[tid * stride_x + j];
            }
        } else {
            // the window slides by one element per token, so only the slot
            // holding the oldest value is overwritten with the new input
            x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1];
        }

#pragma unroll
        for (int j = 0; j < d_conv; j++) {
            // (i + j) % d_conv maps the circular buffer back to chronological order
            sumf += x[(i + j) % d_conv] * w[j];
        }
        y_block[i * stride_y + tid] = sumf;
    }
}
|
| 46 |
+
|
| 47 |
+
// Long-sequence variant of ssm_conv_f32: the token dimension is additionally
// split across blockIdx.z in chunks of split_n_t tokens, so each block handles
// split_d_inner channels for split_n_t tokens of one sequence.
// nc must equal d_conv; nc/ncs/nr are unused here but keep the signatures of
// the two conv kernels parallel.
template <size_t split_d_inner, size_t d_conv, size_t split_n_t>
static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, const float * __restrict__ src1,
                                               const int src0_nb0, const int src0_nb1, const int src0_nb2,
                                               const int src1_nb1, float * __restrict__ dst, const int dst_nb0,
                                               const int dst_nb1, const int dst_nb2, const int nc, const int ncs,
                                               const int nr, const int n_t, const int n_s) {
    const int tid  = threadIdx.x;
    const int bidx = blockIdx.x;  // sequence index
    const int bidy = blockIdx.y;  // chunk of d_inner
    const int bidz = blockIdx.z;  // chunk of the token dimension

    // (const char *) keeps the byte-stride arithmetic const-correct
    const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1 +
                                             bidz * split_n_t * src0_nb0);
    const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1);
    float *       y_block =
        (float *) ((char *) dst + bidx * dst_nb2 + bidz * split_n_t * dst_nb1 + bidy * split_d_inner * dst_nb0);

    const int stride_x = src0_nb1 / sizeof(float);
    const int stride_w = src1_nb1 / sizeof(float);
    const int stride_y = dst_nb1 / sizeof(float);

    // per-thread circular window over the last d_conv inputs, plus the weights
    float x[d_conv] = { 0.0f };
    float w[d_conv] = { 0.0f };

#pragma unroll
    for (int j = 0; j < d_conv; j++) {
        w[j] = w_block[tid * stride_w + j];
    }

#pragma unroll
    for (int i = 0; i < split_n_t; i++) {
        // guard the tail chunk when n_t is not a multiple of split_n_t
        if (bidz * split_n_t + i < n_t) {
            float sumf = 0.0f;

            if (i == 0) {
                // first token of this chunk: fill the whole window
                for (int j = 0; j < d_conv; j++) {
                    x[j] = x_block[tid * stride_x + j];
                }
            } else {
                // slide the window: only overwrite the oldest slot
                x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1];
            }

#pragma unroll
            for (int j = 0; j < d_conv; j++) {
                sumf += x[(i + j) % d_conv] * w[j];
            }
            y_block[i * stride_y + tid] = sumf;
        }
    }
}
|
| 97 |
+
|
| 98 |
+
// Host-side launcher for the SSM conv kernels. Chooses the short-sequence
// kernel for n_t <= 32 and the z-split long-token kernel otherwise.
// Only a kernel width (nc) of 4 is implemented; anything else aborts.
static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1,
                              const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1,
                              const int dst_nb2, const int nc, const int ncs, const int nr, const int n_t,
                              const int n_s, cudaStream_t stream) {
    const int threads = 128;
    // each thread owns exactly one channel, so d_inner must divide evenly
    GGML_ASSERT(nr % threads == 0);

    if (n_t <= 32) {
        const dim3 blocks(n_s, (nr + threads - 1) / threads, 1);
        if (nc == 4) {
            ssm_conv_f32<threads, 4><<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1,
                                                                     dst, dst_nb0, dst_nb1, dst_nb2, nc, ncs, nr, n_t,
                                                                     n_s);
        } else {
            GGML_ABORT("Only support kernel size = 4 now.");
        }
    } else {
        if (nc == 4) {
            // split long sequences into 32-token chunks along blockIdx.z
            const int split_n_t = 32;
            dim3      blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t);
            ssm_conv_long_token_f32<threads, 4, split_n_t>
                <<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0,
                                                 dst_nb1, dst_nb2, nc, ncs, nr, n_t, n_s);
        } else {
            // same message as the short path so the two failure modes read identically
            GGML_ABORT("Only support kernel size = 4 now.");
        }
    }
}
|
| 126 |
+
|
| 127 |
+
// GGML operator entry point for GGML_OP_SSM_CONV.
// dst->src[0] is the padded input (conv_x), dst->src[1] the conv1d weight;
// validates layout/type preconditions and dispatches to ssm_conv_f32_cuda.
void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const struct ggml_tensor * src0 = dst->src[0];  // conv_x
    const struct ggml_tensor * src1 = dst->src[1];  // conv1d.weight

    const int nc  = src1->ne[0];  // d_conv
    const int ncs = src0->ne[0];  // d_conv - 1 + n_t
    const int nr  = src0->ne[1];  // d_inner
    const int n_t = dst->ne[1];   // tokens per sequence
    const int n_s = dst->ne[2];   // number of sequences in the batch

    GGML_ASSERT(dst->ne[0] == nr);
    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(src1->nb[0] == sizeof(float));
    // the kernel assumes rows of src0 are contiguous
    GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float));

    const float * src0_d = (const float *) src0->data;
    const float * src1_d = (const float *) src1->data;
    float *       dst_d  = (float *) dst->data;
    cudaStream_t  stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    // the weights are also read as f32 by the kernel, so check them too
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1],
                      dst->nb[2], nc, ncs, nr, n_t, n_s, stream);
}
|
ggml/src/ggml-cuda/ssm-conv.cuh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include "common.cuh"

// CUDA implementation of GGML_OP_SSM_CONV (depth-wise conv step of SSM/Mamba)
void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
ggml/src/ggml-cuda/ssm-scan.cu
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ssm-scan.cuh"
|
| 2 |
+
|
| 3 |
+
// #include <cuda_runtime.h>
|
| 4 |
+
// static __device__ void global_to_shared(const float *src, float *dst) {
|
| 5 |
+
// asm volatile("cp.async.");
|
| 6 |
+
// }
|
| 7 |
+
|
| 8 |
+
// Selective-state-space (Mamba) scan. Per channel and token:
//   state = state * expf(softplus(dt) * A) + B * (x * softplus(dt));  y = state . C
// Launch layout (see ssm_scan_f32_cuda): grid = (B, D / splitD), block = splitD
// threads; each thread owns one inner channel. Shared memory holds A and the
// running state, with rows padded to N + 1 floats to avoid bank conflicts.
// The final states are written into dst at byte offset src1_nb3 (after the
// L outputs of each batch).
template <size_t splitD, size_t N>
__global__ void __launch_bounds__(splitD, 2)
    ssm_scan_f32(const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2,
                 const float * __restrict__ src3, const float * __restrict__ src4, const float * __restrict__ src5,
                 const int src0_nb1, const int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2,
                 const int src1_nb3, const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1,
                 const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2,
                 float * __restrict__ dst, const int D, const int L, const int B) {
    const int bidx = blockIdx.x;  // split along B (sequences)
    const int bidy = blockIdx.y;  // split along D (inner channels)
    const int tid  = threadIdx.x;
    const int wid  = tid / 32;    // warp index within the block
    const int wtid = tid % 32;    // lane index within the warp

    extern __shared__ float smem[];
    // one float of padding per row so strided accesses don't share a bank
    const int stride_sA  = N + 1;
    const int stride_ss0 = N + 1;
    float *   smem_A     = smem;
    float *   smem_s0    = smem_A + splitD * stride_sA;

    // byte strides are applied through (const) char * so constness is preserved
    const float * s0_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * splitD * src0_nb1);
    const float * x_block  = (const float *) ((const char *) src1 + (bidx * src1_nb2) + bidy * splitD * sizeof(float));
    const float * dt_block = (const float *) ((const char *) src2 + (bidx * src2_nb2) + bidy * splitD * sizeof(float));
    const float * A_block  = (const float *) ((const char *) src3 + bidy * splitD * src3_nb1);
    const float * B_block  = (const float *) ((const char *) src4 + (bidx * src4_nb2));
    const float * C_block  = (const float *) ((const char *) src5 + (bidx * src5_nb2));
    float *       y_block  = (float *) ((char *) dst + (bidx * src1_nb2) + bidy * splitD * sizeof(float));
    float *       s_block  = (float *) ((char *) dst + src1_nb3 + bidx * src0_nb2 + bidy * splitD * src0_nb1);

    const int stride_s0 = src0_nb1 / sizeof(float);
    const int stride_x  = src1_nb1 / sizeof(float);
    const int stride_dt = src2_nb1 / sizeof(float);
    const int stride_A  = src3_nb1 / sizeof(float);
    const int stride_B  = src4_nb1 / sizeof(float);
    const int stride_C  = src5_nb1 / sizeof(float);
    const int stride_s  = stride_s0;
    const int stride_y  = stride_x;

    // Cooperative load of A and s0 into shared memory. With N == 16 each warp
    // covers two N-wide rows per iteration: lanes 0-15 read row (wid*32 + i)
    // and lanes 16-31 fall into row (wid*32 + i + 1) of the source; the +1 on
    // the store index re-aligns those lanes to the (N + 1)-padded layout.
    // NOTE(review): only N == 16 is handled — other values need their own path.
    if (N == 16) {
#pragma unroll
        for (int i = 0; i < splitD / 4; i += 2) {
            float value = A_block[(wid * warpSize + i) * stride_A + wtid];
            // todo: residual bank conflicts — a swizzled layout may still help
            smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value;
        }
#pragma unroll
        for (int i = 0; i < splitD / 4; i += 2) {
            float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid];
            smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value;
        }
    }

    __syncthreads();

    for (int i = 0; i < L; i++) {
        // softplus(dt); for large inputs softplus(x) ~= x, so skip the math
        float dt_soft_plus = dt_block[i * stride_dt + tid];
        if (dt_soft_plus <= 20.0f) {
            // fix: expf, not exp — the double-precision exp silently promoted
            // the whole expression to double
            dt_soft_plus = log1pf(expf(dt_soft_plus));
        }
        float x_dt = x_block[i * stride_x + tid] * dt_soft_plus;
        float sumf = 0.0f;
#pragma unroll
        for (int j = 0; j < N; j++) {
            float state = (smem_s0[tid * stride_ss0 + j] * expf(dt_soft_plus * smem_A[tid * stride_sA + j])) +
                          (B_block[i * stride_B + j] * x_dt);
            sumf += state * C_block[i * stride_C + j];
            if (i == L - 1) {
                // last token: persist the final state to global memory
                s_block[tid * stride_s + j] = state;
            } else {
                // each thread updates only its own padded row, so no race here
                smem_s0[tid * stride_ss0 + j] = state;
            }
        }
        __syncthreads();
        y_block[i * stride_y + tid] = sumf;
    }
}
|
| 87 |
+
|
| 88 |
+
// Host-side launcher for ssm_scan_f32: one block of `threads` threads per
// (sequence, splitD-chunk of D). Shared memory holds A plus the running state,
// each splitD x (N + 1) floats. Only the d_state == 16 case is implemented.
static void ssm_scan_f32_cuda(const float * src0, const float * src1, const float * src2, const float * src3,
                              const float * src4, const float * src5, const int src0_nb1, const int src0_nb2,
                              const int src1_nb0, const int src1_nb1, const int src1_nb2, const int src1_nb3,
                              const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1,
                              const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2,
                              float * dst, const int N, const int D, const int L, const int B, cudaStream_t stream) {
    const int threads = 128;
    // TODO: can D fail to divide evenly? handle the remainder if that can happen
    GGML_ASSERT(D % threads == 0);
    const dim3 blocks(B, (D + threads - 1) / threads, 1);
    // two splitD x (N + 1) float arrays: A and the running state
    const int  smem_size = (threads * (N + 1) * 2) * sizeof(float);
    if (N == 16) {
        // pass `threads` as the template argument (instead of a literal 128)
        // so the kernel's splitD can never drift from the launch configuration
        ssm_scan_f32<threads, 16><<<blocks, threads, smem_size, stream>>>(
            src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0, src1_nb1, src1_nb2, src1_nb3, src2_nb0,
            src2_nb1, src2_nb2, src3_nb1, src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, D, L, B);
    } else {
        GGML_ABORT("doesn't support N!=16.");
    }
}
|
| 107 |
+
|
| 108 |
+
// GGML operator entry point for GGML_OP_SSM_SCAN (Mamba selective scan).
// dst packs both the per-token outputs (|src1| elements) and the final states
// (|src0| elements, stored after the outputs); validates the layout
// preconditions the kernel relies on, then dispatches to ssm_scan_f32_cuda.
void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const struct ggml_tensor * src0 = dst->src[0];  // s
    const struct ggml_tensor * src1 = dst->src[1];  // x
    const struct ggml_tensor * src2 = dst->src[2];  // dt
    const struct ggml_tensor * src3 = dst->src[3];  // A
    const struct ggml_tensor * src4 = dst->src[4];  // B
    const struct ggml_tensor * src5 = dst->src[5];  // C

    const int64_t nc  = src0->ne[0];  // d_state
    const int64_t nr  = src0->ne[1];  // d_inner
    const int64_t n_t = src1->ne[1];  // number of tokens per sequence
    const int64_t n_s = src0->ne[2];  // number of sequences in the batch

    // dst must hold the outputs plus the final states
    GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst));
    GGML_ASSERT(src0->nb[0] == sizeof(float));
    GGML_ASSERT(src1->nb[0] == sizeof(float));
    GGML_ASSERT(src2->nb[0] == sizeof(float));
    GGML_ASSERT(src3->nb[0] == sizeof(float));
    GGML_ASSERT(src4->nb[0] == sizeof(float));
    GGML_ASSERT(src5->nb[0] == sizeof(float));
    // required for the dot product between s and C
    GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float));
    // required for per-sequence offsets for states
    GGML_ASSERT(src0->nb[2] == src0->ne[0] * src0->ne[1] * sizeof(float));
    // required to get correct offset for state destination (i.e. src1->nb[3])
    GGML_ASSERT(src1->nb[3] == src1->ne[0] * src1->ne[1] * src1->ne[2] * sizeof(float));

    const float * src0_d = (const float *) src0->data;
    const float * src1_d = (const float *) src1->data;
    const float * src2_d = (const float *) src2->data;
    const float * src3_d = (const float *) src3->data;
    const float * src4_d = (const float *) src4->data;
    const float * src5_d = (const float *) src5->data;
    float *       dst_d  = (float *) dst->data;
    cudaStream_t  stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src0->nb[1], src0->nb[2], src1->nb[0],
                      src1->nb[1], src1->nb[2], src1->nb[3], src2->nb[0], src2->nb[1], src2->nb[2], src3->nb[1],
                      src4->nb[1], src4->nb[2], src5->nb[1], src5->nb[2], dst_d, nc, nr, n_t, n_s, stream);
}
|
ggml/src/ggml-cuda/ssm-scan.cuh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include "common.cuh"

// CUDA implementation of GGML_OP_SSM_SCAN (selective scan step of SSM/Mamba)
void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|