files : remove old wkv6 (#0)

- ggml/src/ggml-cuda/wkv6.cu  +0 -89
- ggml/src/ggml-cuda/wkv6.cuh +0 -5
- ggml/src/ggml-sycl/wkv6.cpp +0 -143
- ggml/src/ggml-sycl/wkv6.hpp +0 -9
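Both deleted backends implemented the same per-head WKV6 recurrence: each GPU thread owns one output channel i within a head of size 64 and carries a 64-wide slice state[j] of that head's state matrix S, computing y_t[i] = sum_j r_t[j] * (tf[j] * k_t[j] * v_t[i] + S[i][j]) and then decaying S[i][j] = S[i][j] * td_t[j] + k_t[j] * v_t[i]. For orientation, here is a minimal single-head CPU sketch of that recurrence; the names and signature are illustrative only and not part of the ggml API:

#include <vector>

// k, v, r, td: [n_tokens][D]; tf: [D]; state: [D][D] holding S[i][j]; dst: [n_tokens][D].
void wkv6_reference(int n_tokens, int D,
                    const float* k, const float* v, const float* r,
                    const float* tf, const float* td,
                    std::vector<float>& state, float* dst) {
    for (int t = 0; t < n_tokens; ++t) {
        const float* kt  = k  + t * D;
        const float* vt  = v  + t * D;
        const float* rt  = r  + t * D;
        const float* tdt = td + t * D;
        for (int i = 0; i < D; ++i) {          // output channel (one GPU thread each)
            float y = 0.0f;
            for (int j = 0; j < D; ++j) {
                const float kv = kt[j] * vt[i];
                float& s = state[i * D + j];
                y += rt[j] * (tf[j] * kv + s); // "time-first" bonus plus carried state
                s  = s * tdt[j] + kv;          // decay, then accumulate
            }
            dst[t * D + i] = y;
        }
    }
}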
ggml/src/ggml-cuda/wkv6.cu (DELETED)
@@ -1,89 +0,0 @@
-#include "common.cuh"
-#include "wkv6.cuh"
-
-static __global__ void rwkv_wkv_f32(const int B, const int T, const int C, const int H, const float * k, const float * v, const float * r, const float * tf, const float * td, const float * s, float * dst) {
-    const int tid = threadIdx.x;
-    const int bid = blockIdx.x;
-
-    const int head_size = CUDA_WKV_BLOCK_SIZE;
-    const int batch_i = bid / H;
-    const int head_i = bid % H;
-    const int state_size = C * head_size;
-    const int n_seq_tokens = T / B;
-
-    float state[head_size];
-    __shared__ float _k[head_size], _r[head_size], _tf[head_size], _td[head_size];
-
-    #pragma unroll
-    for (int i = 0; i < head_size; i++) {
-        state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
-    }
-
-    __syncthreads();
-    _tf[tid] = tf[head_i * head_size + tid];
-    __syncthreads();
-
-    for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) {
-        __syncthreads();
-        _k[tid] = k[t];
-        _r[tid] = r[t];
-        _td[tid] = td[t];
-        __syncthreads();
-
-        const float _v = v[t];
-        float y = 0;
-        for (int j = 0; j < head_size; j += 4) {
-            const float4& k = (float4&)(_k[j]);
-            const float4& r = (float4&)(_r[j]);
-            const float4& tf = (float4&)(_tf[j]);
-            const float4& td = (float4&)(_td[j]);
-            float4& s = (float4&)(state[j]);
-            float4 kv;
-
-            kv.x = k.x * _v;
-            kv.y = k.y * _v;
-            kv.z = k.z * _v;
-            kv.w = k.w * _v;
-
-            y += r.x * (tf.x * kv.x + s.x);
-            y += r.y * (tf.y * kv.y + s.y);
-            y += r.z * (tf.z * kv.z + s.z);
-            y += r.w * (tf.w * kv.w + s.w);
-
-            s.x = s.x * td.x + kv.x;
-            s.y = s.y * td.y + kv.y;
-            s.z = s.z * td.z + kv.z;
-            s.w = s.w * td.w + kv.w;
-        }
-        dst[t] = y;
-    }
-
-    #pragma unroll
-    for (int i = 0; i < head_size; i++) {
-        dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
-    }
-}
-
-void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const float * k_d  = (const float *)dst->src[0]->data;
-    const float * v_d  = (const float *)dst->src[1]->data;
-    const float * r_d  = (const float *)dst->src[2]->data;
-    const float * tf_d = (const float *)dst->src[3]->data;
-    const float * td_d = (const float *)dst->src[4]->data;
-    const float * s_d  = (const float *)dst->src[5]->data;
-
-    const int64_t B = dst->src[5]->ne[1];
-    const int64_t T = dst->src[0]->ne[2];
-    const int64_t C = dst->ne[0];
-    const int64_t H = dst->src[0]->ne[1];
-
-    float * dst_d = (float *)dst->data;
-
-    cudaStream_t stream = ctx.stream();
-
-    GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32);
-    GGML_ASSERT(C % H == 0);
-    GGML_ASSERT(C / H == CUDA_WKV_BLOCK_SIZE); // The current cuda kernel is designed for RWKV6, HEAD_SIZE == 64
-
-    rwkv_wkv_f32<<<B * H, C / H, 0, stream>>>(B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d);
-}
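The host wrapper derived its launch geometry from the ggml tensor shapes and packed the per-token outputs and the final state into a single destination buffer. A small sketch of that mapping, using assumed shape values purely for illustration:

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical shapes: C = n_embd, H = n_heads; the kernel asserts C / H == 64.
    const int64_t B = 2, T = 32, C = 512, H = 8;
    const int64_t head_size = C / H;      // == CUDA_WKV_BLOCK_SIZE (64)
    const int64_t n_blocks  = B * H;      // one block per (batch, head) pair
    const int64_t n_threads = head_size;  // one thread per channel within a head
    // dst holds T*C per-token outputs followed by the final state, B*C*head_size floats:
    const int64_t dst_floats = T * C + B * C * head_size;
    std::printf("grid=%lld block=%lld dst_floats=%lld\n",
                (long long)n_blocks, (long long)n_threads, (long long)dst_floats);
    return 0;
}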
ggml/src/ggml-cuda/wkv6.cuh (DELETED)
@@ -1,5 +0,0 @@
-#include "common.cuh"
-
-#define CUDA_WKV_BLOCK_SIZE 64
-
-void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
ggml/src/ggml-sycl/wkv6.cpp (DELETED)
@@ -1,143 +0,0 @@
-#include <sycl/sycl.hpp>
-#include "wkv6.hpp"
-
-constexpr int WKV_BLOCK_SIZE = 64; // Matching CUDA_WKV_BLOCK_SIZE
-
-// Helper function for the main kernel
-static void rwkv_wkv_f32_kernel(
-    const int B, const int T, const int C, const int H,
-    const float* k, const float* v, const float* r,
-    const float* tf, const float* td, const float* s,
-    float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) {
-
-    const int tid = item_ct1.get_local_id(2);
-    const int bid = item_ct1.get_group(2);
-
-    const int head_size = WKV_BLOCK_SIZE;
-    const int batch_i = bid / H;
-    const int head_i = bid % H;
-    const int state_size = C * head_size;
-    const int n_seq_tokens = T / B;
-
-    // Set up shared memory pointers
-    float* _k = shared_mem;
-    float* _r = _k + head_size;
-    float* _tf = _r + head_size;
-    float* _td = _tf + head_size;
-
-    // Local state array
-    float state[WKV_BLOCK_SIZE];
-
-    // Load initial state
-    #pragma unroll
-    for (int i = 0; i < head_size; i++) {
-        state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
-    }
-
-    // Sync threads before shared memory operations
-    item_ct1.barrier(sycl::access::fence_space::local_space);
-
-    // Load time-mixing parameters
-    _tf[tid] = tf[head_i * head_size + tid];
-    item_ct1.barrier(sycl::access::fence_space::local_space);
-
-    // Main sequence processing loop
-    for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid;
-         t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid;
-         t += C) {
-
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-
-        // Load current timestep data to shared memory
-        _k[tid] = k[t];
-        _r[tid] = r[t];
-        _td[tid] = td[t];
-
-        item_ct1.barrier(sycl::access::fence_space::local_space);
-
-        const float _v = v[t];
-        float y = 0;
-
-        // Process in chunks of 4 for better vectorization
-        sycl::float4 k4, r4, tf4, td4, s4;
-        #pragma unroll
-        for (int j = 0; j < head_size; j += 4) {
-            // Load data in vec4 chunks
-            k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
-            r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
-            tf4 = sycl::float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]);
-            td4 = sycl::float4(_td[j], _td[j+1], _td[j+2], _td[j+3]);
-            s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]);
-
-            // Compute key-value product
-            sycl::float4 kv4 = k4 * _v;
-
-            // Accumulate weighted sum
-            y += sycl::dot(r4, tf4 * kv4 + s4);
-
-            // Update state
-            s4 = s4 * td4 + kv4;
-
-            // Store updated state
-            state[j] = s4.x();
-            state[j+1] = s4.y();
-            state[j+2] = s4.z();
-            state[j+3] = s4.w();
-        }
-
-        dst[t] = y;
-    }
-
-    // Save final state
-    #pragma unroll
-    for (int i = 0; i < head_size; i++) {
-        dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
-    }
-}
-
-void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
-
-    const ggml_tensor *src0 = dst->src[0];
-    const ggml_tensor *src1 = dst->src[1];
-
-    const float* k_d = (const float*)dst->src[0]->data;
-    const float* v_d = (const float*)dst->src[1]->data;
-    const float* r_d = (const float*)dst->src[2]->data;
-    const float* tf_d = (const float*)dst->src[3]->data;
-    const float* td_d = (const float*)dst->src[4]->data;
-    const float* s_d = (const float*)dst->src[5]->data;
-    float* dst_d = (float*)dst->data;
-
-    const int64_t B = dst->src[5]->ne[1];
-    const int64_t T = dst->src[0]->ne[2];
-    const int64_t C = dst->ne[0];
-    const int64_t H = dst->src[0]->ne[1];
-
-    GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32);
-    GGML_ASSERT(C % H == 0);
-    GGML_ASSERT(C / H == WKV_BLOCK_SIZE); // The current sycl kernel is designed for RWKV6, HEAD_SIZE == 64
-
-    dpct::queue_ptr stream = ctx.stream();
-
-    // Calculate execution configuration
-    const size_t shared_mem_size = WKV_BLOCK_SIZE * 4 * sizeof(float); // For k, r, tf, td
-    sycl::range<3> block_dims(1, 1, C / H);
-    sycl::range<3> grid_dims(1, 1, B * H);
-
-    // Submit kernel
-    stream->submit([&](sycl::handler& cgh) {
-        sycl::local_accessor<float, 1> shared_mem_acc(shared_mem_size, cgh);
-
-        cgh.parallel_for(
-            sycl::nd_range<3>(grid_dims * block_dims, block_dims),
-            [=](sycl::nd_item<3> item_ct1) {
-                rwkv_wkv_f32_kernel(
-                    B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d,
-                    item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
-                );
-            });
-    });
-
-    GGML_UNUSED(src0);
-    GGML_UNUSED(src1);
-}
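The SYCL wrapper reproduced the CUDA launch rwkv_wkv_f32<<<B * H, C / H, 0, stream>>>(...) with an nd_range dispatch. A brief sketch of that equivalence; the helper function is hypothetical, not ggml code:

#include <sycl/sycl.hpp>
#include <cstdint>

// SYCL's nd_range takes the *global* range, so the CUDA grid is multiplied by
// the block size; the work-group size plays the role of blockDim.
sycl::nd_range<3> wkv6_nd_range(int64_t B, int64_t C, int64_t H) {
    sycl::range<3> block_dims(1, 1, C / H); // work-group size == head_size (64)
    sycl::range<3> grid_dims(1, 1, B * H);  // one work-group per (batch, head)
    return sycl::nd_range<3>(grid_dims * block_dims, block_dims);
}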
ggml/src/ggml-sycl/wkv6.hpp (DELETED)
@@ -1,9 +0,0 @@
-#ifndef GGML_SYCL_WKV6_HPP
-#define GGML_SYCL_WKV6_HPP
-
-#include "common.hpp"
-
-void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
-
-
-#endif // GGML_SYCL_WKV6_HPP