// cuetools.net/CUETools.FlaCuda/flacuda.cu

/**
 * CUETools.FlaCuda: FLAC audio encoder using CUDA
 * Copyright (c) 2009 Gregory S. Chudov
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef _FLACUDA_KERNEL_H_
#define _FLACUDA_KERNEL_H_
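
/*
 * Kernel pipeline (per FLAC frame), as suggested by the code below:
 * cudaComputeAutocor produces per-partition windowed autocorrelation sums,
 * cudaComputeLPC turns them into quantized LPC coefficients for every order,
 * cudaEstimateResidual scores each candidate predictor,
 * cudaSumResidualChunks/cudaSumResidual total the estimated bit lengths,
 * cudaChooseBestResidual picks the cheapest candidate, and
 * cudaEncodeResidual emits the actual residual for the winner.
 */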
typedef struct
{
    int samplesOffs;
    int windowOffs;
    int residualOffs;
    int blocksize;
} computeAutocorTaskStruct;
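
// Subframe types; the numeric values appear to match the FLAC subframe
// header base type codes (the Fixed and LPC codes get the predictor
// order added to them in the bitstream).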
typedef enum
{
    Constant = 0,
    Verbatim = 1,
    Fixed = 8,
    LPC = 32
} SubframeType;
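
// One candidate predictor per task: the quantized coefficients, the shift
// used for fixed-point prediction, and the estimated subframe size in bits,
// filled in by the kernels below.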
typedef struct
{
    int residualOrder; // <= 32
    int samplesOffs;
    int shift;
    int cbits;
    int size;
    int type;
    int obits;
    int blocksize;
    int reserved[8];
    int coefs[32];
} encodeResidualTaskStruct;
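
/*
 * Windowed autocorrelation: one task per blockIdx.y, one partition of
 * partSize samples per blockIdx.x. Each partition computes lags
 * 0..max_order with a 256-thread shared-memory reduction; the
 * per-partition sums are added together in cudaComputeLPC. A hypothetical
 * host-side launch (names are illustrative, not from this file) might be:
 *
 *   dim3 grid(nPartitions, nTasks);
 *   cudaComputeAutocor<<<grid, 256>>>(d_autoc, d_samples, d_window,
 *       d_autocorTasks, maxOrder, frameSize, partSize);
 */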
extern "C" __global__ void cudaComputeAutocor(
float *output,
const int *samples,
const float *window,
2009-09-09 09:46:13 +00:00
computeAutocorTaskStruct *tasks,
int max_order, // should be <= 32
2009-09-08 04:56:34 +00:00
int frameSize,
2009-09-09 14:40:34 +00:00
int partSize // should be <= 2*blockDim - max_order
2009-09-09 09:46:13 +00:00
)
{
    __shared__ struct {
        float data[512];
        volatile float product[256];
        volatile float sum[33];
        computeAutocorTaskStruct task;
    } shared;
    const int tid = threadIdx.x;
    const int tid2 = threadIdx.x + 256;
    // fetch task data
    if (tid < sizeof(shared.task) / sizeof(int))
        ((int*)&shared.task)[tid] = ((int*)(tasks + blockIdx.y))[tid];
    __syncthreads();
    const int pos = blockIdx.x * partSize;
    const int productLen = min(frameSize - pos - max_order, partSize);
    const int dataLen = productLen + max_order;
    // fetch windowed samples; each thread loads two
    shared.data[tid] = tid < dataLen ? samples[shared.task.samplesOffs + pos + tid] * window[shared.task.windowOffs + pos + tid] : 0.0f;
    shared.data[tid2] = tid2 < dataLen ? samples[shared.task.samplesOffs + pos + tid2] * window[shared.task.windowOffs + pos + tid2] : 0.0f;
    __syncthreads();
    for (int lag = 0; lag <= max_order; lag++)
    {
        shared.product[tid] = (tid < productLen) * shared.data[tid] * shared.data[tid + lag]
            + (tid2 < productLen) * shared.data[tid2] * shared.data[tid2 + lag];
        __syncthreads();
        // product sum: reduction in shared mem
        //if (tid < 256) shared.product[tid] += shared.product[tid + 256]; __syncthreads();
        if (tid < 128) shared.product[tid] += shared.product[tid + 128]; __syncthreads();
        if (tid < 64) shared.product[tid] += shared.product[tid + 64]; __syncthreads();
        if (tid < 32) shared.product[tid] += shared.product[tid + 32]; __syncthreads();
        // final steps run warp-synchronously (volatile shared memory, one warp holds the result)
        shared.product[tid] += shared.product[tid + 16];
        shared.product[tid] += shared.product[tid + 8];
        shared.product[tid] += shared.product[tid + 4];
        shared.product[tid] += shared.product[tid + 2];
        if (tid == 0) shared.sum[lag] = shared.product[0] + shared.product[1];
        __syncthreads();
    }
    // return results
    if (tid <= max_order)
        output[(blockIdx.x + blockIdx.y * gridDim.x) * (max_order + 1) + tid] = shared.sum[tid];
}
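
/*
 * Sums the partial autocorrelations for one task, then runs the Schur
 * recursion to get reflection coefficients and the Levinson-Durbin
 * recursion to convert them into LPC coefficients. For each order the
 * coefficients are quantized with a common shift (precision 13 bits,
 * dropping to 12 above order 8) and written to one output task at
 * output[residualOffs + order]. The recursions run inside a single warp
 * (tid < 32), relying on implicit warp synchronization.
 */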
extern "C" __global__ void cudaComputeLPC(
    encodeResidualTaskStruct *output,
    float *autoc,
    computeAutocorTaskStruct *tasks,
    int max_order, // should be <= 32
    int partCount // should be <= blockDim?
)
{
    __shared__ struct {
        computeAutocorTaskStruct task;
        volatile float ldr[32];
        volatile int bits[32];
        volatile float autoc[33];
        volatile float gen0[32];
        volatile float gen1[32];
        volatile float parts[128];
        //volatile float reff[32];
        int cbits;
    } shared;
    const int tid = threadIdx.x;
    // fetch task data
    if (tid < sizeof(shared.task) / sizeof(int))
        ((int*)&shared.task)[tid] = ((int*)(tasks + blockIdx.y))[tid];
    // add up parts
    for (int order = 0; order <= max_order; order++)
    {
        shared.parts[tid] = tid < partCount ? autoc[(blockIdx.y * partCount + tid) * (max_order + 1) + order] : 0;
        __syncthreads();
        if (tid < 64 && blockDim.x > 64) shared.parts[tid] += shared.parts[tid + 64];
        __syncthreads();
        if (tid < 32)
        {
            if (blockDim.x > 32) shared.parts[tid] += shared.parts[tid + 32];
            shared.parts[tid] += shared.parts[tid + 16];
            shared.parts[tid] += shared.parts[tid + 8];
            shared.parts[tid] += shared.parts[tid + 4];
            shared.parts[tid] += shared.parts[tid + 2];
            shared.parts[tid] += shared.parts[tid + 1];
            if (tid == 0)
                shared.autoc[order] = shared.parts[0];
        }
    }
    if (tid < 32)
    {
        shared.gen0[tid] = shared.autoc[tid + 1];
        shared.gen1[tid] = shared.autoc[tid + 1];
        shared.ldr[tid] = 0.0f;

        float error = shared.autoc[0];
        for (int order = 0; order < max_order; order++)
        {
            // Schur recursion
            float reff = -shared.gen1[0] / error;
            //if (tid == 0) shared.reff[order] = reff;
            error += __fmul_rz(shared.gen1[0], reff);
            if (tid < max_order - 1 - order)
            {
                float g1 = shared.gen1[tid + 1] + __fmul_rz(reff, shared.gen0[tid]);
                float g0 = __fmul_rz(shared.gen1[tid + 1], reff) + shared.gen0[tid];
                shared.gen1[tid] = g1;
                shared.gen0[tid] = g0;
            }
            // Levinson-Durbin recursion
            shared.ldr[tid] += (tid < order) * __fmul_rz(reff, shared.ldr[order - 1 - tid]) + (tid == order) * reff;
            // Quantization
            int precision = 13 - (order > 8);
            int taskNo = shared.task.residualOffs + order;
            shared.bits[tid] = __mul24((33 - __clz(__float2int_rn(fabs(shared.ldr[tid]) * (1 << 15))) - precision), tid <= order);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 16]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 8]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 4]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 2]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 1]);
            int sh = max(0, min(15, 15 - shared.bits[0]));

            // reverse coefs
            int coef = max(-(1 << precision), min((1 << precision) - 1, __float2int_rn(-shared.ldr[order - tid] * (1 << sh))));
            if (tid <= order)
                output[taskNo].coefs[tid] = coef;
            if (tid == 0)
                output[taskNo].shift = sh;
            shared.bits[tid] = 33 - max(__clz(coef), __clz(-1 ^ coef));
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 16]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 8]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 4]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 2]);
            shared.bits[tid] = max(shared.bits[tid], shared.bits[tid + 1]);
            int cbits = shared.bits[0];
            if (tid == 0)
                output[taskNo].cbits = cbits;
        }
    }
}
// blockDim.x == 32
// blockDim.y == 8
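/*
 * Estimates the cost of candidate predictors: 8 tasks per block
 * (threadIdx.y), 32 threads per task (threadIdx.x). Each task runs its
 * LPC filter over one partition, accumulates zigzag-coded residual
 * magnitudes, then searches Rice parameters 0..14 in parallel and writes
 * the smallest estimated partition size in bits to output.
 */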
extern "C" __global__ void cudaEstimateResidual(
int*output,
int*samples,
encodeResidualTaskStruct *tasks,
2009-09-11 11:16:45 +00:00
int max_order,
2009-09-10 00:00:46 +00:00
int frameSize,
2009-09-16 17:11:36 +00:00
int partSize // should be blockDim.x * blockDim.y == 256
2009-09-10 00:00:46 +00:00
)
{
    __shared__ struct {
        int data[32*9];
        volatile int residual[32*8];
        encodeResidualTaskStruct task[8];
    } shared;
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    // fetch task data (first 16 ints = everything except the coefs)
    if (threadIdx.x < 16)
        ((int*)&shared.task[threadIdx.y])[threadIdx.x] = ((int*)(&tasks[blockIdx.y * blockDim.y + threadIdx.y]))[threadIdx.x];
    __syncthreads();
    const int pos = blockIdx.x * partSize;
    const int dataLen = min(frameSize - pos, partSize + max_order);
    // fetch samples
    shared.data[tid] = tid < dataLen ? samples[shared.task[0].samplesOffs + pos + tid] : 0;
    if (tid < 32) shared.data[tid + partSize] = tid + partSize < dataLen ? samples[shared.task[0].samplesOffs + pos + tid + partSize] : 0;
    const int residualLen = max(0, min(frameSize - pos - shared.task[threadIdx.y].residualOrder, partSize));
    __syncthreads();
    shared.residual[tid] = 0;
    shared.task[threadIdx.y].coefs[threadIdx.x] = threadIdx.x < max_order ? tasks[blockIdx.y * blockDim.y + threadIdx.y].coefs[threadIdx.x] : 0;

    // verbatim tasks skip the whole loop (residual stays zero)
    for (int i = blockDim.y * (shared.task[threadIdx.y].type == Verbatim); i < blockDim.y; i++)
    {
        int ptr = threadIdx.x + (i << 5); // advances by 32 per iteration
        // compute residual
        int sum = 0;
        int c = 0;
        for (c = 0; c < shared.task[threadIdx.y].residualOrder; c++)
            sum += __mul24(shared.data[ptr + c], shared.task[threadIdx.y].coefs[c]);
        sum = shared.data[ptr + c] - (sum >> shared.task[threadIdx.y].shift);
        shared.residual[tid] += __mul24(ptr < residualLen, (sum << 1) ^ (sum >> 31));
    }
    // enable this line when using blockDim.x == 64
    //__syncthreads(); if (threadIdx.x < 32) shared.residual[tid] += shared.residual[tid + 32]; __syncthreads();
    shared.residual[tid] += shared.residual[tid + 16];
    shared.residual[tid] += shared.residual[tid + 8];
    shared.residual[tid] += shared.residual[tid + 4];
    shared.residual[tid] += shared.residual[tid + 2];
    shared.residual[tid] += shared.residual[tid + 1];
    // rice parameter search
    shared.residual[tid] = __mul24(threadIdx.x >= 15, 0x7fffff) + residualLen * (threadIdx.x + 1) + ((shared.residual[threadIdx.y * blockDim.x] - (residualLen >> 1)) >> threadIdx.x);
    shared.residual[tid] = min(shared.residual[tid], shared.residual[tid + 8]);
    shared.residual[tid] = min(shared.residual[tid], shared.residual[tid + 4]);
    shared.residual[tid] = min(shared.residual[tid], shared.residual[tid + 2]);
    shared.residual[tid] = min(shared.residual[tid], shared.residual[tid + 1]);
    if (threadIdx.x == 0)
        output[(blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x + blockIdx.x] = shared.residual[tid];
}
// blockDim.x == 256
// gridDim.x = frameSize / chunkSize
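/*
 * Rice-codes one chunk of an already computed residual signal: converts
 * the residual to zigzag representation, sums it with a shared-memory
 * reduction, and writes the best Rice parameter estimate for the chunk.
 * Residual input is laid out with a stride of 8192 samples per task
 * (blockIdx.y).
 */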
extern "C" __global__ void cudaSumResidualChunks(
int *output,
encodeResidualTaskStruct *tasks,
int *residual,
int frameSize,
int chunkSize // <= blockDim.x(256)
)
{
__shared__ struct {
int residual[256];
int rice[32];
} shared;
    // fetch parameters
    const int tid = threadIdx.x;
    const int residualOrder = tasks[blockIdx.y].residualOrder;
    const int chunkNumber = blockIdx.x;
    const int pos = chunkNumber * chunkSize;
    const int residualLen = min(frameSize - pos - residualOrder, chunkSize);
    // set upper residuals to zero, in case blockDim < 256
    shared.residual[255 - tid] = 0;
    __syncthreads(); // finish the zero-fill before other threads overwrite their slots
    // read residual
    int res = (tid < residualLen) ? residual[blockIdx.y * 8192 + pos + tid] : 0;
    // convert to unsigned
    shared.residual[tid] = (2 * res) ^ (res >> 31);
    __syncthreads();
    // residual sum: reduction in shared mem
    if (tid < 128) shared.residual[tid] += shared.residual[tid + 128]; __syncthreads();
    if (tid < 64) shared.residual[tid] += shared.residual[tid + 64]; __syncthreads();
    if (tid < 32) shared.residual[tid] += shared.residual[tid + 32]; __syncthreads();
    shared.residual[tid] += shared.residual[tid + 16];
    shared.residual[tid] += shared.residual[tid + 8];
    shared.residual[tid] += shared.residual[tid + 4];
    shared.residual[tid] += shared.residual[tid + 2];
    shared.residual[tid] += shared.residual[tid + 1];
    if (tid < 32)
    {
        // rice parameter search
        shared.rice[tid] = __mul24(tid >= 15, 0x7fffff) + residualLen * (tid + 1) + ((shared.residual[0] - (residualLen >> 1)) >> tid);
        shared.rice[tid] = min(shared.rice[tid], shared.rice[tid + 8]);
        shared.rice[tid] = min(shared.rice[tid], shared.rice[tid + 4]);
        shared.rice[tid] = min(shared.rice[tid], shared.rice[tid + 2]);
        shared.rice[tid] = min(shared.rice[tid], shared.rice[tid + 1]);
    }
    // write output
    if (tid == 0)
        output[blockIdx.x + blockIdx.y * gridDim.x] = shared.rice[0];
}
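
/*
 * Adds up the per-partition bit lengths for one task and stores the total
 * estimated subframe size, including header overhead per subframe type:
 * warmup samples plus coefficient and Rice partition headers for LPC,
 * warmup samples only for Fixed, or obits * blocksize for the verbatim
 * fallback.
 */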
extern "C" __global__ void cudaSumResidual(
encodeResidualTaskStruct *tasks,
int *residual,
2009-09-11 11:16:45 +00:00
int partSize,
int partCount // <= blockDim.y (256)
2009-09-10 00:00:46 +00:00
)
{
    __shared__ struct {
        int partLen[256];
        encodeResidualTaskStruct task;
    } shared;
    const int tid = threadIdx.x;
    // fetch task data
    if (tid < sizeof(encodeResidualTaskStruct) / sizeof(int))
        ((int*)&shared.task)[tid] = ((int*)(tasks + blockIdx.y))[tid];
    __syncthreads();
    shared.partLen[tid] = (tid < partCount) ? residual[tid + partCount * blockIdx.y] : 0;
    __syncthreads();
    // length sum: reduction in shared mem
    //if (tid < 128) shared.partLen[tid] += shared.partLen[tid + 128]; __syncthreads();
    //if (tid < 64) shared.partLen[tid] += shared.partLen[tid + 64]; __syncthreads();
    if (tid < 32) shared.partLen[tid] += shared.partLen[tid + 32]; __syncthreads();
    shared.partLen[tid] += shared.partLen[tid + 16];
    shared.partLen[tid] += shared.partLen[tid + 8];
    shared.partLen[tid] += shared.partLen[tid + 4];
    shared.partLen[tid] += shared.partLen[tid + 2];
    shared.partLen[tid] += shared.partLen[tid + 1];
    // return sum
    if (tid == 0)
        tasks[blockIdx.y].size = shared.task.type == Fixed ?
            shared.task.residualOrder * shared.task.obits + 6 + shared.partLen[0] : shared.task.type == LPC ?
            shared.task.residualOrder * shared.task.obits + 4 + 5 + shared.task.residualOrder * shared.task.cbits + 6 + (4 * partCount/2)/* << porder */ + shared.partLen[0] :
            shared.task.obits * shared.task.blocksize;
}
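
// Selects, per frame (blockIdx.y), the candidate task with the smallest
// estimated size and copies it to tasks_out. BEST_INDEX is one arg-min
// step over the shared length[] array.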
#define BEST_INDEX(a,b) ((a) + ((b) - (a)) * (shared.length[b] < shared.length[a]))
extern "C" __global__ void cudaChooseBestResidual(
encodeResidualTaskStruct *tasks_out,
encodeResidualTaskStruct *tasks,
int count
)
{
__shared__ struct {
volatile int index[128];
int length[256];
} shared;
//shared.index[threadIdx.x] = threadIdx.x;
shared.length[threadIdx.x] = (threadIdx.x < count) ? tasks[threadIdx.x + count * blockIdx.y].size : 0x7fffffff;
__syncthreads();
//if (threadIdx.x < 128) shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 128]); __syncthreads();
if (threadIdx.x < 128) shared.index[threadIdx.x] = BEST_INDEX(threadIdx.x, threadIdx.x + 128); __syncthreads();
if (threadIdx.x < 64) shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 64]); __syncthreads();
if (threadIdx.x < 32)
{
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 32]);
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 16]);
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 8]);
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 4]);
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 2]);
shared.index[threadIdx.x] = BEST_INDEX(shared.index[threadIdx.x], shared.index[threadIdx.x + 1]);
}
__syncthreads();
if (threadIdx.x < sizeof(encodeResidualTaskStruct)/sizeof(int))
((int*)(tasks_out + blockIdx.y))[threadIdx.x] = ((int*)(tasks + count * blockIdx.y + shared.index[0]))[threadIdx.x];
// if (threadIdx.x == 0)
//tasks[count * blockIdx.y].best = count * blockIdx.y + shared.index[0];
2009-09-10 00:00:46 +00:00
}
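
/*
 * Computes the final residual for the chosen predictor, one partition of
 * blockDim.x samples per blockIdx.x, writing to output at the same offsets
 * as the input samples. A hypothetical launch (names are illustrative,
 * not from this file) might be:
 *
 *   dim3 grid((frameSize + 255) / 256, nTasks);
 *   cudaEncodeResidual<<<grid, 256>>>(d_residual, d_samples, d_bestTasks);
 */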
extern "C" __global__ void cudaEncodeResidual(
int*output,
int*samples,
2009-09-16 17:11:36 +00:00
encodeResidualTaskStruct *tasks
2009-09-08 16:26:53 +00:00
)
2009-09-07 12:39:31 +00:00
{
2009-09-16 17:11:36 +00:00
__shared__ struct {
int data[256 + 32];
encodeResidualTaskStruct task;
} shared;
const int tid = threadIdx.x;
if (threadIdx.x < sizeof(encodeResidualTaskStruct))
((int*)&shared.task)[threadIdx.x] = ((int*)(&tasks[blockIdx.y]))[threadIdx.x];
__syncthreads();
const int partSize = blockDim.x;
const int pos = blockIdx.x * partSize;
const int dataLen = min(shared.task.blocksize - pos, partSize + shared.task.residualOrder);
// fetch samples
shared.data[tid] = tid < dataLen ? samples[shared.task.samplesOffs + pos + tid] : 0;
if (tid < 32) shared.data[tid + partSize] = tid + partSize < dataLen ? samples[shared.task.samplesOffs + pos + tid + partSize] : 0;
const int residualLen = max(0,min(shared.task.blocksize - pos - shared.task.residualOrder, partSize));
2009-09-08 16:26:53 +00:00
__syncthreads();
2009-09-16 17:11:36 +00:00
// compute residual
int sum = 0;
for (int c = 0; c < shared.task.residualOrder; c++)
sum += __mul24(shared.data[tid + c], shared.task.coefs[c]);
if (tid < residualLen)
output[shared.task.samplesOffs + pos + tid] = shared.data[tid + shared.task.residualOrder] - (sum >> shared.task.shift);
2009-09-07 12:39:31 +00:00
}
#endif