tidying up
@@ -1336,8 +1336,8 @@ namespace CUETools.Codecs.FLACCL
frame_count += nFrames;
frame_pos += nFrames * blocksize;
task.openCLCQ.EnqueueWriteBuffer(task.clSamplesBytes, false, 0, sizeof(short) * channels * blocksize * nFrames, task.clSamplesBytes.HostPtr);
//task.openCLCQ.EnqueueUnmapMemObject(task.cudaSamplesBytes, task.cudaSamplesBytes.HostPtr);
//task.openCLCQ.EnqueueMapBuffer(task.cudaSamplesBytes, true, MapFlags.WRITE, 0, task.samplesBufferLen / 2);
//task.openCLCQ.EnqueueUnmapMemObject(task.clSamplesBytes, task.clSamplesBytes.HostPtr);
//task.openCLCQ.EnqueueMapBuffer(task.clSamplesBytes, true, MapFlags.WRITE, 0, task.samplesBufferLen / 2);
}

unsafe void run_GPU_task(FLACCLTask task)
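// Note on the write above: the second argument of EnqueueWriteBuffer is the
// blocking flag, and it is false here, so the call returns before the copy
// completes and the source pointer (task.clSamplesBytes.HostPtr, backed by an
// ALLOC_HOST_PTR allocation) must stay valid until the queue has processed
// the write. A sketch of the byte count transferred, assuming 16-bit stereo
// input with a 4096-sample block and nFrames == 8 (illustrative values only,
// not taken from the commit):
//   sizeof(short) * channels * blocksize * nFrames
//   == 2 * 2 * 4096 * 8 == 131072 bytes per batch.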
@@ -1467,6 +1467,9 @@ namespace CUETools.Codecs.FLACCL
OCLMan.Defines =
"#define MAX_ORDER " + eparams.max_prediction_order.ToString() + "\n" +
"#define GROUP_SIZE " + groupSize.ToString() + "\n" +
#if DEBUG
"#define DEBUG\n" +
#endif
_settings.Defines + "\n";
// The BuildOptions string is passed directly to clBuild and can be used to do debug builds etc
OCLMan.BuildOptions = "";
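// The Defines string is prepended to the kernel source before compilation,
// so MAX_ORDER and GROUP_SIZE become compile-time constants inside the .cl
// file (they size __local arrays and bound the lag loops below). For
// example, with max_prediction_order == 8 and groupSize == 128 the generated
// header would read (values illustrative, not from the commit):
//   #define MAX_ORDER 8
//   #define GROUP_SIZE 128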
@@ -2230,8 +2233,7 @@ namespace CUETools.Codecs.FLACCL
int riceParamsLen = sizeof(int) * (4 << 8) * channels * FLACCLWriter.maxFrames;
int lpcDataLen = sizeof(float) * 32 * 33 * lpc.MAX_LPC_WINDOWS * channelsCount * FLACCLWriter.maxFrames;

clSamplesBytes = openCLProgram.Context.CreateBuffer(MemFlags.READ_WRITE | MemFlags.ALLOC_HOST_PTR, (uint)samplesBufferLen / 2);
//openCLCQ.EnqueueMapBuffer(cudaSamplesBytes, true, MapFlags.WRITE, 0, samplesBufferLen / 2);
clSamplesBytes = openCLProgram.Context.CreateBuffer(MemFlags.READ_ONLY | MemFlags.ALLOC_HOST_PTR, (uint)samplesBufferLen / 2);
clSamples = openCLProgram.Context.CreateBuffer(MemFlags.READ_WRITE, samplesBufferLen);
clResidual = openCLProgram.Context.CreateBuffer(MemFlags.READ_WRITE | MemFlags.ALLOC_HOST_PTR, samplesBufferLen);
clLPCData = openCLProgram.Context.CreateBuffer(MemFlags.READ_WRITE, lpcDataLen);
@@ -2244,24 +2246,26 @@ namespace CUETools.Codecs.FLACCL
clResidualOutput = openCLProgram.Context.CreateBuffer(MemFlags.READ_WRITE, sizeof(int) * channelsCount * (lpc.MAX_LPC_WINDOWS * lpc.MAX_LPC_ORDER + 8) * 64 /*FLACCLWriter.maxResidualParts*/ * FLACCLWriter.maxFrames);
clWindowFunctions = openCLProgram.Context.CreateBuffer(MemFlags.READ_ONLY | MemFlags.ALLOC_HOST_PTR, sizeof(float) * FLACCLWriter.MAX_BLOCKSIZE /** 2*/ * lpc.MAX_LPC_WINDOWS);

clComputeAutocor = openCLProgram.CreateKernel("cudaComputeAutocor");
clStereoDecorr = openCLProgram.CreateKernel("cudaStereoDecorr");
//cudaChannelDecorr = openCLProgram.CreateKernel("cudaChannelDecorr");
clChannelDecorr2 = openCLProgram.CreateKernel("cudaChannelDecorr2");
clFindWastedBits = openCLProgram.CreateKernel("cudaFindWastedBits");
clComputeLPC = openCLProgram.CreateKernel("cudaComputeLPC");
clQuantizeLPC = openCLProgram.CreateKernel("cudaQuantizeLPC");
//cudaComputeLPCLattice = openCLProgram.CreateKernel("cudaComputeLPCLattice");
clEstimateResidual = openCLProgram.CreateKernel("cudaEstimateResidual");
clChooseBestMethod = openCLProgram.CreateKernel("cudaChooseBestMethod");
clCopyBestMethod = openCLProgram.CreateKernel("cudaCopyBestMethod");
clCopyBestMethodStereo = openCLProgram.CreateKernel("cudaCopyBestMethodStereo");
clEncodeResidual = openCLProgram.CreateKernel("cudaEncodeResidual");
clCalcPartition = openCLProgram.CreateKernel("cudaCalcPartition");
clCalcPartition16 = openCLProgram.CreateKernel("cudaCalcPartition16");
clSumPartition = openCLProgram.CreateKernel("cudaSumPartition");
clFindRiceParameter = openCLProgram.CreateKernel("cudaFindRiceParameter");
clFindPartitionOrder = openCLProgram.CreateKernel("cudaFindPartitionOrder");
//openCLCQ.EnqueueMapBuffer(clSamplesBytes, true, MapFlags.WRITE, 0, samplesBufferLen / 2);

clComputeAutocor = openCLProgram.CreateKernel("clComputeAutocor");
clStereoDecorr = openCLProgram.CreateKernel("clStereoDecorr");
//cudaChannelDecorr = openCLProgram.CreateKernel("clChannelDecorr");
clChannelDecorr2 = openCLProgram.CreateKernel("clChannelDecorr2");
clFindWastedBits = openCLProgram.CreateKernel("clFindWastedBits");
clComputeLPC = openCLProgram.CreateKernel("clComputeLPC");
clQuantizeLPC = openCLProgram.CreateKernel("clQuantizeLPC");
//cudaComputeLPCLattice = openCLProgram.CreateKernel("clComputeLPCLattice");
clEstimateResidual = openCLProgram.CreateKernel("clEstimateResidual");
clChooseBestMethod = openCLProgram.CreateKernel("clChooseBestMethod");
clCopyBestMethod = openCLProgram.CreateKernel("clCopyBestMethod");
clCopyBestMethodStereo = openCLProgram.CreateKernel("clCopyBestMethodStereo");
clEncodeResidual = openCLProgram.CreateKernel("clEncodeResidual");
clCalcPartition = openCLProgram.CreateKernel("clCalcPartition");
clCalcPartition16 = openCLProgram.CreateKernel("clCalcPartition16");
clSumPartition = openCLProgram.CreateKernel("clSumPartition");
clFindRiceParameter = openCLProgram.CreateKernel("clFindRiceParameter");
clFindPartitionOrder = openCLProgram.CreateKernel("clFindPartitionOrder");

samplesBuffer = new int[FLACCLWriter.MAX_BLOCKSIZE * channelsCount];
outputBuffer = new byte[max_frame_size * FLACCLWriter.maxFrames + 1];
@@ -2377,14 +2381,13 @@ namespace CUETools.Codecs.FLACCL
clSamples,
clWindowFunctions,
clResidualTasks,
nWindowFunctions - 1,
nResidualTasksPerChannel);

openCLCQ.EnqueueNDRangeKernel(
clComputeAutocor,
groupSize, 1,
eparams.max_prediction_order / 4 + 1,
nWindowFunctions * channelsCount * frameCount);
channelsCount * frameCount,
nWindowFunctions);
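// Grid change for clComputeAutocor: the old launch used
//   dim0 = max_prediction_order / 4 + 1 (one group per 4-lag slice),
//   dim1 = nWindowFunctions * channelsCount * frameCount,
// while the new launch uses
//   dim0 = channelsCount * frameCount (one group per subframe task),
//   dim1 = nWindowFunctions,
// matching the reworked kernel below, which now accumulates every lag
// 0..MAX_ORDER inside a single work-group instead of one 4-lag slice per
// group.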

clComputeLPC.SetArgs(
clResidualTasks,
@@ -2491,7 +2494,7 @@ namespace CUETools.Codecs.FLACCL
openCLCQ.EnqueueNDRangeKernel(
clCalcPartition,
groupSize, 1,
1 << max_porder,
1 + ((1 << max_porder) - 1) / (groupSize / 16),
channels * frameCount);
}

@@ -2516,7 +2519,7 @@ namespace CUETools.Codecs.FLACCL
openCLCQ.EnqueueNDRangeKernel(
clFindRiceParameter,
groupSize, 1,
Math.Max(1, 8 * (2 << max_porder) / groupSize),
Math.Max(1, (2 << max_porder) / groupSize),
channels * frameCount);

//if (max_porder > 0) // need to run even if max_porder==0 just to calculate the final frame size
@@ -2531,18 +2534,18 @@ namespace CUETools.Codecs.FLACCL
groupSize,
channels * frameCount);

//openCLCQ.EnqueueReadBuffer(cudaBestRiceParams, false, 0, sizeof(int) * (1 << max_porder) * channels * frameCount, cudaBestRiceParams.HostPtr);
//openCLCQ.EnqueueReadBuffer(cudaResidual, false, 0, sizeof(int) * MAX_BLOCKSIZE * channels, cudaResidual.HostPtr);
openCLCQ.EnqueueMapBuffer(clBestRiceParams, false, MapFlags.READ, 0, sizeof(int) * (1 << max_porder) * channels * frameCount);
openCLCQ.EnqueueUnmapMemObject(clBestRiceParams, clBestRiceParams.HostPtr);
openCLCQ.EnqueueMapBuffer(clResidual, false, MapFlags.READ, 0, sizeof(int) * FLACCLWriter.MAX_BLOCKSIZE * channels);
openCLCQ.EnqueueUnmapMemObject(clResidual, clResidual.HostPtr);
openCLCQ.EnqueueReadBuffer(clBestRiceParams, false, 0, sizeof(int) * (1 << max_porder) * channels * frameCount, clBestRiceParams.HostPtr);
openCLCQ.EnqueueReadBuffer(clResidual, false, 0, sizeof(int) * FLACCLWriter.MAX_BLOCKSIZE * channels, clResidual.HostPtr);
//openCLCQ.EnqueueMapBuffer(clBestRiceParams, false, MapFlags.READ, 0, sizeof(int) * (1 << max_porder) * channels * frameCount);
//openCLCQ.EnqueueUnmapMemObject(clBestRiceParams, clBestRiceParams.HostPtr);
//openCLCQ.EnqueueMapBuffer(clResidual, false, MapFlags.READ, 0, sizeof(int) * FLACCLWriter.MAX_BLOCKSIZE * channels);
//openCLCQ.EnqueueUnmapMemObject(clResidual, clResidual.HostPtr);
}
//openCLCQ.EnqueueReadBuffer(cudaBestResidualTasks, false, 0, sizeof(FLACCLSubframeTask) * channels * frameCount, cudaBestResidualTasks.HostPtr);
openCLCQ.EnqueueMapBuffer(clBestResidualTasks, false, MapFlags.READ, 0, sizeof(FLACCLSubframeTask) * channels * frameCount);
openCLCQ.EnqueueUnmapMemObject(clBestResidualTasks, clBestResidualTasks.HostPtr);
openCLCQ.EnqueueReadBuffer(clBestResidualTasks, false, 0, sizeof(FLACCLSubframeTask) * channels * frameCount, clBestResidualTasks.HostPtr);
//openCLCQ.EnqueueMapBuffer(clBestResidualTasks, false, MapFlags.READ, 0, sizeof(FLACCLSubframeTask) * channels * frameCount);
//openCLCQ.EnqueueUnmapMemObject(clBestResidualTasks, clBestResidualTasks.HostPtr);

//openCLCQ.EnqueueMapBuffer(cudaSamplesBytes, false, MapFlags.WRITE, 0, samplesBufferLen / 2);
//openCLCQ.EnqueueMapBuffer(clSamplesBytes, false, MapFlags.WRITE, 0, samplesBufferLen / 2);
}
}
}
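// Transfer-style change above: the map/unmap pairs are replaced with plain
// non-blocking EnqueueReadBuffer calls into the buffers' own HostPtr. With
// ALLOC_HOST_PTR buffers a map can be a zero-copy operation, but reading
// into the persistent host pointer is simpler and leaves the buffers'
// mapping state alone; the retired map/unmap sequences are kept above as
// commented-out reference. (This reading of the intent is an inference from
// the diff, not stated in the commit.)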
@@ -20,6 +20,12 @@
#ifndef _FLACCL_KERNEL_H_
#define _FLACCL_KERNEL_H_

#ifdef DEBUG
#pragma OPENCL EXTENSION cl_amd_printf : enable
#endif

#pragma OPENCL EXTENSION cl_khr_local_int32_base_atomics : enable

//#pragma OPENCL EXTENSION cl_amd_fp64 : enable

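// cl_khr_local_int32_base_atomics is required by the atom_add calls on
// __local int arrays introduced below in clCalcPartition and
// clFindPartitionOrder; cl_amd_printf is only pulled in for DEBUG builds,
// which the host side enables by emitting "#define DEBUG" into the source.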
typedef enum
@@ -55,7 +61,7 @@ typedef struct
int coefs[32]; // fixme: should be short?
} FLACCLSubframeTask;

__kernel void cudaStereoDecorr(
__kernel void clStereoDecorr(
__global int *samples,
__global short2 *src,
int offset
@@ -72,7 +78,7 @@ __kernel void cudaStereoDecorr(
}
}

__kernel void cudaChannelDecorr2(
__kernel void clChannelDecorr2(
__global int *samples,
__global short2 *src,
int offset
@@ -87,7 +93,7 @@ __kernel void cudaChannelDecorr2(
}
}

//__kernel void cudaChannelDecorr(
//__kernel void clChannelDecorr(
// int *samples,
// short *src,
// int offset
@@ -102,7 +108,7 @@ __kernel void cudaChannelDecorr2(
//#define __ffs(a) (33 - clz(~a & (a - 1)))

__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaFindWastedBits(
void clFindWastedBits(
__global FLACCLSubframeTask *tasks,
__global int *samples,
int tasksPerChannel
@@ -115,12 +121,13 @@ void cudaFindWastedBits(
int tid = get_local_id(0);
if (tid < sizeof(task) / sizeof(int))
((__local int*)&task)[tid] = ((__global int*)(&tasks[get_group_id(0) * tasksPerChannel].data))[tid];
barrier(CLK_LOCAL_MEM_FENCE);

barrier(CLK_LOCAL_MEM_FENCE);

int w = 0, a = 0;
for (int pos = 0; pos < task.blocksize; pos += GROUP_SIZE)
for (int pos = tid; pos + tid < task.blocksize; pos += GROUP_SIZE)
{
int smp = pos + tid < task.blocksize ? samples[task.samplesOffs + pos + tid] : 0;
int smp = samples[task.samplesOffs + pos];
w |= smp;
a |= smp ^ (smp >> 31);
}
@@ -146,37 +153,39 @@ void cudaFindWastedBits(
tasks[get_group_id(0) * tasksPerChannel + tid].data.abits = a;
}
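// Reading the accumulators: w ORs every raw sample, so trailing zero bits
// common to the whole block show up as trailing zeros in w ("wasted bits"
// the encoder can shift out before prediction; e.g. if every sample is
// even, at least one low-order bit is wasted). a ORs smp ^ (smp >> 31), a
// cheap absolute-magnitude pattern, and feeds the task.data.abits field
// written above; the lines elided by the hunk presumably turn these masks
// into bit counts.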

// get_num_groups(0) == number of tasks
// get_num_groups(1) == number of windows
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaComputeAutocor(
void clComputeAutocor(
__global float *output,
__global const int *samples,
__global const float *window,
__global FLACCLSubframeTask *tasks,
const int windowCount, // windows (log2: 0,1)
const int taskCount // tasks per block
)
{
__local float data[GROUP_SIZE * 2];
__local float product[GROUP_SIZE];
__local float product[(MAX_ORDER / 4 + 1) * GROUP_SIZE];
__local FLACCLSubframeData task;
const int tid = get_local_id(0);
// fetch task data
if (tid < sizeof(task) / sizeof(int))
((__local int*)&task)[tid] = ((__global int*)(tasks + taskCount * (get_group_id(1) >> windowCount)))[tid];
((__local int*)&task)[tid] = ((__global int*)(tasks + taskCount * get_group_id(0)))[tid];

for (int ord4 = 0; ord4 < (MAX_ORDER / 4 + 1); ord4 ++)
product[ord4 * GROUP_SIZE + tid] = 0.0f;
barrier(CLK_LOCAL_MEM_FENCE);

int bs = task.blocksize;
int windowOffs = (get_group_id(1) & ((1 << windowCount)-1)) * bs;
int windowOffs = get_group_id(1) * bs;

data[tid] = tid < bs ? samples[task.samplesOffs + tid] * window[windowOffs + tid] : 0.0f;

int tid0 = tid % (GROUP_SIZE >> 2);
int tid1 = tid / (GROUP_SIZE >> 2);
int lag0 = get_group_id(0) * 4;
__local float4 * dptr = ((__local float4 *)&data[0]) + tid0;
__local float4 * dptr1 = ((__local float4 *)&data[lag0 + tid1]) + tid0;
__local float4 * dptr1 = ((__local float4 *)&data[tid1]) + tid0;

float prod = 0.0f;
for (int pos = 0; pos < bs; pos += GROUP_SIZE)
{
// fetch samples
@@ -184,32 +193,26 @@ void cudaComputeAutocor(
data[tid + GROUP_SIZE] = nextData;
barrier(CLK_LOCAL_MEM_FENCE);

prod += dot(*dptr, *dptr1);
for (int ord4 = 0; ord4 < (MAX_ORDER / 4 + 1); ord4 ++)
product[ord4 * GROUP_SIZE + tid] += dot(dptr[0], dptr1[ord4]);

barrier(CLK_LOCAL_MEM_FENCE);

data[tid] = nextData;
}
product[tid] = prod;
barrier(CLK_LOCAL_MEM_FENCE);
for (int l = (GROUP_SIZE >> 3); l > 0; l >>= 1)
{
if (tid0 < l)
product[tid] = product[tid] + product[tid + l];
barrier(CLK_LOCAL_MEM_FENCE);
}
if (tid < 4 && tid + lag0 <= MAX_ORDER)
output[get_group_id(1) * (MAX_ORDER + 1) + tid + lag0] = product[tid * (GROUP_SIZE >> 2)];
for (int ord4 = 0; ord4 < (MAX_ORDER / 4 + 1); ord4 ++)
for (int l = (GROUP_SIZE >> 3); l > 0; l >>= 1)
{
if (tid0 < l)
product[ord4 * GROUP_SIZE + tid] += product[ord4 * GROUP_SIZE + tid + l];
barrier(CLK_LOCAL_MEM_FENCE);
}
if (tid <= MAX_ORDER)
output[(get_group_id(0) * get_num_groups(1) + get_group_id(1)) * (MAX_ORDER + 1) + tid] = product[tid * (GROUP_SIZE >> 2)];
}

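// Autocorrelation layout after the change: product[] holds MAX_ORDER/4 + 1
// rows of GROUP_SIZE partial sums, one row per float4 lag slice. Each
// iteration accumulates dot(dptr[0], dptr1[ord4]), i.e. the windowed sample
// vector multiplied by itself shifted by 4*ord4 .. 4*ord4 + 3 samples
// (tid1 selects the offset within the slice), and the per-row tree
// reduction then folds the GROUP_SIZE partials into output[... + lag] for
// every lag 0..MAX_ORDER within a single work-group.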
//#define DEBUGPRINT

#ifdef DEBUGPRINT
#pragma OPENCL EXTENSION cl_amd_printf : enable
#endif

__kernel __attribute__((reqd_work_group_size(32, 1, 1)))
void cudaComputeLPC(
void clComputeLPC(
__global FLACCLSubframeTask *tasks,
__global float *autoc,
__global float *lpcs,
@@ -250,7 +253,7 @@ void cudaComputeLPC(
shared.ldr[get_local_id(0)] = 0.0f;
float error = shared.autoc[0];

#ifdef DEBUGPRINT
#ifdef DEBUGPRINT1
int magic = shared.autoc[0] == 177286873088.0f;
if (magic && get_local_id(0) <= MAX_ORDER)
printf("autoc[%d] == %f\n", get_local_id(0), shared.autoc[get_local_id(0)]);
@@ -261,8 +264,8 @@ void cudaComputeLPC(
{
// Schur recursion
float reff = -shared.gen1[0] / error;
error += shared.gen1[0] * reff; // Equivalent to error *= (1 - reff * reff);
//error *= (1 - reff * reff);
//error += shared.gen1[0] * reff; // Equivalent to error *= (1 - reff * reff);
error *= (1 - reff * reff);
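// Why the two error updates are algebraically the same: with
// reff = -gen1[0] / error,
//   error + gen1[0] * reff = error - gen1[0]^2 / error
//                          = error * (1 - (gen1[0] / error)^2)
//                          = error * (1 - reff * reff).
// Worked example: error = 4, gen1[0] = 2 gives reff = -0.5, and both forms
// yield 4 - 1 = 4 * 0.75 = 3. The multiplicative form is less prone to
// cancellation when |reff| is close to 1, which is presumably why the
// commit reverts to it.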
float gen1;
if (get_local_id(0) < MAX_ORDER - 1 - order)
{
@@ -272,7 +275,7 @@ void cudaComputeLPC(
barrier(CLK_LOCAL_MEM_FENCE);
if (get_local_id(0) < MAX_ORDER - 1 - order)
shared.gen1[get_local_id(0)] = gen1;
#ifdef DEBUGPRINT
#ifdef DEBUGPRINT1
if (magic && get_local_id(0) == 0)
printf("order == %d, reff == %f, error = %f\n", order, reff, error);
if (magic && get_local_id(0) <= MAX_ORDER)
@@ -304,7 +307,7 @@ void cudaComputeLPC(
}

__kernel __attribute__((reqd_work_group_size(32, 1, 1)))
void cudaQuantizeLPC(
void clQuantizeLPC(
__global FLACCLSubframeTask *tasks,
__global float*lpcs,
int taskCount, // tasks per block
@@ -449,8 +452,12 @@ void cudaQuantizeLPC(
}
}

#ifndef PARTORDER
#define PARTORDER 4
#endif

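// PARTORDER is the log2 of the estimation partition size, so the default of
// 4 keeps the historical 16-sample partitions: each work-group of GROUP_SIZE
// threads covers GROUP_SIZE >> PARTORDER partitions per pass, which is why
// the len[] array below shrinks from GROUP_SIZE / 16 to
// GROUP_SIZE >> PARTORDER once the constant is made configurable.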
__kernel /*__attribute__(( vec_type_hint (int4)))*/ __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaEstimateResidual(
void clEstimateResidual(
__global int*output,
__global int*samples,
__global FLACCLSubframeTask *tasks
@@ -459,7 +466,7 @@ void cudaEstimateResidual(
__local int data[GROUP_SIZE * 2];
__local FLACCLSubframeTask task;
__local int residual[GROUP_SIZE];
__local int len[GROUP_SIZE / 16];
__local int len[GROUP_SIZE >> PARTORDER];

const int tid = get_local_id(0);
if (tid < sizeof(task)/sizeof(int))
@@ -471,7 +478,7 @@ void cudaEstimateResidual(

if (tid < 32 && tid >= ro)
task.coefs[tid] = 0;
if (tid < GROUP_SIZE / 16)
if (tid < (GROUP_SIZE >> PARTORDER))
len[tid] = 0;
data[tid] = 0;

@@ -514,35 +521,49 @@ void cudaEstimateResidual(
#endif
;

int t = data[tid + GROUP_SIZE] - ((sum.x + sum.y + sum.z + sum.w) >> task.data.shift);
int t = nextData - ((sum.x + sum.y + sum.z + sum.w) >> task.data.shift);
// ensure we're within frame bounds
t = select(0, t, offs >= ro && offs < bs);
// overflow protection
t = clamp(t, -0x7fffff, 0x7fffff);
// convert to unsigned
residual[tid] = (t << 1) ^ (t >> 31);
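// The zigzag map (t << 1) ^ (t >> 31) folds signed residuals into unsigned
// codes while preserving magnitude order, which is what Rice coding needs:
//   t =  0 -> 0,  t = -1 -> 1,  t = 1 -> 2,  t = -2 -> 3,  t = 2 -> 4, ...
// (t >> 31 is an arithmetic shift: all-ones for negative t, zero otherwise.)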
barrier(CLK_GLOBAL_MEM_FENCE);
barrier(CLK_LOCAL_MEM_FENCE);
data[tid] = nextData;

// calculate rice partition bit length for every 16 samples
if (tid < GROUP_SIZE / 16)
if (tid < (GROUP_SIZE >> PARTORDER))
{
__local int4 * chunk = ((__local int4 *)residual) + (tid << 2);
int4 sum = chunk[0] + chunk[1] + chunk[2] + chunk[3];
int res = sum.x + sum.y + sum.z + sum.w;
int k = clamp(27 - clz(res), 0, 14); // 27 - clz(res) == clz(16) - clz(res) == log2(res / 16)
#ifdef EXTRAMODE
sum = (chunk[0] >> k) + (chunk[1] >> k) + (chunk[2] >> k) + (chunk[3] >> k);
len[tid] += (k << 4) + sum.x + sum.y + sum.z + sum.w;
//__local int4 * chunk = (__local int4 *)&residual[tid << PARTORDER];
__local int4 * chunk = ((__local int4 *)residual) + (tid << (PARTORDER - 2));
#if PARTORDER == 3
int4 sum = chunk[0] + chunk[1];
#elif PARTORDER == 4
int4 sum = chunk[0] + chunk[1] + chunk[2] + chunk[3]; // [0 .. (1 << (PARTORDER - 2)) - 1]
#elif PARTORDER == 5
int4 sum = chunk[0] + chunk[1] + chunk[2] + chunk[3] + chunk[4] + chunk[5] + chunk[6] + chunk[7];
#else
len[tid] += (k << 4) + (res >> k);
#error Invalid PARTORDER
#endif
int res = sum.x + sum.y + sum.z + sum.w;
int k = clamp(clz(1 << PARTORDER) - clz(res), 0, 14); // 27 - clz(res) == clz(16) - clz(res) == log2(res / 16)
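// Rice parameter estimate: a partition of 2^PARTORDER samples whose zigzag
// values sum to res has mean res >> PARTORDER, and the cheapest k is about
// log2 of that mean; clz(1 << PARTORDER) - clz(res) computes exactly
// floor(log2(res)) - PARTORDER. Example with PARTORDER == 4: res = 1000
// gives clz(16) - clz(1000) = 27 - 22 = 5, and log2(1000 / 16) = 5.97, so
// k = 5 is chosen (clamped to the FLAC-legal range 0..14).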
#ifdef EXTRAMODE
#if PARTORDER == 3
sum = (chunk[0] >> k) + (chunk[1] >> k);
#elif PARTORDER == 4
sum = (chunk[0] >> k) + (chunk[1] >> k) + (chunk[2] >> k) + (chunk[3] >> k);
#else
#error Invalid PARTORDER
#endif
len[tid] += (k << PARTORDER) + sum.x + sum.y + sum.z + sum.w;
#else
len[tid] += (k << PARTORDER) + (res >> k);
#endif
}

data[tid] = nextData;
}

barrier(CLK_LOCAL_MEM_FENCE);
for (int l = GROUP_SIZE / 32; l > 0; l >>= 1)
for (int l = GROUP_SIZE >> (PARTORDER + 1); l > 0; l >>= 1)
{
if (tid < l)
len[tid] += len[tid + l];
@@ -553,7 +574,7 @@ void cudaEstimateResidual(
}

__kernel __attribute__((reqd_work_group_size(32, 1, 1)))
void cudaChooseBestMethod(
void clChooseBestMethod(
__global FLACCLSubframeTask *tasks,
__global int *residual,
int taskCount
@@ -621,7 +642,7 @@ void cudaChooseBestMethod(
}

__kernel __attribute__((reqd_work_group_size(64, 1, 1)))
void cudaCopyBestMethod(
void clCopyBestMethod(
__global FLACCLSubframeTask *tasks_out,
__global FLACCLSubframeTask *tasks,
int count
@@ -636,7 +657,7 @@ void cudaCopyBestMethod(
}

__kernel __attribute__((reqd_work_group_size(64, 1, 1)))
void cudaCopyBestMethodStereo(
void clCopyBestMethodStereo(
__global FLACCLSubframeTask *tasks_out,
__global FLACCLSubframeTask *tasks,
int count
@@ -690,7 +711,7 @@ void cudaCopyBestMethodStereo(

// get_group_id(0) == task index
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaEncodeResidual(
void clEncodeResidual(
__global int *output,
__global int *samples,
__global FLACCLSubframeTask *tasks
@@ -756,10 +777,10 @@ void cudaEncodeResidual(
}
}

// get_group_id(0) == partition index
// get_group_id(0) == partition index / (GROUP_SIZE / 16)
// get_group_id(1) == task index
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaCalcPartition(
void clCalcPartition(
__global int *partition_lengths,
__global int *residual,
__global FLACCLSubframeTask *tasks,
@@ -767,51 +788,53 @@ void cudaCalcPartition(
int psize // == task.blocksize >> max_porder?
)
{
__local int data[GROUP_SIZE];
__local int length[GROUP_SIZE / 16][16];
__local int pl[(GROUP_SIZE / 16)][15];
__local FLACCLSubframeData task;

const int tid = get_local_id(0);
if (tid < sizeof(task) / sizeof(int))
((__local int*)&task)[tid] = ((__global int*)(&tasks[get_group_id(1)]))[tid];
barrier(CLK_LOCAL_MEM_FENCE);

int k = tid % 16;
int x = tid / 16;

int sum = 0;
for (int pos0 = 0; pos0 < psize; pos0 += GROUP_SIZE)
if (tid < (GROUP_SIZE / 16))
{
int offs = get_group_id(0) * psize + pos0 + tid;
// fetch residual
int s = (offs >= task.residualOrder && pos0 + tid < psize) ? residual[task.residualOffs + offs] : 0;
// convert to unsigned
data[tid] = min(0x7fffff, (s << 1) ^ (s >> 31));
barrier(CLK_LOCAL_MEM_FENCE);

// calc number of unary bits for each residual sample with each rice parameter
for (int pos = 0; pos < psize && pos < GROUP_SIZE; pos += GROUP_SIZE / 16)
sum += data[pos + x] >> k;
barrier(CLK_LOCAL_MEM_FENCE);
for (int k = 0; k <= 14; k++)
pl[tid][k] = 0;
}

length[x][k] = min(0x7fffff, sum);
barrier(CLK_LOCAL_MEM_FENCE);

if (x == 0)
int start = get_group_id(0) * psize * (GROUP_SIZE / 16);
int end = min(start + psize * (GROUP_SIZE / 16), task.blocksize);
for (int offs = start + tid; offs < end; offs += GROUP_SIZE)
{
for (int i = 1; i < GROUP_SIZE / 16; i++)
length[0][k] += length[i][k];
// output length
const int pos = (15 << (max_porder + 1)) * get_group_id(1) + (k << (max_porder + 1));
if (k <= 14)
partition_lengths[pos + get_group_id(0)] = min(0x7fffff,length[0][k]) + (psize - task.residualOrder * (get_group_id(0) == 0)) * (k + 1);
// fetch residual
int s = (offs >= task.residualOrder && offs < end) ? residual[task.residualOffs + offs] : 0;
// convert to unsigned
s = clamp(s, -0x7fffff, 0x7fffff);
s = (s << 1) ^ (s >> 31);
// calc number of unary bits for each residual sample with each rice parameter
int part = (offs - start) / psize;
for (int k = 0; k <= 14; k++)
atom_add(&pl[part][k], s >> k);
//pl[part][k] += s >> k;
}
barrier(CLK_LOCAL_MEM_FENCE);

int part = get_group_id(0) * (GROUP_SIZE / 16) + tid;
if (tid < (GROUP_SIZE / 16) && part < (1 << max_porder))
{
for (int k = 0; k <= 14; k++)
{
// output length
const int pos = (15 << (max_porder + 1)) * get_group_id(1) + (k << (max_porder + 1));
partition_lengths[pos + part] = min(0x7fffff, pl[tid][k]) + select(psize, psize - task.residualOrder, part == 0) * (k + 1);
// if (get_group_id(1) == 0)
//printf("pl[%d][%d] == %d\n", k, part, min(0x7fffff, pl[k][tid]) + (psize - task.residualOrder * (part == 0)) * (k + 1));
}
}
}
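// New clCalcPartition scheme: one work-group now covers GROUP_SIZE / 16
// partitions, hence the revised host-side grid of
// 1 + ((1 << max_porder) - 1) / (groupSize / 16) groups per task. Every
// thread walks the group's sample range, zigzag-encodes its residual s and
// atomically accumulates the quotient s >> k into pl[part][k] for all
// k = 0..14; the final Rice length per (partition, k) is those quotient
// bits plus a fixed (k + 1) bits per sample (one stop bit plus k remainder
// bits).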

// get_group_id(0) == task index
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaCalcPartition16(
void clCalcPartition16(
__global int *partition_lengths,
__global int *residual,
__global int *samples,
@@ -887,6 +910,9 @@ void cudaCalcPartition16(
s = clamp(s, -0x7fffff, 0x7fffff);
// convert to unsigned
res[tid] = (s << 1) ^ (s >> 31);

// for (int k = 0; k < 15; k++) atom_add(&pl[x][k], s >> k);

barrier(CLK_LOCAL_MEM_FENCE);
data[tid] = nextData;

@@ -906,22 +932,22 @@ void cudaCalcPartition16(
// get_group_id(0) == k
// get_group_id(1) == task index
__kernel __attribute__((reqd_work_group_size(128, 1, 1)))
void cudaSumPartition(
void clSumPartition(
__global int* partition_lengths,
int max_porder
)
{
__local int data[512]; // max_porder <= 8, data length <= 1 << 9.
__local int data[256]; // max_porder <= 8, data length <= 1 << 9.
const int pos = (15 << (max_porder + 1)) * get_group_id(1) + (get_group_id(0) << (max_porder + 1));

// fetch partition lengths
data[get_local_id(0)] = get_local_id(0) < (1 << max_porder) ? partition_lengths[pos + get_local_id(0)] : 0;
data[get_local_size(0) + get_local_id(0)] = get_local_size(0) + get_local_id(0) < (1 << max_porder) ? partition_lengths[pos + get_local_size(0) + get_local_id(0)] : 0;
int2 pl = get_local_id(0) * 2 < (1 << max_porder) ? *(__global int2*)&partition_lengths[pos + get_local_id(0) * 2] : 0;
data[get_local_id(0)] = pl.x + pl.y;
barrier(CLK_LOCAL_MEM_FENCE);

int in_pos = (get_local_id(0) << 1);
int out_pos = (1 << max_porder) + get_local_id(0);
for (int bs = 1 << (max_porder - 1); bs > 0; bs >>= 1)
int out_pos = (1 << (max_porder - 1)) + get_local_id(0);
for (int bs = 1 << (max_porder - 2); bs > 0; bs >>= 1)
{
if (get_local_id(0) < bs) data[out_pos] = data[in_pos] + data[in_pos + 1];
in_pos += bs << 1;
@@ -929,131 +955,96 @@ void cudaSumPartition(
barrier(CLK_LOCAL_MEM_FENCE);
}
if (get_local_id(0) < (1 << max_porder))
partition_lengths[pos + (1 << max_porder) + get_local_id(0)] = data[(1 << max_porder) + get_local_id(0)];
partition_lengths[pos + (1 << max_porder) + get_local_id(0)] = data[get_local_id(0)];
if (get_local_size(0) + get_local_id(0) < (1 << max_porder))
partition_lengths[pos + (1 << max_porder) + get_local_size(0) + get_local_id(0)] = data[(1 << max_porder) + get_local_size(0) + get_local_id(0)];
partition_lengths[pos + (1 << max_porder) + get_local_size(0) + get_local_id(0)] = data[get_local_size(0) + get_local_id(0)];
}
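// clSumPartition builds the length table for the coarser partition orders:
// each level sums two adjacent partitions, so for max_porder == 3 the eight
// leaf lengths collapse 8 -> 4 -> 2 -> 1, and every intermediate sum is
// appended after the leaves in partition_lengths. The rewrite fuses the
// first level into the load itself (pl.x + pl.y), which is why both the
// __local array and the loop's starting stride shrink by half.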

// Finds optimal rice parameter for several partitions at a time.
// get_group_id(0) == chunk index (chunk size is GROUP_SIZE / 8, so total task size is 8 * (2 << max_porder))
// get_group_id(0) == chunk index (chunk size is GROUP_SIZE, total task size is (2 << max_porder))
// get_group_id(1) == task index
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaFindRiceParameter(
void clFindRiceParameter(
__global int* rice_parameters,
__global int* partition_lengths,
int max_porder
)
{
__local struct {
volatile int length[GROUP_SIZE];
volatile int index[GROUP_SIZE];
} shared;
const int tid = get_local_id(0);
const int ws = GROUP_SIZE / 8;
const int parts = min(ws, 2 << max_porder);
const int p = tid % ws;
const int k = tid / ws; // 0..7
const int pos = (15 << (max_porder + 1)) * get_group_id(1) + (k << (max_porder + 1));
const int parts = min(GROUP_SIZE, 2 << max_porder);
const int pos = (15 << (max_porder + 1)) * get_group_id(1) + get_group_id(0) * GROUP_SIZE + tid;

// read length for 32 partitions
int l1 = (p < parts) ? partition_lengths[pos + get_group_id(0) * ws + p] : 0xffffff;
int l2 = (k + 8 <= 14 && p < parts) ? partition_lengths[pos + (8 << (max_porder + 1)) + get_group_id(0) * ws + p] : 0xffffff;
// find best rice parameter
shared.index[tid] = k + ((l2 < l1) << 3);
shared.length[tid] = l1 = min(l1, l2);
barrier(CLK_LOCAL_MEM_FENCE);
//#pragma unroll 3
for (int lsh = GROUP_SIZE / 2; lsh >= ws; lsh >>= 1)
{
if (tid < lsh)
{
l2 = shared.length[tid + lsh];
shared.index[tid] = shared.index[tid + (l2 < l1) * lsh];
shared.length[tid] = l1 = min(l1, l2);
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (tid < parts)
{
int best_l = partition_lengths[pos];
int best_k = 0;
for (int k = 1; k <= 14; k++)
{
int l = partition_lengths[pos + (k << (max_porder + 1))];
best_k = select(best_k, k, l < best_l);
best_l = min(best_l, l);
}

// output rice parameter
rice_parameters[(get_group_id(1) << (max_porder + 2)) + get_group_id(0) * parts + tid] = shared.index[tid];
rice_parameters[(get_group_id(1) << (max_porder + 2)) + get_group_id(0) * GROUP_SIZE + tid] = best_k;
// output length
rice_parameters[(get_group_id(1) << (max_porder + 2)) + (1 << (max_porder + 1)) + get_group_id(0) * parts + tid] = shared.length[tid];
rice_parameters[(get_group_id(1) << (max_porder + 2)) + (1 << (max_porder + 1)) + get_group_id(0) * GROUP_SIZE + tid] = best_l;
}
}
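// The old clFindRiceParameter spread the 15 candidate k values across
// thread slices and found the minimum with a shared-memory length/index
// reduction; the new version gives each thread one partition and lets it
// scan k = 0..14 sequentially, writing best_k and best_l straight out. The
// host grid shrinks to match, from 8 * (2 << max_porder) work-items per
// task down to (2 << max_porder). Results land in rice_parameters as one
// block of parameters followed, (1 << (max_porder + 1)) entries later, by
// the matching lengths.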

// get_group_id(0) == task index
__kernel __attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
void cudaFindPartitionOrder(
void clFindPartitionOrder(
__global int* best_rice_parameters,
__global FLACCLSubframeTask *tasks,
__global int* rice_parameters,
int max_porder
)
{
__local struct {
int length[32];
int index[32];
} shared;
__local int partlen[GROUP_SIZE];
__local int partlen[9];
__local FLACCLSubframeData task;

const int pos = (get_group_id(0) << (max_porder + 2)) + (2 << max_porder);
if (get_local_id(0) < sizeof(task) / sizeof(int))
((__local int*)&task)[get_local_id(0)] = ((__global int*)(&tasks[get_group_id(0)]))[get_local_id(0)];
if (get_local_id(0) < 9)
partlen[get_local_id(0)] = 0;
barrier(CLK_LOCAL_MEM_FENCE);

// fetch partition lengths
for (int offs = 0; offs < (2 << max_porder); offs += GROUP_SIZE)
{
if (offs + get_local_id(0) < (2 << max_porder) - 1)
{
int len = rice_parameters[pos + offs + get_local_id(0)];
int porder = 31 - clz((2 << max_porder) - 1 - offs - get_local_id(0));
atom_add(&partlen[porder], len);
}
}
barrier(CLK_LOCAL_MEM_FENCE);

for (int porder = max_porder; porder >= 0; porder--)
int best_length = partlen[0] + 4;
int best_porder = 0;
for (int porder = 1; porder <= max_porder; porder++)
{
int len = 0;
for (int offs = 0; offs < (1 << porder); offs += GROUP_SIZE)
len += offs + get_local_id(0) < (1 << porder) ? rice_parameters[pos + (2 << max_porder) - (2 << porder) + offs + get_local_id(0)] : 0;
partlen[get_local_id(0)] = len;
barrier(CLK_LOCAL_MEM_FENCE);
for (int l = min(GROUP_SIZE, 1 << porder) / 2; l > 0; l >>= 1)
{
if (get_local_id(0) < l)
partlen[get_local_id(0)] += partlen[get_local_id(0) + l];
barrier(CLK_LOCAL_MEM_FENCE);
}
if (get_local_id(0) == 0)
shared.length[porder] = partlen[0] + (4 << porder);
barrier(CLK_LOCAL_MEM_FENCE);
int length = (4 << porder) + partlen[porder];
best_porder = select(best_porder, porder, length < best_length);
best_length = min(best_length, length);
}
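// Cost model for picking the partition order: a residual section coded with
// partition order porder stores 2^porder partitions, each preceded by a
// 4-bit Rice parameter, so the overhead term is (4 << porder) on top of the
// summed partition lengths partlen[porder]. The rewrite pre-aggregates each
// order's total once via atom_add (31 - clz(...) finds which level of the
// summed table an entry belongs to), after which a simple serial scan over
// porder = 0..max_porder picks best_porder, replacing the old per-order
// tree reduction and the shared.length/index minimum search.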

if (get_local_id(0) < 32 && get_local_id(0) > max_porder)
shared.length[get_local_id(0)] = 0xfffffff;
if (get_local_id(0) < 32)
shared.index[get_local_id(0)] = get_local_id(0);
barrier(CLK_LOCAL_MEM_FENCE);
//atom_min(shared.index[get_local_id(0)],);
int l1 = get_local_id(0) <= max_porder ? shared.length[get_local_id(0)] : 0xfffffff;
for (int l = 8; l > 0; l >>= 1)
{
if (get_local_id(0) < l)
{
int l2 = shared.length[get_local_id(0) + l];
shared.index[get_local_id(0)] = shared.index[get_local_id(0) + select(0, l, l2 < l1)];
shared.length[get_local_id(0)] = l1 = min(l1, l2);
}
barrier(CLK_LOCAL_MEM_FENCE);
}
if (get_local_id(0) == 0)
tasks[get_group_id(0)].data.porder = shared.index[0];
if (get_local_id(0) == 0)
{
tasks[get_group_id(0)].data.porder = best_porder;
int obits = task.obits - task.wbits;
tasks[get_group_id(0)].data.size =
task.type == Fixed ? task.residualOrder * obits + 6 + l1 :
task.type == LPC ? task.residualOrder * obits + 6 + l1 + 4 + 5 + task.residualOrder * task.cbits :
task.type == Fixed ? task.residualOrder * obits + 6 + best_length :
task.type == LPC ? task.residualOrder * obits + 6 + best_length + 4 + 5 + task.residualOrder * task.cbits :
task.type == Constant ? obits : obits * task.blocksize;
}
barrier(CLK_LOCAL_MEM_FENCE);
int porder = shared.index[0];
for (int offs = 0; offs < (1 << porder); offs += GROUP_SIZE)
if (offs + get_local_id(0) < (1 << porder))
best_rice_parameters[(get_group_id(0) << max_porder) + offs + get_local_id(0)] = rice_parameters[pos - (2 << porder) + offs + get_local_id(0)];
for (int offs = 0; offs < (1 << best_porder); offs += GROUP_SIZE)
if (offs + get_local_id(0) < (1 << best_porder))
best_rice_parameters[(get_group_id(0) << max_porder) + offs + get_local_id(0)] = rice_parameters[pos - (2 << best_porder) + offs + get_local_id(0)];
// FIXME: should be bytes?
}
#endif