Residual coding

Ali
2022-07-29 18:32:51 +02:00
parent 64076541cd
commit f6ad7dc77a
35 changed files with 2784 additions and 498 deletions

View File

@@ -3,6 +3,7 @@
#include "DCTCommon.h"
#include <vector>
#include <Accelerate/Accelerate.h>
#define DCTSIZE 8 /* The basic DCT block is 8x8 samples */
#define DCTSIZE2 64 /* DCTSIZE squared; # of elements in a block */
@@ -128,6 +129,16 @@ static DCTELEM std_chrominance_quant_tbl[DCTSIZE2] = {
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99
};
static DCTELEM std_delta_quant_tbl[DCTSIZE2] = {
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16
};
int jpeg_quality_scaling(int quality)
/* Convert a user-specified quality rating to a percentage scaling factor
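 * for an underlying quantization table (libjpeg convention:
 * scale = 5000 / quality for quality < 50, else 200 - quality * 2).
 */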
@@ -285,14 +296,16 @@ static const int zigZagInv[DCTSIZE2] = {
53,60,61,54,47,55,62,63
};
static const int zigZag[DCTSIZE2] = {
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63
};
static const int zigZag4x4Inv[4 * 4] = {
0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};
void performForwardDct(uint8_t const *pixels, int16_t *coefficients, int width, int height, int bytesPerRow, DCTELEM *divisors) {
DCTELEM block[DCTSIZE2];
JCOEF coefBlock[DCTSIZE2];
int acOffset = (width / DCTSIZE) * (height / DCTSIZE);
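/* Coefficient layout: the DC value of each 8x8 block is stored first (one per
 * block); the AC values of all blocks follow from acOffset onward, appended
 * in zig-zag order. */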
for (int y = 0; y < height; y += DCTSIZE) {
for (int x = 0; x < width; x += DCTSIZE) {
for (int blockY = 0; blockY < DCTSIZE; blockY++) {
@@ -305,9 +318,17 @@ void performForwardDct(uint8_t const *pixels, int16_t *coefficients, int width,
quantize(coefBlock, divisors, block);
coefficients[(y / DCTSIZE) * (width / DCTSIZE) + x / DCTSIZE] = coefBlock[0];
for (int blockY = 0; blockY < DCTSIZE; blockY++) {
for (int blockX = 0; blockX < DCTSIZE; blockX++) {
coefficients[(y + blockY) * bytesPerRow + (x + blockX)] = coefBlock[zigZagInv[blockY * DCTSIZE + blockX]];
if (blockX == 0 && blockY == 0) {
continue;
}
int16_t element = coefBlock[zigZagInv[blockY * DCTSIZE + blockX]];
//coefficients[(y + blockY) * bytesPerRow + (x + blockX)] = element;
coefficients[acOffset] = element;
acOffset++;
}
}
}
@@ -318,11 +339,21 @@ void performInverseDct(int16_t const * coefficients, uint8_t *pixels, int width,
DCTELEM coefficientBlock[DCTSIZE2];
JSAMPLE pixelBlock[DCTSIZE2];
int acOffset = (width / DCTSIZE) * (height / DCTSIZE);
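/* Reads the layout produced by performForwardDct: per-block DC values first,
 * then the zig-zag AC values starting at acOffset. */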
for (int y = 0; y < height; y += DCTSIZE) {
for (int x = 0; x < width; x += DCTSIZE) {
coefficientBlock[0] = coefficients[(y / DCTSIZE) * (width / DCTSIZE) + x / DCTSIZE];
for (int blockY = 0; blockY < DCTSIZE; blockY++) {
for (int blockX = 0; blockX < DCTSIZE; blockX++) {
coefficientBlock[zigZag[blockY * DCTSIZE + blockX]] = coefficients[(y + blockY) * coefficientsPerRow + (x + blockX)];
if (blockX == 0 && blockY == 0) {
continue;
}
int16_t element = coefficients[acOffset];
acOffset++;
coefficientBlock[zigZagInv[blockY * DCTSIZE + blockX]] = element;
//coefficientBlock[zigZagInv[blockY * DCTSIZE + blockX]] = coefficients[(y + blockY) * coefficientsPerRow + (x + blockX)];
}
}
@@ -337,18 +368,516 @@ void performInverseDct(int16_t const * coefficients, uint8_t *pixels, int width,
}
}
void matrix_multiply_4x4_neon(float32_t *A, float32_t *B, float32_t *C) {
// these are the columns A
float32x4_t A0;
float32x4_t A1;
float32x4_t A2;
float32x4_t A3;
// these are the columns B
float32x4_t B0;
float32x4_t B1;
float32x4_t B2;
float32x4_t B3;
// these are the columns C
float32x4_t C0;
float32x4_t C1;
float32x4_t C2;
float32x4_t C3;
A0 = vld1q_f32(A);
A1 = vld1q_f32(A+4);
A2 = vld1q_f32(A+8);
A3 = vld1q_f32(A+12);
// Zero accumulators for C values
C0 = vmovq_n_f32(0);
C1 = vmovq_n_f32(0);
C2 = vmovq_n_f32(0);
C3 = vmovq_n_f32(0);
// Multiply accumulate in 4x1 blocks, i.e. each column in C
B0 = vld1q_f32(B);
C0 = vfmaq_laneq_f32(C0, A0, B0, 0);
C0 = vfmaq_laneq_f32(C0, A1, B0, 1);
C0 = vfmaq_laneq_f32(C0, A2, B0, 2);
C0 = vfmaq_laneq_f32(C0, A3, B0, 3);
vst1q_f32(C, C0);
B1 = vld1q_f32(B+4);
C1 = vfmaq_laneq_f32(C1, A0, B1, 0);
C1 = vfmaq_laneq_f32(C1, A1, B1, 1);
C1 = vfmaq_laneq_f32(C1, A2, B1, 2);
C1 = vfmaq_laneq_f32(C1, A3, B1, 3);
vst1q_f32(C+4, C1);
B2 = vld1q_f32(B+8);
C2 = vfmaq_laneq_f32(C2, A0, B2, 0);
C2 = vfmaq_laneq_f32(C2, A1, B2, 1);
C2 = vfmaq_laneq_f32(C2, A2, B2, 2);
C2 = vfmaq_laneq_f32(C2, A3, B2, 3);
vst1q_f32(C+8, C2);
B3 = vld1q_f32(B+12);
C3 = vfmaq_laneq_f32(C3, A0, B3, 0);
C3 = vfmaq_laneq_f32(C3, A1, B3, 1);
C3 = vfmaq_laneq_f32(C3, A2, B3, 2);
C3 = vfmaq_laneq_f32(C3, A3, B3, 3);
vst1q_f32(C+12, C3);
}
typedef int16_t tran_low_t;
typedef int32_t tran_high_t;
typedef int16_t tran_coef_t;
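/* Q14 fixed-point constants (libvpx convention):
 * cospi_k_64 = round(16384 * cos(k * pi / 64)). */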
static const tran_coef_t cospi_1_64 = 16364;
static const tran_coef_t cospi_2_64 = 16305;
static const tran_coef_t cospi_3_64 = 16207;
static const tran_coef_t cospi_4_64 = 16069;
static const tran_coef_t cospi_5_64 = 15893;
static const tran_coef_t cospi_6_64 = 15679;
static const tran_coef_t cospi_7_64 = 15426;
static const tran_coef_t cospi_8_64 = 15137;
static const tran_coef_t cospi_9_64 = 14811;
static const tran_coef_t cospi_10_64 = 14449;
static const tran_coef_t cospi_11_64 = 14053;
static const tran_coef_t cospi_12_64 = 13623;
static const tran_coef_t cospi_13_64 = 13160;
static const tran_coef_t cospi_14_64 = 12665;
static const tran_coef_t cospi_15_64 = 12140;
static const tran_coef_t cospi_16_64 = 11585;
static const tran_coef_t cospi_17_64 = 11003;
static const tran_coef_t cospi_18_64 = 10394;
static const tran_coef_t cospi_19_64 = 9760;
static const tran_coef_t cospi_20_64 = 9102;
static const tran_coef_t cospi_21_64 = 8423;
static const tran_coef_t cospi_22_64 = 7723;
static const tran_coef_t cospi_23_64 = 7005;
static const tran_coef_t cospi_24_64 = 6270;
static const tran_coef_t cospi_25_64 = 5520;
static const tran_coef_t cospi_26_64 = 4756;
static const tran_coef_t cospi_27_64 = 3981;
static const tran_coef_t cospi_28_64 = 3196;
static const tran_coef_t cospi_29_64 = 2404;
static const tran_coef_t cospi_30_64 = 1606;
static const tran_coef_t cospi_31_64 = 804;
// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
static const tran_coef_t sinpi_1_9 = 5283;
static const tran_coef_t sinpi_2_9 = 9929;
static const tran_coef_t sinpi_3_9 = 13377;
static const tran_coef_t sinpi_4_9 = 15212;
#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
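/* Rounds to nearest: e.g. ROUND_POWER_OF_TWO(23, 2) == (23 + 2) >> 2 == 6. */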
static inline tran_high_t fdct_round_shift(tran_high_t input) {
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
// TODO(debargha, peter.derivaz): Find new bounds for this assert
// and make the bounds consts.
// assert(INT16_MIN <= rv && rv <= INT16_MAX);
return rv;
}
void fdct4x4_float(const int16_t *input, tran_low_t *output) {
float inputFloat[4 * 4];
for (int i = 0; i < 4 * 4; i++) {
inputFloat[i] = (float)input[i];
}
float outputFloat[4 * 4];
int i, j, u, v;
for (u = 0; u < 4; ++u) {
for (v = 0; v < 4; ++v) {
outputFloat[u * 4 + v] = 0;
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
outputFloat[u * 4 + v] += inputFloat[i * 4 + j] * cos(M_PI / 4.0 * (i + 0.5) * u) * cos(M_PI / 4.0 * (j + 0.5) * v);
}
}
}
}
for (int i = 0; i < 4 * 4; i++) {
output[i] = (tran_low_t)outputFloat[i];
}
}
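/* fdct4x4_float is a direct O(N^4) DCT-II reference; vpx_fdct4x4_c below
 * computes the same transform with two fast butterfly passes, up to libvpx's
 * fixed-point input/output scaling. */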
void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
// The 2D transform is done with two passes which are actually pretty
// similar. In the first one, we transform the columns and transpose
// the results. In the second one, we transform the rows. To achieve that,
// as the first pass results are transposed, we transpose the columns (that
// is the transposed rows) and transpose the results (so that it goes back
// in normal/row positions).
int pass;
// We need an intermediate buffer between passes.
tran_low_t intermediate[4 * 4];
const tran_low_t *in_low = NULL;
tran_low_t *out = intermediate;
// Do the two transform/transpose passes
for (pass = 0; pass < 2; ++pass) {
tran_high_t in_high[4]; // canbe16
tran_high_t step[4]; // canbe16
tran_high_t temp1, temp2; // needs32
int i;
for (i = 0; i < 4; ++i) {
// Load inputs.
if (pass == 0) {
in_high[0] = input[0 * stride] * 16;
in_high[1] = input[1 * stride] * 16;
in_high[2] = input[2 * stride] * 16;
in_high[3] = input[3 * stride] * 16;
if (i == 0 && in_high[0]) {
++in_high[0];
}
} else {
assert(in_low != NULL);
in_high[0] = in_low[0 * 4];
in_high[1] = in_low[1 * 4];
in_high[2] = in_low[2 * 4];
in_high[3] = in_low[3 * 4];
++in_low;
}
// Transform.
step[0] = in_high[0] + in_high[3];
step[1] = in_high[1] + in_high[2];
step[2] = in_high[1] - in_high[2];
step[3] = in_high[0] - in_high[3];
temp1 = (step[0] + step[1]) * cospi_16_64;
temp2 = (step[0] - step[1]) * cospi_16_64;
out[0] = (tran_low_t)fdct_round_shift(temp1);
out[2] = (tran_low_t)fdct_round_shift(temp2);
temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
out[1] = (tran_low_t)fdct_round_shift(temp1);
out[3] = (tran_low_t)fdct_round_shift(temp2);
// Do next column (which is a transposed row in second/horizontal pass)
++input;
out += 4;
}
// Setup in/out for next pass.
in_low = intermediate;
out = output;
}
{
int i, j;
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
}
}
}
static inline tran_high_t dct_const_round_shift(tran_high_t input) {
tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
return (tran_high_t)rv;
}
static inline tran_high_t check_range(tran_high_t input) {
#ifdef CONFIG_COEFFICIENT_RANGE_CHECKING
// For valid VP9 input streams, intermediate stage coefficients should always
// stay within the range of a signed 16 bit integer. Coefficients can go out
// of this range for invalid/corrupt VP9 streams. However, strictly checking
// this range for every intermediate coefficient can be burdensome for a decoder,
// therefore the following assertion is only enabled when configured with
// --enable-coefficient-range-checking.
assert(INT16_MIN <= input);
assert(input <= INT16_MAX);
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
return input;
}
#define WRAPLOW(x) ((int32_t)check_range(x))
void idct4_c(const tran_low_t *input, tran_low_t *output) {
int16_t step[4];
tran_high_t temp1, temp2;
// stage 1
temp1 = ((int16_t)input[0] + (int16_t)input[2]) * cospi_16_64;
temp2 = ((int16_t)input[0] - (int16_t)input[2]) * cospi_16_64;
step[0] = WRAPLOW(dct_const_round_shift(temp1));
step[1] = WRAPLOW(dct_const_round_shift(temp2));
temp1 = (int16_t)input[1] * cospi_24_64 - (int16_t)input[3] * cospi_8_64;
temp2 = (int16_t)input[1] * cospi_8_64 + (int16_t)input[3] * cospi_24_64;
step[2] = WRAPLOW(dct_const_round_shift(temp1));
step[3] = WRAPLOW(dct_const_round_shift(temp2));
// stage 2
output[0] = WRAPLOW(step[0] + step[3]);
output[1] = WRAPLOW(step[1] + step[2]);
output[2] = WRAPLOW(step[1] - step[2]);
output[3] = WRAPLOW(step[0] - step[3]);
}
void vpx_idct4x4_16_add_c(const tran_low_t *input, tran_low_t *dest, int stride) {
int i, j;
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
tran_low_t temp_in[4], temp_out[4];
// Rows
for (i = 0; i < 4; ++i) {
idct4_c(input, outptr);
input += 4;
outptr += 4;
}
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
idct4_c(temp_in, temp_out);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = ROUND_POWER_OF_TWO(temp_out[j], 4);
//dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4));
}
}
}
static inline int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
return vld1q_s16(buf);
}
static inline void transpose_s16_4x4q(int16x8_t *a0, int16x8_t *a1) {
// Swap 32 bit elements. Goes from:
// a0: 00 01 02 03 10 11 12 13
// a1: 20 21 22 23 30 31 32 33
// to:
// b0.val[0]: 00 01 20 21 10 11 30 31
// b0.val[1]: 02 03 22 23 12 13 32 33
const int32x4x2_t b0 =
vtrnq_s32(vreinterpretq_s32_s16(*a0), vreinterpretq_s32_s16(*a1));
// Swap 64 bit elements resulting in:
// c0: 00 01 20 21 02 03 22 23
// c1: 10 11 30 31 12 13 32 33
const int32x4_t c0 =
vcombine_s32(vget_low_s32(b0.val[0]), vget_low_s32(b0.val[1]));
const int32x4_t c1 =
vcombine_s32(vget_high_s32(b0.val[0]), vget_high_s32(b0.val[1]));
// Swap 16 bit elements resulting in:
// d0.val[0]: 00 10 20 30 02 12 22 32
// d0.val[1]: 01 11 21 31 03 13 23 33
const int16x8x2_t d0 =
vtrnq_s16(vreinterpretq_s16_s32(c0), vreinterpretq_s16_s32(c1));
*a0 = d0.val[0];
*a1 = d0.val[1];
}
static inline int16x8_t dct_const_round_shift_low_8(const int32x4_t *const in) {
return vcombine_s16(vrshrn_n_s32(in[0], DCT_CONST_BITS),
vrshrn_n_s32(in[1], DCT_CONST_BITS));
}
static inline void dct_const_round_shift_low_8_dual(const int32x4_t *const t32,
int16x8_t *const d0,
int16x8_t *const d1) {
*d0 = dct_const_round_shift_low_8(t32 + 0);
*d1 = dct_const_round_shift_low_8(t32 + 2);
}
static const int16_t kCospi[16] = {
16384 /* cospi_0_64 */, 15137 /* cospi_8_64 */,
11585 /* cospi_16_64 */, 6270 /* cospi_24_64 */,
16069 /* cospi_4_64 */, 13623 /* cospi_12_64 */,
-9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */,
16305 /* cospi_2_64 */, 1606 /* cospi_30_64 */,
14449 /* cospi_10_64 */, 7723 /* cospi_22_64 */,
15679 /* cospi_6_64 */, -4756 /* -cospi_26_64 */,
12665 /* cospi_14_64 */, -10394 /* -cospi_18_64 */
};
static inline void idct4x4_16_kernel_bd8(int16x8_t *const a) {
const int16x4_t cospis = vld1_s16(kCospi);
int16x4_t b[4];
int32x4_t c[4];
int16x8_t d[2];
b[0] = vget_low_s16(a[0]);
b[1] = vget_high_s16(a[0]);
b[2] = vget_low_s16(a[1]);
b[3] = vget_high_s16(a[1]);
c[0] = vmull_lane_s16(b[0], cospis, 2);
c[2] = vmull_lane_s16(b[1], cospis, 2);
c[1] = vsubq_s32(c[0], c[2]);
c[0] = vaddq_s32(c[0], c[2]);
c[3] = vmull_lane_s16(b[2], cospis, 3);
c[2] = vmull_lane_s16(b[2], cospis, 1);
c[3] = vmlsl_lane_s16(c[3], b[3], cospis, 1);
c[2] = vmlal_lane_s16(c[2], b[3], cospis, 3);
dct_const_round_shift_low_8_dual(c, &d[0], &d[1]);
a[0] = vaddq_s16(d[0], d[1]);
a[1] = vsubq_s16(d[0], d[1]);
}
static inline void transpose_idct4x4_16_bd8(int16x8_t *const a) {
transpose_s16_4x4q(&a[0], &a[1]);
idct4x4_16_kernel_bd8(a);
}
inline void vpx_idct4x4_16_add_neon(const int16x8_t &top64, const int16x8_t &bottom64, int16_t *dest, int16_t multiplier) {
int16x8_t a[2];
assert(!((intptr_t)dest % sizeof(uint32_t)));
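/* A single multiplier dequantizes all 16 coefficients here; this assumes the
 * DC and AC quantizers are equal (dct4x4QuantDC == dct4x4QuantAC below). */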
int16x8_t mul = vdupq_n_s16(multiplier);
// Rows
a[0] = vmulq_s16(top64, mul);
a[1] = vmulq_s16(bottom64, mul);
transpose_idct4x4_16_bd8(a);
// Columns
a[1] = vcombine_s16(vget_high_s16(a[1]), vget_low_s16(a[1]));
transpose_idct4x4_16_bd8(a);
a[0] = vrshrq_n_s16(a[0], 4);
a[1] = vrshrq_n_s16(a[1], 4);
vst1q_s16(dest, a[0]);
dest += 2 * 4;
vst1_s16(dest, vget_high_s16(a[1]));
dest += 4;
vst1_s16(dest, vget_low_s16(a[1]));
}
static int dct4x4QuantDC = 60;
static int dct4x4QuantAC = 60;
void performForward4x4Dct(int16_t const *normalizedCoefficients, int16_t *coefficients, int width, int height, DCTELEM *divisors) {
DCTELEM block[4 * 4];
DCTELEM coefBlock[4 * 4];
//int acOffset = (width / 4) * (height / 4);
for (int y = 0; y < height; y += 4) {
for (int x = 0; x < width; x += 4) {
for (int blockY = 0; blockY < 4; blockY++) {
for (int blockX = 0; blockX < 4; blockX++) {
block[blockY * 4 + blockX] = normalizedCoefficients[(y + blockY) * width + (x + blockX)];
}
}
vpx_fdct4x4_c(block, coefBlock, 4);
coefBlock[0] /= dct4x4QuantDC;
for (int blockY = 0; blockY < 4; blockY++) {
for (int blockX = 0; blockX < 4; blockX++) {
if (blockX == 0 && blockY == 0) {
continue;
}
coefBlock[blockY * 4 + blockX] /= dct4x4QuantAC;
}
}
//coefficients[(y / 4) * (width / 4) + x / 4] = coefBlock[0];
for (int blockY = 0; blockY < 4; blockY++) {
for (int blockX = 0; blockX < 4; blockX++) {
/*if (blockX == 0 && blockY == 0) {
continue;
}*/
coefficients[(y + blockY) * width + (x + blockX)] = coefBlock[zigZag4x4Inv[blockY * 4 + blockX]];
//coefficients[acOffset] = coefBlock[zigZag4x4Inv[blockY * 4 + blockX]];
//acOffset++;
//coefficients[(y + blockY) * width + (x + blockX)] = coefBlock[blockY * 4 + blockX];
//int targetIndex = (blockY * 4 + blockX) * (width / 4 * height / 4) + blockIndex;
//coefficients[targetIndex] = coefBlock[zigZag4x4Inv[blockY * 4 + blockX]];
}
}
}
}
}
void performInverse4x4Dct(int16_t const * coefficients, int16_t *normalizedCoefficients, int width, int height, DctAuxiliaryData *auxiliaryData, IFAST_MULT_TYPE *ifmtbl) {
//DCTELEM coefficientBlock[4 * 4];
DCTELEM resultBlock[4 * 4];
for (int y = 0; y < height; y += 4) {
for (int x = 0; x < width; x += 4) {
uint32x2_t sa = vld1_u32((uint32_t *)&coefficients[(y + 0) * width + x]);
uint32x2_t sb = vld1_u32((uint32_t *)&coefficients[(y + 1) * width + x]);
uint32x2_t sc = vld1_u32((uint32_t *)&coefficients[(y + 2) * width + x]);
uint32x2_t sd = vld1_u32((uint32_t *)&coefficients[(y + 3) * width + x]);
uint8x16_t top = vreinterpretq_u8_u32(vcombine_u32(sa, sb));
uint8x16_t bottom = vreinterpretq_u8_u32(vcombine_u32(sc, sd));
uint8x16x2_t quad = vzipq_u8(top, bottom);
uint8_t topReorderIndices[16] = {0, 2, 4, 6, 20, 22, 24, 26, 8, 10, 16, 18, 28, 30, 17, 19};
uint8_t bottomReorderIndices[16] = {12, 14, 1, 3, 13, 15, 21, 23, 5, 7, 9, 11, 25, 27, 29, 31};
uint8x16_t qtop = vqtbl2q_u8(quad, vld1q_u8(topReorderIndices));
uint8x16_t qbottom = vqtbl2q_u8(quad, vld1q_u8(bottomReorderIndices));
uint16x8_t qtop16 = vreinterpretq_u16_u8(qtop);
uint16x8_t qbottom16 = vreinterpretq_u16_u8(qbottom);
int16x8_t top64 = vreinterpretq_s16_u16(qtop16);
int16x8_t bottom64 = vreinterpretq_s16_u16(qbottom16);
/*for (int blockY = 0; blockY < 4; blockY++) {
for (int blockX = 0; blockX < 4; blockX++) {
coefficientBlock[zigZag4x4Inv[blockY * 4 + blockX]] = coefficients[(y + blockY) * width + (x + blockX)];
}
}*/
vpx_idct4x4_16_add_neon(top64, bottom64, resultBlock, dct4x4QuantAC);
uint32x2_t a = vld1_u32((uint32_t *)&resultBlock[4 * 0]);
uint32x2_t b = vld1_u32((uint32_t *)&resultBlock[4 * 1]);
uint32x2_t c = vld1_u32((uint32_t *)&resultBlock[4 * 2]);
uint32x2_t d = vld1_u32((uint32_t *)&resultBlock[4 * 3]);
vst1_u32((uint32_t *)&normalizedCoefficients[(y + 0) * width + x], a);
vst1_u32((uint32_t *)&normalizedCoefficients[(y + 1) * width + x], b);
vst1_u32((uint32_t *)&normalizedCoefficients[(y + 2) * width + x], c);
vst1_u32((uint32_t *)&normalizedCoefficients[(y + 3) * width + x], d);
for (int blockY = 0; blockY < 4; blockY++) {
for (int blockX = 0; blockX < 4; blockX++) {
//normalizedCoefficients[(y + blockY) * width + (x + blockX)] = resultBlock[blockY * 4 + blockX];
}
}
}
}
}
namespace dct {
DCTTable DCTTable::generate(int quality, bool isChroma) {
DCTTable DCTTable::generate(int quality, DCTTable::Type type) {
DCTTable result;
result.table.resize(DCTSIZE2);
if (isChroma) {
jpeg_set_quality(result.table.data(), std_chrominance_quant_tbl, quality);
} else {
jpeg_set_quality(result.table.data(), std_luminance_quant_tbl, quality);
switch (type) {
case DCTTable::Type::Luma:
jpeg_set_quality(result.table.data(), std_luminance_quant_tbl, quality);
break;
case DCTTable::Type::Chroma:
jpeg_set_quality(result.table.data(), std_chrominance_quant_tbl, quality);
break;
case DCTTable::Type::Delta:
jpeg_set_quality(result.table.data(), std_delta_quant_tbl, quality);
break;
default:
jpeg_set_quality(result.table.data(), std_luminance_quant_tbl, quality);
break;
}
return result;
@@ -395,4 +924,12 @@ void DCT::inverse(int16_t const *coefficients, uint8_t *pixels, int width, int h
performInverseDct(coefficients, pixels, width, height, coefficientsPerRow, bytesPerRow, _internal->auxiliaryData, (IFAST_MULT_TYPE *)_internal->inverseDctData.data());
}
void DCT::forward4x4(int16_t const *normalizedCoefficients, int16_t *coefficients, int width, int height) {
performForward4x4Dct(normalizedCoefficients, coefficients, width, height, (DCTELEM *)_internal->forwardDctData.data());
}
void DCT::inverse4x4(int16_t const *coefficients, int16_t *normalizedCoefficients, int width, int height) {
performInverse4x4Dct(coefficients, normalizedCoefficients, width, height, _internal->auxiliaryData, (IFAST_MULT_TYPE *)_internal->inverseDctData.data());
}
}

View File

@@ -11,7 +11,13 @@ namespace dct {
class DCTInternal;
struct DCTTable {
static DCTTable generate(int quality, bool isChroma);
enum class Type {
Luma,
Chroma,
Delta
};
static DCTTable generate(int quality, Type type);
static DCTTable initializeEmpty();
std::vector<int16_t> table;
@@ -24,6 +30,8 @@ public:
void forward(uint8_t const *pixels, int16_t *coefficients, int width, int height, int bytesPerRow);
void inverse(int16_t const *coefficients, uint8_t *pixels, int width, int height, int coefficientsPerRow, int bytesPerRow);
void forward4x4(int16_t const *normalizedCoefficients, int16_t *coefficients, int width, int height);
void inverse4x4(int16_t const *coefficients, int16_t *normalizedCoefficients, int width, int height);
private:
DCTInternal *_internal;

View File

@@ -18,6 +18,7 @@ struct DctAuxiliaryData *createDctAuxiliaryData();
void freeDctAuxiliaryData(struct DctAuxiliaryData *data);
void dct_jpeg_idct_ifast(struct DctAuxiliaryData *auxiliaryData, void *dct_table, JCOEFPTR coef_block, JSAMPROW output_buf);
void dct_jpeg_idct_ifast_normalized(struct DctAuxiliaryData *auxiliaryData, void *dct_table, JCOEFPTR coef_block, JCOEFPTR output_buf);
void dct_jpeg_fdct_ifast(DCTELEM *data);
#ifdef __cplusplus

View File

@@ -96,6 +96,27 @@ __attribute__((aligned(16))) static const int16_t jsimd_fdct_ifast_neon_consts[]
F_0_382, F_0_541, F_0_707, F_0_306
};
#define FIX_0_382683433 ((JLONG)98) /* FIX(0.382683433) */
#define FIX_0_541196100 ((JLONG)139) /* FIX(0.541196100) */
#define FIX_0_707106781 ((JLONG)181) /* FIX(0.707106781) */
#define FIX_1_306562965 ((JLONG)334) /* FIX(1.306562965) */
#define FIX_1_082392200 ((JLONG)277) /* FIX(1.082392200) */
#define FIX_1_414213562 ((JLONG)362) /* FIX(1.414213562) */
#define FIX_1_847759065 ((JLONG)473) /* FIX(1.847759065) */
#define FIX_2_613125930 ((JLONG)669) /* FIX(2.613125930) */
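/* FIX(x) denotes round(x * 2^CONST_BITS); with CONST_BITS == 8,
 * FIX(1.414213562) == 362. */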
#define CONST_BITS 8
#define RIGHT_SHIFT(x, shft) ((x) >> (shft))
#define IRIGHT_SHIFT(x, shft) ((x) >> (shft))
#define DESCALE(x, n) RIGHT_SHIFT(x, n)
#define IDESCALE(x, n) ((int)IRIGHT_SHIFT(x, n))
#define MULTIPLY(var, const) ((DCTELEM)DESCALE((var) * (const), CONST_BITS))
#define DEQUANTIZE(coef, quantval) (((IFAST_MULT_TYPE)(coef)) * (quantval))
#define NO_ZERO_ROW_TEST
void dct_jpeg_fdct_ifast(DCTELEM *data) {
/* Load an 8x8 block of samples into Neon registers. De-interleaving loads
* are used, followed by vuzp to transpose the block such that we have a
@@ -674,4 +695,626 @@ void dct_jpeg_idct_ifast(struct DctAuxiliaryData *auxiliaryData, void *dct_table
vst1q_lane_u64((uint64_t *)outptr7, vreinterpretq_u64_u8(rows_37), 1);
}
void dct_jpeg_idct_ifast_normalized_neon(struct DctAuxiliaryData *auxiliaryData, void *dct_table, JCOEFPTR coef_block, JCOEFPTR output_buf)
{
IFAST_MULT_TYPE *quantptr = dct_table;
/* Load DCT coefficients. */
int16x8_t row0 = vld1q_s16(coef_block + 0 * DCTSIZE);
int16x8_t row1 = vld1q_s16(coef_block + 1 * DCTSIZE);
int16x8_t row2 = vld1q_s16(coef_block + 2 * DCTSIZE);
int16x8_t row3 = vld1q_s16(coef_block + 3 * DCTSIZE);
int16x8_t row4 = vld1q_s16(coef_block + 4 * DCTSIZE);
int16x8_t row5 = vld1q_s16(coef_block + 5 * DCTSIZE);
int16x8_t row6 = vld1q_s16(coef_block + 6 * DCTSIZE);
int16x8_t row7 = vld1q_s16(coef_block + 7 * DCTSIZE);
/* Load quantization table values for DC coefficients. */
int16x8_t quant_row0 = vld1q_s16(quantptr + 0 * DCTSIZE);
/* Dequantize DC coefficients. */
row0 = vmulq_s16(row0, quant_row0);
/* Construct bitmap to test if all AC coefficients are 0. */
int16x8_t bitmap = vorrq_s16(row1, row2);
bitmap = vorrq_s16(bitmap, row3);
bitmap = vorrq_s16(bitmap, row4);
bitmap = vorrq_s16(bitmap, row5);
bitmap = vorrq_s16(bitmap, row6);
bitmap = vorrq_s16(bitmap, row7);
int64_t left_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 0);
int64_t right_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 1);
/* Load IDCT conversion constants. */
const int16x4_t consts = vld1_s16(jsimd_idct_ifast_neon_consts);
if (left_ac_bitmap == 0 && right_ac_bitmap == 0) {
/* All AC coefficients are zero.
* Compute DC values and duplicate into vectors.
*/
int16x8_t dcval = row0;
row1 = dcval;
row2 = dcval;
row3 = dcval;
row4 = dcval;
row5 = dcval;
row6 = dcval;
row7 = dcval;
} else if (left_ac_bitmap == 0) {
/* AC coefficients are zero for columns 0, 1, 2, and 3.
* Use DC values for these columns.
*/
int16x4_t dcval = vget_low_s16(row0);
/* Commence regular fast IDCT computation for columns 4, 5, 6, and 7. */
/* Load quantization table. */
int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE + 4);
int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE + 4);
int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE + 4);
int16x4_t quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE + 4);
int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE + 4);
int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE + 4);
int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE + 4);
/* Even part: dequantize DCT coefficients. */
int16x4_t tmp0 = vget_high_s16(row0);
int16x4_t tmp1 = vmul_s16(vget_high_s16(row2), quant_row2);
int16x4_t tmp2 = vmul_s16(vget_high_s16(row4), quant_row4);
int16x4_t tmp3 = vmul_s16(vget_high_s16(row6), quant_row6);
int16x4_t tmp10 = vadd_s16(tmp0, tmp2); /* phase 3 */
int16x4_t tmp11 = vsub_s16(tmp0, tmp2);
int16x4_t tmp13 = vadd_s16(tmp1, tmp3); /* phases 5-3 */
int16x4_t tmp1_sub_tmp3 = vsub_s16(tmp1, tmp3);
int16x4_t tmp12 = vqdmulh_lane_s16(tmp1_sub_tmp3, consts, 1);
tmp12 = vadd_s16(tmp12, tmp1_sub_tmp3);
tmp12 = vsub_s16(tmp12, tmp13);
tmp0 = vadd_s16(tmp10, tmp13); /* phase 2 */
tmp3 = vsub_s16(tmp10, tmp13);
tmp1 = vadd_s16(tmp11, tmp12);
tmp2 = vsub_s16(tmp11, tmp12);
/* Odd part: dequantize DCT coefficients. */
int16x4_t tmp4 = vmul_s16(vget_high_s16(row1), quant_row1);
int16x4_t tmp5 = vmul_s16(vget_high_s16(row3), quant_row3);
int16x4_t tmp6 = vmul_s16(vget_high_s16(row5), quant_row5);
int16x4_t tmp7 = vmul_s16(vget_high_s16(row7), quant_row7);
int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */
int16x4_t neg_z10 = vsub_s16(tmp5, tmp6);
int16x4_t z11 = vadd_s16(tmp4, tmp7);
int16x4_t z12 = vsub_s16(tmp4, tmp7);
tmp7 = vadd_s16(z11, z13); /* phase 5 */
int16x4_t z11_sub_z13 = vsub_s16(z11, z13);
tmp11 = vqdmulh_lane_s16(z11_sub_z13, consts, 1);
tmp11 = vadd_s16(tmp11, z11_sub_z13);
int16x4_t z10_add_z12 = vsub_s16(z12, neg_z10);
int16x4_t z5 = vqdmulh_lane_s16(z10_add_z12, consts, 2);
z5 = vadd_s16(z5, z10_add_z12);
tmp10 = vqdmulh_lane_s16(z12, consts, 0);
tmp10 = vadd_s16(tmp10, z12);
tmp10 = vsub_s16(tmp10, z5);
tmp12 = vqdmulh_lane_s16(neg_z10, consts, 3);
tmp12 = vadd_s16(tmp12, vadd_s16(neg_z10, neg_z10));
tmp12 = vadd_s16(tmp12, z5);
tmp6 = vsub_s16(tmp12, tmp7); /* phase 2 */
tmp5 = vsub_s16(tmp11, tmp6);
tmp4 = vadd_s16(tmp10, tmp5);
row0 = vcombine_s16(dcval, vadd_s16(tmp0, tmp7));
row7 = vcombine_s16(dcval, vsub_s16(tmp0, tmp7));
row1 = vcombine_s16(dcval, vadd_s16(tmp1, tmp6));
row6 = vcombine_s16(dcval, vsub_s16(tmp1, tmp6));
row2 = vcombine_s16(dcval, vadd_s16(tmp2, tmp5));
row5 = vcombine_s16(dcval, vsub_s16(tmp2, tmp5));
row4 = vcombine_s16(dcval, vadd_s16(tmp3, tmp4));
row3 = vcombine_s16(dcval, vsub_s16(tmp3, tmp4));
} else if (right_ac_bitmap == 0) {
/* AC coefficients are zero for columns 4, 5, 6, and 7.
* Use DC values for these columns.
*/
int16x4_t dcval = vget_high_s16(row0);
/* Commence regular fast IDCT computation for columns 0, 1, 2, and 3. */
/* Load quantization table. */
int16x4_t quant_row1 = vld1_s16(quantptr + 1 * DCTSIZE);
int16x4_t quant_row2 = vld1_s16(quantptr + 2 * DCTSIZE);
int16x4_t quant_row3 = vld1_s16(quantptr + 3 * DCTSIZE);
int16x4_t quant_row4 = vld1_s16(quantptr + 4 * DCTSIZE);
int16x4_t quant_row5 = vld1_s16(quantptr + 5 * DCTSIZE);
int16x4_t quant_row6 = vld1_s16(quantptr + 6 * DCTSIZE);
int16x4_t quant_row7 = vld1_s16(quantptr + 7 * DCTSIZE);
/* Even part: dequantize DCT coefficients. */
int16x4_t tmp0 = vget_low_s16(row0);
int16x4_t tmp1 = vmul_s16(vget_low_s16(row2), quant_row2);
int16x4_t tmp2 = vmul_s16(vget_low_s16(row4), quant_row4);
int16x4_t tmp3 = vmul_s16(vget_low_s16(row6), quant_row6);
int16x4_t tmp10 = vadd_s16(tmp0, tmp2); /* phase 3 */
int16x4_t tmp11 = vsub_s16(tmp0, tmp2);
int16x4_t tmp13 = vadd_s16(tmp1, tmp3); /* phases 5-3 */
int16x4_t tmp1_sub_tmp3 = vsub_s16(tmp1, tmp3);
int16x4_t tmp12 = vqdmulh_lane_s16(tmp1_sub_tmp3, consts, 1);
tmp12 = vadd_s16(tmp12, tmp1_sub_tmp3);
tmp12 = vsub_s16(tmp12, tmp13);
tmp0 = vadd_s16(tmp10, tmp13); /* phase 2 */
tmp3 = vsub_s16(tmp10, tmp13);
tmp1 = vadd_s16(tmp11, tmp12);
tmp2 = vsub_s16(tmp11, tmp12);
/* Odd part: dequantize DCT coefficients. */
int16x4_t tmp4 = vmul_s16(vget_low_s16(row1), quant_row1);
int16x4_t tmp5 = vmul_s16(vget_low_s16(row3), quant_row3);
int16x4_t tmp6 = vmul_s16(vget_low_s16(row5), quant_row5);
int16x4_t tmp7 = vmul_s16(vget_low_s16(row7), quant_row7);
int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */
int16x4_t neg_z10 = vsub_s16(tmp5, tmp6);
int16x4_t z11 = vadd_s16(tmp4, tmp7);
int16x4_t z12 = vsub_s16(tmp4, tmp7);
tmp7 = vadd_s16(z11, z13); /* phase 5 */
int16x4_t z11_sub_z13 = vsub_s16(z11, z13);
tmp11 = vqdmulh_lane_s16(z11_sub_z13, consts, 1);
tmp11 = vadd_s16(tmp11, z11_sub_z13);
int16x4_t z10_add_z12 = vsub_s16(z12, neg_z10);
int16x4_t z5 = vqdmulh_lane_s16(z10_add_z12, consts, 2);
z5 = vadd_s16(z5, z10_add_z12);
tmp10 = vqdmulh_lane_s16(z12, consts, 0);
tmp10 = vadd_s16(tmp10, z12);
tmp10 = vsub_s16(tmp10, z5);
tmp12 = vqdmulh_lane_s16(neg_z10, consts, 3);
tmp12 = vadd_s16(tmp12, vadd_s16(neg_z10, neg_z10));
tmp12 = vadd_s16(tmp12, z5);
tmp6 = vsub_s16(tmp12, tmp7); /* phase 2 */
tmp5 = vsub_s16(tmp11, tmp6);
tmp4 = vadd_s16(tmp10, tmp5);
row0 = vcombine_s16(vadd_s16(tmp0, tmp7), dcval);
row7 = vcombine_s16(vsub_s16(tmp0, tmp7), dcval);
row1 = vcombine_s16(vadd_s16(tmp1, tmp6), dcval);
row6 = vcombine_s16(vsub_s16(tmp1, tmp6), dcval);
row2 = vcombine_s16(vadd_s16(tmp2, tmp5), dcval);
row5 = vcombine_s16(vsub_s16(tmp2, tmp5), dcval);
row4 = vcombine_s16(vadd_s16(tmp3, tmp4), dcval);
row3 = vcombine_s16(vsub_s16(tmp3, tmp4), dcval);
} else {
/* Some AC coefficients are non-zero; full IDCT calculation required. */
/* Load quantization table. */
int16x8_t quant_row1 = vld1q_s16(quantptr + 1 * DCTSIZE);
int16x8_t quant_row2 = vld1q_s16(quantptr + 2 * DCTSIZE);
int16x8_t quant_row3 = vld1q_s16(quantptr + 3 * DCTSIZE);
int16x8_t quant_row4 = vld1q_s16(quantptr + 4 * DCTSIZE);
int16x8_t quant_row5 = vld1q_s16(quantptr + 5 * DCTSIZE);
int16x8_t quant_row6 = vld1q_s16(quantptr + 6 * DCTSIZE);
int16x8_t quant_row7 = vld1q_s16(quantptr + 7 * DCTSIZE);
/* Even part: dequantize DCT coefficients. */
int16x8_t tmp0 = row0;
int16x8_t tmp1 = vmulq_s16(row2, quant_row2);
int16x8_t tmp2 = vmulq_s16(row4, quant_row4);
int16x8_t tmp3 = vmulq_s16(row6, quant_row6);
int16x8_t tmp10 = vaddq_s16(tmp0, tmp2); /* phase 3 */
int16x8_t tmp11 = vsubq_s16(tmp0, tmp2);
int16x8_t tmp13 = vaddq_s16(tmp1, tmp3); /* phases 5-3 */
int16x8_t tmp1_sub_tmp3 = vsubq_s16(tmp1, tmp3);
int16x8_t tmp12 = vqdmulhq_lane_s16(tmp1_sub_tmp3, consts, 1);
tmp12 = vaddq_s16(tmp12, tmp1_sub_tmp3);
tmp12 = vsubq_s16(tmp12, tmp13);
tmp0 = vaddq_s16(tmp10, tmp13); /* phase 2 */
tmp3 = vsubq_s16(tmp10, tmp13);
tmp1 = vaddq_s16(tmp11, tmp12);
tmp2 = vsubq_s16(tmp11, tmp12);
/* Odd part: dequantize DCT coefficients. */
int16x8_t tmp4 = vmulq_s16(row1, quant_row1);
int16x8_t tmp5 = vmulq_s16(row3, quant_row3);
int16x8_t tmp6 = vmulq_s16(row5, quant_row5);
int16x8_t tmp7 = vmulq_s16(row7, quant_row7);
int16x8_t z13 = vaddq_s16(tmp6, tmp5); /* phase 6 */
int16x8_t neg_z10 = vsubq_s16(tmp5, tmp6);
int16x8_t z11 = vaddq_s16(tmp4, tmp7);
int16x8_t z12 = vsubq_s16(tmp4, tmp7);
tmp7 = vaddq_s16(z11, z13); /* phase 5 */
int16x8_t z11_sub_z13 = vsubq_s16(z11, z13);
tmp11 = vqdmulhq_lane_s16(z11_sub_z13, consts, 1);
tmp11 = vaddq_s16(tmp11, z11_sub_z13);
int16x8_t z10_add_z12 = vsubq_s16(z12, neg_z10);
int16x8_t z5 = vqdmulhq_lane_s16(z10_add_z12, consts, 2);
z5 = vaddq_s16(z5, z10_add_z12);
tmp10 = vqdmulhq_lane_s16(z12, consts, 0);
tmp10 = vaddq_s16(tmp10, z12);
tmp10 = vsubq_s16(tmp10, z5);
tmp12 = vqdmulhq_lane_s16(neg_z10, consts, 3);
tmp12 = vaddq_s16(tmp12, vaddq_s16(neg_z10, neg_z10));
tmp12 = vaddq_s16(tmp12, z5);
tmp6 = vsubq_s16(tmp12, tmp7); /* phase 2 */
tmp5 = vsubq_s16(tmp11, tmp6);
tmp4 = vaddq_s16(tmp10, tmp5);
row0 = vaddq_s16(tmp0, tmp7);
row7 = vsubq_s16(tmp0, tmp7);
row1 = vaddq_s16(tmp1, tmp6);
row6 = vsubq_s16(tmp1, tmp6);
row2 = vaddq_s16(tmp2, tmp5);
row5 = vsubq_s16(tmp2, tmp5);
row4 = vaddq_s16(tmp3, tmp4);
row3 = vsubq_s16(tmp3, tmp4);
}
/* Transpose rows to work on columns in pass 2. */
int16x8x2_t rows_01 = vtrnq_s16(row0, row1);
int16x8x2_t rows_23 = vtrnq_s16(row2, row3);
int16x8x2_t rows_45 = vtrnq_s16(row4, row5);
int16x8x2_t rows_67 = vtrnq_s16(row6, row7);
int32x4x2_t rows_0145_l = vtrnq_s32(vreinterpretq_s32_s16(rows_01.val[0]),
vreinterpretq_s32_s16(rows_45.val[0]));
int32x4x2_t rows_0145_h = vtrnq_s32(vreinterpretq_s32_s16(rows_01.val[1]),
vreinterpretq_s32_s16(rows_45.val[1]));
int32x4x2_t rows_2367_l = vtrnq_s32(vreinterpretq_s32_s16(rows_23.val[0]),
vreinterpretq_s32_s16(rows_67.val[0]));
int32x4x2_t rows_2367_h = vtrnq_s32(vreinterpretq_s32_s16(rows_23.val[1]),
vreinterpretq_s32_s16(rows_67.val[1]));
int32x4x2_t cols_04 = vzipq_s32(rows_0145_l.val[0], rows_2367_l.val[0]);
int32x4x2_t cols_15 = vzipq_s32(rows_0145_h.val[0], rows_2367_h.val[0]);
int32x4x2_t cols_26 = vzipq_s32(rows_0145_l.val[1], rows_2367_l.val[1]);
int32x4x2_t cols_37 = vzipq_s32(rows_0145_h.val[1], rows_2367_h.val[1]);
int16x8_t col0 = vreinterpretq_s16_s32(cols_04.val[0]);
int16x8_t col1 = vreinterpretq_s16_s32(cols_15.val[0]);
int16x8_t col2 = vreinterpretq_s16_s32(cols_26.val[0]);
int16x8_t col3 = vreinterpretq_s16_s32(cols_37.val[0]);
int16x8_t col4 = vreinterpretq_s16_s32(cols_04.val[1]);
int16x8_t col5 = vreinterpretq_s16_s32(cols_15.val[1]);
int16x8_t col6 = vreinterpretq_s16_s32(cols_26.val[1]);
int16x8_t col7 = vreinterpretq_s16_s32(cols_37.val[1]);
/* 1-D IDCT, pass 2 */
/* Even part */
int16x8_t tmp10 = vaddq_s16(col0, col4);
int16x8_t tmp11 = vsubq_s16(col0, col4);
int16x8_t tmp13 = vaddq_s16(col2, col6);
int16x8_t col2_sub_col6 = vsubq_s16(col2, col6);
int16x8_t tmp12 = vqdmulhq_lane_s16(col2_sub_col6, consts, 1);
tmp12 = vaddq_s16(tmp12, col2_sub_col6);
tmp12 = vsubq_s16(tmp12, tmp13);
int16x8_t tmp0 = vaddq_s16(tmp10, tmp13);
int16x8_t tmp3 = vsubq_s16(tmp10, tmp13);
int16x8_t tmp1 = vaddq_s16(tmp11, tmp12);
int16x8_t tmp2 = vsubq_s16(tmp11, tmp12);
/* Odd part */
int16x8_t z13 = vaddq_s16(col5, col3);
int16x8_t neg_z10 = vsubq_s16(col3, col5);
int16x8_t z11 = vaddq_s16(col1, col7);
int16x8_t z12 = vsubq_s16(col1, col7);
int16x8_t tmp7 = vaddq_s16(z11, z13); /* phase 5 */
int16x8_t z11_sub_z13 = vsubq_s16(z11, z13);
tmp11 = vqdmulhq_lane_s16(z11_sub_z13, consts, 1);
tmp11 = vaddq_s16(tmp11, z11_sub_z13);
int16x8_t z10_add_z12 = vsubq_s16(z12, neg_z10);
int16x8_t z5 = vqdmulhq_lane_s16(z10_add_z12, consts, 2);
z5 = vaddq_s16(z5, z10_add_z12);
tmp10 = vqdmulhq_lane_s16(z12, consts, 0);
tmp10 = vaddq_s16(tmp10, z12);
tmp10 = vsubq_s16(tmp10, z5);
tmp12 = vqdmulhq_lane_s16(neg_z10, consts, 3);
tmp12 = vaddq_s16(tmp12, vaddq_s16(neg_z10, neg_z10));
tmp12 = vaddq_s16(tmp12, z5);
int16x8_t tmp6 = vsubq_s16(tmp12, tmp7); /* phase 2 */
int16x8_t tmp5 = vsubq_s16(tmp11, tmp6);
int16x8_t tmp4 = vaddq_s16(tmp10, tmp5);
col0 = vaddq_s16(tmp0, tmp7);
col7 = vsubq_s16(tmp0, tmp7);
col1 = vaddq_s16(tmp1, tmp6);
col6 = vsubq_s16(tmp1, tmp6);
col2 = vaddq_s16(tmp2, tmp5);
col5 = vsubq_s16(tmp2, tmp5);
col4 = vaddq_s16(tmp3, tmp4);
col3 = vsubq_s16(tmp3, tmp4);
/* Scale down by a factor of 8, narrowing to 8-bit. */
int8x16_t cols_01_s8 = vcombine_s8(vqshrn_n_s16(col0, PASS1_BITS + 3),
vqshrn_n_s16(col1, PASS1_BITS + 3));
int8x16_t cols_45_s8 = vcombine_s8(vqshrn_n_s16(col4, PASS1_BITS + 3),
vqshrn_n_s16(col5, PASS1_BITS + 3));
int8x16_t cols_23_s8 = vcombine_s8(vqshrn_n_s16(col2, PASS1_BITS + 3),
vqshrn_n_s16(col3, PASS1_BITS + 3));
int8x16_t cols_67_s8 = vcombine_s8(vqshrn_n_s16(col6, PASS1_BITS + 3),
vqshrn_n_s16(col7, PASS1_BITS + 3));
/* Clamp to range [0-255]. */
uint8x16_t cols_01 =
vreinterpretq_u8_s8
(vaddq_s8(cols_01_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE))));
uint8x16_t cols_45 =
vreinterpretq_u8_s8
(vaddq_s8(cols_45_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE))));
uint8x16_t cols_23 =
vreinterpretq_u8_s8
(vaddq_s8(cols_23_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE))));
uint8x16_t cols_67 =
vreinterpretq_u8_s8
(vaddq_s8(cols_67_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE))));
/* Transpose block to prepare for store. */
uint32x4x2_t cols_0415 = vzipq_u32(vreinterpretq_u32_u8(cols_01),
vreinterpretq_u32_u8(cols_45));
uint32x4x2_t cols_2637 = vzipq_u32(vreinterpretq_u32_u8(cols_23),
vreinterpretq_u32_u8(cols_67));
uint8x16x2_t cols_0145 = vtrnq_u8(vreinterpretq_u8_u32(cols_0415.val[0]),
vreinterpretq_u8_u32(cols_0415.val[1]));
uint8x16x2_t cols_2367 = vtrnq_u8(vreinterpretq_u8_u32(cols_2637.val[0]),
vreinterpretq_u8_u32(cols_2637.val[1]));
uint16x8x2_t rows_0426 = vtrnq_u16(vreinterpretq_u16_u8(cols_0145.val[0]),
vreinterpretq_u16_u8(cols_2367.val[0]));
uint16x8x2_t rows_1537 = vtrnq_u16(vreinterpretq_u16_u8(cols_0145.val[1]),
vreinterpretq_u16_u8(cols_2367.val[1]));
uint8x16_t rows_04 = vreinterpretq_u8_u16(rows_0426.val[0]);
uint8x16_t rows_15 = vreinterpretq_u8_u16(rows_1537.val[0]);
uint8x16_t rows_26 = vreinterpretq_u8_u16(rows_0426.val[1]);
uint8x16_t rows_37 = vreinterpretq_u8_u16(rows_1537.val[1]);
JCOEFPTR outptr0 = output_buf + DCTSIZE * 0;
JCOEFPTR outptr1 = output_buf + DCTSIZE * 1;
JCOEFPTR outptr2 = output_buf + DCTSIZE * 2;
JCOEFPTR outptr3 = output_buf + DCTSIZE * 3;
JCOEFPTR outptr4 = output_buf + DCTSIZE * 4;
JCOEFPTR outptr5 = output_buf + DCTSIZE * 5;
JCOEFPTR outptr6 = output_buf + DCTSIZE * 6;
JCOEFPTR outptr7 = output_buf + DCTSIZE * 7;
/* Store DCT block to memory. */
vst1q_lane_u64((uint64_t *)outptr0, vreinterpretq_u64_u8(rows_04), 0);
vst1q_lane_u64((uint64_t *)outptr1, vreinterpretq_u64_u8(rows_15), 0);
vst1q_lane_u64((uint64_t *)outptr2, vreinterpretq_u64_u8(rows_26), 0);
vst1q_lane_u64((uint64_t *)outptr3, vreinterpretq_u64_u8(rows_37), 0);
vst1q_lane_u64((uint64_t *)outptr4, vreinterpretq_u64_u8(rows_04), 1);
vst1q_lane_u64((uint64_t *)outptr5, vreinterpretq_u64_u8(rows_15), 1);
vst1q_lane_u64((uint64_t *)outptr6, vreinterpretq_u64_u8(rows_26), 1);
vst1q_lane_u64((uint64_t *)outptr7, vreinterpretq_u64_u8(rows_37), 1);
}
void dct_jpeg_idct_ifast_normalized(struct DctAuxiliaryData *auxiliaryData, void *dct_table, JCOEFPTR coef_block, JCOEFPTR output_buf) {
DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
DCTELEM tmp10, tmp11, tmp12, tmp13;
DCTELEM z5, z10, z11, z12, z13;
JCOEFPTR inptr;
IFAST_MULT_TYPE *quantptr;
int *wsptr;
JCOEFPTR outptr;
int ctr;
int workspace[DCTSIZE2]; /* buffers data between passes */
/* Pass 1: process columns from input, store into work array. */
inptr = coef_block;
quantptr = dct_table;
wsptr = workspace;
for (ctr = DCTSIZE; ctr > 0; ctr--) {
/* Due to quantization, we will usually find that many of the input
* coefficients are zero, especially the AC terms. We can exploit this
* by short-circuiting the IDCT calculation for any column in which all
* the AC terms are zero. In that case each output is equal to the
* DC coefficient (with scale factor as needed).
* With typical images and quantization tables, half or more of the
* column DCT calculations can be simplified this way.
*/
if (inptr[DCTSIZE * 1] == 0 && inptr[DCTSIZE * 2] == 0 &&
inptr[DCTSIZE * 3] == 0 && inptr[DCTSIZE * 4] == 0 &&
inptr[DCTSIZE * 5] == 0 && inptr[DCTSIZE * 6] == 0 &&
inptr[DCTSIZE * 7] == 0) {
/* AC terms all zero */
int dcval = (int)DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
wsptr[DCTSIZE * 0] = dcval;
wsptr[DCTSIZE * 1] = dcval;
wsptr[DCTSIZE * 2] = dcval;
wsptr[DCTSIZE * 3] = dcval;
wsptr[DCTSIZE * 4] = dcval;
wsptr[DCTSIZE * 5] = dcval;
wsptr[DCTSIZE * 6] = dcval;
wsptr[DCTSIZE * 7] = dcval;
inptr++; /* advance pointers to next column */
quantptr++;
wsptr++;
continue;
}
/* Even part */
tmp0 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
tmp1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
tmp2 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
tmp3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);
tmp10 = tmp0 + tmp2; /* phase 3 */
tmp11 = tmp0 - tmp2;
tmp13 = tmp1 + tmp3; /* phases 5-3 */
tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13; /* 2*c4 */
tmp0 = tmp10 + tmp13; /* phase 2 */
tmp3 = tmp10 - tmp13;
tmp1 = tmp11 + tmp12;
tmp2 = tmp11 - tmp12;
/* Odd part */
tmp4 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
tmp5 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
tmp6 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
tmp7 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);
z13 = tmp6 + tmp5; /* phase 6 */
z10 = tmp6 - tmp5;
z11 = tmp4 + tmp7;
z12 = tmp4 - tmp7;
tmp7 = z11 + z13; /* phase 5 */
tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */
z5 = MULTIPLY(z10 + z12, FIX_1_847759065); /* 2*c2 */
tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
tmp12 = MULTIPLY(z10, -FIX_2_613125930) + z5; /* -2*(c2+c6) */
tmp6 = tmp12 - tmp7; /* phase 2 */
tmp5 = tmp11 - tmp6;
tmp4 = tmp10 + tmp5;
wsptr[DCTSIZE * 0] = (int)(tmp0 + tmp7);
wsptr[DCTSIZE * 7] = (int)(tmp0 - tmp7);
wsptr[DCTSIZE * 1] = (int)(tmp1 + tmp6);
wsptr[DCTSIZE * 6] = (int)(tmp1 - tmp6);
wsptr[DCTSIZE * 2] = (int)(tmp2 + tmp5);
wsptr[DCTSIZE * 5] = (int)(tmp2 - tmp5);
wsptr[DCTSIZE * 4] = (int)(tmp3 + tmp4);
wsptr[DCTSIZE * 3] = (int)(tmp3 - tmp4);
inptr++; /* advance pointers to next column */
quantptr++;
wsptr++;
}
/* Pass 2: process rows from work array, store into output array. */
/* Note that we must descale the results by a factor of 8 == 2**3, */
/* and also undo the PASS1_BITS scaling. */
wsptr = workspace;
for (ctr = 0; ctr < DCTSIZE; ctr++) {
outptr = output_buf + ctr * DCTSIZE;
/* Rows of zeroes can be exploited in the same way as we did with columns.
* However, the column calculation has created many nonzero AC terms, so
* the simplification applies less often (typically 5% to 10% of the time).
* On machines with very fast multiplication, it's possible that the
* test takes more time than it's worth. In that case this section
* may be commented out.
*/
#ifndef NO_ZERO_ROW_TEST
if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
/* AC terms all zero */
//JSAMPLE dcval = range_limit[IDESCALE(wsptr[0], PASS1_BITS + 3) & RANGE_MASK];
JCOEF dcval = wsptr[0];
outptr[0] = dcval;
outptr[1] = dcval;
outptr[2] = dcval;
outptr[3] = dcval;
outptr[4] = dcval;
outptr[5] = dcval;
outptr[6] = dcval;
outptr[7] = dcval;
wsptr += DCTSIZE; /* advance pointer to next row */
continue;
}
#endif
/* Even part */
tmp10 = ((DCTELEM)wsptr[0] + (DCTELEM)wsptr[4]);
tmp11 = ((DCTELEM)wsptr[0] - (DCTELEM)wsptr[4]);
tmp13 = ((DCTELEM)wsptr[2] + (DCTELEM)wsptr[6]);
tmp12 =
MULTIPLY((DCTELEM)wsptr[2] - (DCTELEM)wsptr[6], FIX_1_414213562) - tmp13;
tmp0 = tmp10 + tmp13;
tmp3 = tmp10 - tmp13;
tmp1 = tmp11 + tmp12;
tmp2 = tmp11 - tmp12;
/* Odd part */
z13 = (DCTELEM)wsptr[5] + (DCTELEM)wsptr[3];
z10 = (DCTELEM)wsptr[5] - (DCTELEM)wsptr[3];
z11 = (DCTELEM)wsptr[1] + (DCTELEM)wsptr[7];
z12 = (DCTELEM)wsptr[1] - (DCTELEM)wsptr[7];
tmp7 = z11 + z13; /* phase 5 */
tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */
z5 = MULTIPLY(z10 + z12, FIX_1_847759065); /* 2*c2 */
tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
tmp12 = MULTIPLY(z10, -FIX_2_613125930) + z5; /* -2*(c2+c6) */
tmp6 = tmp12 - tmp7; /* phase 2 */
tmp5 = tmp11 - tmp6;
tmp4 = tmp10 + tmp5;
/* Final output stage: scale down by a factor of 8 and range-limit */
/*outptr[0] =
range_limit[IDESCALE(tmp0 + tmp7, PASS1_BITS + 3) & RANGE_MASK];
outptr[7] =
range_limit[IDESCALE(tmp0 - tmp7, PASS1_BITS + 3) & RANGE_MASK];
outptr[1] =
range_limit[IDESCALE(tmp1 + tmp6, PASS1_BITS + 3) & RANGE_MASK];
outptr[6] =
range_limit[IDESCALE(tmp1 - tmp6, PASS1_BITS + 3) & RANGE_MASK];
outptr[2] =
range_limit[IDESCALE(tmp2 + tmp5, PASS1_BITS + 3) & RANGE_MASK];
outptr[5] =
range_limit[IDESCALE(tmp2 - tmp5, PASS1_BITS + 3) & RANGE_MASK];
outptr[4] =
range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS + 3) & RANGE_MASK];
outptr[3] =
range_limit[IDESCALE(tmp3 - tmp4, PASS1_BITS + 3) & RANGE_MASK];*/
outptr[0] = IDESCALE(tmp0 + tmp7, PASS1_BITS + 3);
outptr[7] = IDESCALE(tmp0 - tmp7, PASS1_BITS + 3);
outptr[1] = IDESCALE(tmp1 + tmp6, PASS1_BITS + 3);
outptr[6] = IDESCALE(tmp1 - tmp6, PASS1_BITS + 3);
outptr[2] = IDESCALE(tmp2 + tmp5, PASS1_BITS + 3);
outptr[5] = IDESCALE(tmp2 - tmp5, PASS1_BITS + 3);
outptr[4] = IDESCALE(tmp3 + tmp4, PASS1_BITS + 3);
outptr[3] = IDESCALE(tmp3 - tmp4, PASS1_BITS + 3);
/*outptr[0] = tmp0 + tmp7;
outptr[7] = tmp0 - tmp7;
outptr[1] = tmp1 + tmp6;
outptr[6] = tmp1 - tmp6;
outptr[2] = tmp2 + tmp5;
outptr[5] = tmp2 - tmp5;
outptr[4] = tmp3 + tmp4;
outptr[3] = tmp3 - tmp4;*/
wsptr += DCTSIZE; /* advance pointer to next row */
}
}
#endif

View File

@@ -13,10 +13,25 @@
@implementation ImageDCTTable
- (instancetype _Nonnull)initWithQuality:(NSInteger)quality isChroma:(bool)isChroma {
- (instancetype _Nonnull)initWithQuality:(NSInteger)quality type:(ImageDCTTableType)type {
self = [super init];
if (self != nil) {
_table = dct::DCTTable::generate((int)quality, isChroma);
dct::DCTTable::Type mappedType;
switch (type) {
case ImageDCTTableTypeLuma:
mappedType = dct::DCTTable::Type::Luma;
break;
case ImageDCTTableTypeChroma:
mappedType = dct::DCTTable::Type::Chroma;
break;
case ImageDCTTableTypeDelta:
mappedType = dct::DCTTable::Type::Delta;
break;
default:
mappedType = dct::DCTTable::Type::Luma;
break;
}
_table = dct::DCTTable::generate((int)quality, mappedType);
}
return self;
}
@@ -63,4 +78,12 @@
_dct->inverse(coefficients, pixels, (int)width, (int)height, (int)coefficientsPerRow, (int)bytesPerRow);
}
- (void)forward4x4:(int16_t const * _Nonnull)normalizedCoefficients coefficients:(int16_t * _Nonnull)coefficients width:(NSInteger)width height:(NSInteger)height {
_dct->forward4x4(normalizedCoefficients, coefficients, (int)width, (int)height);
}
- (void)inverse4x4:(int16_t const * _Nonnull)coefficients normalizedCoefficients:(int16_t * _Nonnull)normalizedCoefficients width:(NSInteger)width height:(NSInteger)height {
_dct->inverse4x4(coefficients, normalizedCoefficients, (int)width, (int)height);
}
@end

View File

@@ -53,7 +53,7 @@ void splitRGBAIntoYUVAPlanes(uint8_t const *argb, uint8_t *outY, uint8_t *outU,
vImageExtractChannel_ARGB8888(&src, &destA, 3, kvImageDoNotTile);
}
void combineYUVAPlanesIntoARBB(uint8_t *argb, uint8_t const *inY, uint8_t const *inU, uint8_t const *inV, uint8_t const *inA, int width, int height, int bytesPerRow) {
void combineYUVAPlanesIntoARGB(uint8_t *argb, uint8_t const *inY, uint8_t const *inU, uint8_t const *inV, uint8_t const *inA, int width, int height, int bytesPerRow) {
static vImage_YpCbCrToARGB info;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
@@ -94,8 +94,11 @@ void combineYUVAPlanesIntoARBB(uint8_t *argb, uint8_t const *inY, uint8_t const
srcA.rowBytes = width;
error = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&srcYp, &srcCb, &srcCr, &destArgb, &info, permuteMap, 255, kvImageDoNotTile);
error = vImageOverwriteChannels_ARGB8888(&srcA, &destArgb, &destArgb, 1 << 0, kvImageDoNotTile);
//error = vImageOverwriteChannels_ARGB8888(&srcYp, &destArgb, &destArgb, 1 << 1, kvImageDoNotTile);
//error = vImageOverwriteChannels_ARGB8888(&srcYp, &destArgb, &destArgb, 1 << 2, kvImageDoNotTile);
//error = vImageOverwriteChannels_ARGB8888(&srcYp, &destArgb, &destArgb, 1 << 3, kvImageDoNotTile);
}
void scaleImagePlane(uint8_t *outPlane, int outWidth, int outHeight, int outBytesPerRow, uint8_t const *inPlane, int inWidth, int inHeight, int inBytesPerRow) {
@@ -113,3 +116,42 @@ void scaleImagePlane(uint8_t *outPlane, int outWidth, int outHeight, int outByte
vImageScale_Planar8(&src, &dst, nil, kvImageDoNotTile);
}
void subtractArraysInt16(int16_t const *a, int16_t const *b, uint16_t *dest, int length) {
for (int i = 0; i + 8 <= length; i += 8) {
int16x8_t lhs = vld1q_s16((int16_t *)&a[i]);
int16x8_t rhs = vld1q_s16((int16_t *)&b[i]);
int16x8_t result = vsubq_s16(lhs, rhs);
vst1q_s16((int16_t *)&dest[i], result);
}
if (length % 8 != 0) {
for (int i = length - (length % 8); i < length; i++) {
dest[i] = a[i] - b[i];
}
}
}
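/* Hypothetical usage when forming residuals:
 * subtractArraysInt16(currentCoefficients, predictedCoefficients, residual, count); */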
void subtractArraysUInt8Int16(uint8_t const *a, int16_t const *b, uint8_t *dest, int length) {
for (int i = 0; i + 8 <= length; i += 8) {
uint8x8_t lhs8 = vld1_u8(&a[i]);
int16x8_t lhs = vreinterpretq_s16_u16(vmovl_u8(lhs8));
int16x8_t rhs = vld1q_s16((int16_t *)&b[i]);
int16x8_t result = vsubq_s16(lhs, rhs);
uint8x8_t result8 = vqmovun_s16(result);
vst1_u8(&dest[i], result8);
}
if (length % 8 != 0) {
for (int i = length - (length % 8); i < length; i++) {
int16_t result = ((int16_t)a[i]) - b[i];
if (result < 0) {
result = 0;
}
if (result > 255) {
result = 255;
}
dest[i] = (uint8_t)result;
}
}
}