#if defined(OJPH_ARCH_I386) || defined(OJPH_ARCH_X86_64)
#ifdef OJPH_COMPILER_MSVC
  #define likely(x)   (x)
  #define unlikely(x) (x)
#else
  #define likely(x)   __builtin_expect((x), 1)
  #define unlikely(x) __builtin_expect((x), 0)
#endif
// tables for the AVX2 block encoder, filled by the init functions below
static ui32 vlc_tbl0[2048] = { 0 };
static ui32 vlc_tbl1[2048] = { 0 };
static ui32 ulvc_cwd_pre[33];
static int  ulvc_cwd_pre_len[33];
static ui32 ulvc_cwd_suf[33];
static int  ulvc_cwd_suf_len[33];
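// U-VLC prefix/suffix codeword tables for the unsigned residual
// u_q = U_q - kappa_q, indexed by u in [0, 32] and filled by
// uvlc_init_tables() below. vlc_tbl0/vlc_tbl1 hold one packed VLC entry
// per (c_q, rho, eps) index, for initial and non-initial quad rows.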
static bool vlc_init_tables()
{
  struct vlc_src_table { int c_q, rho, u_off, e_k, e_1, cwd, cwd_len; };
  vlc_src_table tbl0[] = {
    #include "table0.h"
  };
  size_t tbl0_size = sizeof(tbl0) / sizeof(vlc_src_table);
  si32 pattern_popcnt[16];
  for (ui32 i = 0; i < 16; ++i)
    pattern_popcnt[i] = (si32)population_count(i);
  vlc_src_table* src_tbl = tbl0;
  ui32* tgt_tbl = vlc_tbl0;
  size_t tbl_size = tbl0_size;
  for (int i = 0; i < 2048; ++i)
  {
    int c_q = i >> 8, rho = (i >> 4) & 0xF, emb = i & 0xF;
    if (((emb & rho) != emb) || (rho == 0 && c_q == 0))
      continue; // invalid (c_q, rho, emb) combinations

    vlc_src_table *best_entry = NULL;
    if (emb) // u_off == 1
    {
      int best_e_k = -1;
      for (size_t j = 0; j < tbl_size; ++j)
        if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
          if (src_tbl[j].u_off == 1)
            if ((emb & src_tbl[j].e_k) == src_tbl[j].e_1)
            {
              // prefer the entry whose e_k pattern has the most set bits
              int ones_count = pattern_popcnt[src_tbl[j].e_k];
              if (ones_count >= best_e_k)
              {
                best_entry = src_tbl + j;
                best_e_k = ones_count;
              }
            }
    }
    else // u_off == 0
    {
      for (size_t j = 0; j < tbl_size; ++j)
        if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
          if (src_tbl[j].u_off == 0)
          {
            best_entry = src_tbl + j;
            break;
          }
    }
    tgt_tbl[i] = (ui16)((best_entry->cwd << 8) + (best_entry->cwd_len << 4)
                        + best_entry->e_k);
  }
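  // Entry layout: bits 15..8 carry the VLC codeword and bits 7..4 its
  // length; the low nibble (taken here to be the entry's e_k pattern,
  // since the original line is truncated) is what proc_ms_encode reads
  // via tuple & 1/2/4/8 when sizing each sample's MagSgn codeword.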
  vlc_src_table tbl1[] = {
    #include "table1.h"
  };
  size_t tbl1_size = sizeof(tbl1) / sizeof(vlc_src_table);

  src_tbl = tbl1;
  tgt_tbl = vlc_tbl1;
  tbl_size = tbl1_size;
  for (int i = 0; i < 2048; ++i)
  {
    int c_q = i >> 8, rho = (i >> 4) & 0xF, emb = i & 0xF;
    if (((emb & rho) != emb) || (rho == 0 && c_q == 0))
      continue; // invalid (c_q, rho, emb) combinations

    vlc_src_table *best_entry = NULL;
    if (emb) // u_off == 1
    {
      int best_e_k = -1;
      for (size_t j = 0; j < tbl_size; ++j)
        if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
          if (src_tbl[j].u_off == 1)
            if ((emb & src_tbl[j].e_k) == src_tbl[j].e_1)
            {
              int ones_count = pattern_popcnt[src_tbl[j].e_k];
              if (ones_count >= best_e_k)
              {
                best_entry = src_tbl + j;
                best_e_k = ones_count;
              }
            }
    }
    else // u_off == 0
    {
      for (size_t j = 0; j < tbl_size; ++j)
        if (src_tbl[j].c_q == c_q && src_tbl[j].rho == rho)
          if (src_tbl[j].u_off == 0)
          {
            best_entry = src_tbl + j;
            break;
          }
    }
    tgt_tbl[i] = (ui16)((best_entry->cwd << 8) + (best_entry->cwd_len << 4)
                        + best_entry->e_k);
  }
  return true;
}
static bool uvlc_init_tables()
{
  // prefix codewords/lengths for u = 0..4
  ulvc_cwd_pre[0] = 0; ulvc_cwd_pre[1] = 1; ulvc_cwd_pre[2] = 2;
  ulvc_cwd_pre[3] = 4; ulvc_cwd_pre[4] = 4;
  ulvc_cwd_pre_len[0] = 0; ulvc_cwd_pre_len[1] = 1;
  ulvc_cwd_pre_len[2] = 2;
  ulvc_cwd_pre_len[3] = 3; ulvc_cwd_pre_len[4] = 3;
  // suffix codewords/lengths for u = 0..4
  ulvc_cwd_suf[0] = 0; ulvc_cwd_suf[1] = 0; ulvc_cwd_suf[2] = 0;
  ulvc_cwd_suf[3] = 0; ulvc_cwd_suf[4] = 1;
  ulvc_cwd_suf_len[0] = 0; ulvc_cwd_suf_len[1] = 0;
  ulvc_cwd_suf_len[2] = 0;
  ulvc_cwd_suf_len[3] = 1; ulvc_cwd_suf_len[4] = 1;
  for (int i = 5; i < 33; ++i)
  {
    ulvc_cwd_pre[i] = 7;
    ulvc_cwd_pre_len[i] = 3;
    ulvc_cwd_suf[i] = (ui32)(i - 5);
    ulvc_cwd_suf_len[i] = 5;
  }
  return true;
}
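// Net U-VLC code, as read from the tables above: u = 1 is a 1-bit prefix
// (cwd 1); u = 2 a 2-bit prefix (cwd 2); u = 3 or 4 a 3-bit prefix
// (cwd 4) plus a 1-bit suffix; and u >= 5 a 3-bit prefix (cwd 7)
// followed by a 5-bit suffix holding u - 5.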
static void mel_init(mel_struct *melp, ui32 buffer_size, ui8 *data)
{
  melp->buf = data;
  melp->pos = 0;
  melp->buf_size = buffer_size;
  melp->remaining_bits = 8;
  melp->tmp = 0;
  melp->run = 0;
  melp->k = 0;
  melp->threshold = 1; // 1 << mel_exp[0]
}
static void mel_emit_bit(mel_struct *melp, int v)
{
  melp->tmp = (melp->tmp << 1) + v;
  melp->remaining_bits--;
  if (melp->remaining_bits == 0)
  {
    melp->buf[melp->pos++] = (ui8)melp->tmp;
    melp->remaining_bits = (melp->tmp == 0xFF ? 7 : 8);
    melp->tmp = 0;
  }
}
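// Byte-stuffing: once a 0xFF byte is written, only 7 bits go into the
// following byte, so the MEL stream never contains 0xFF followed by a
// byte above 0x7F.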
static void mel_encode(mel_struct *melp, bool bit)
{
  // MEL exponent table; the run-length threshold is 1 << mel_exp[k]
  static const int mel_exp[13] = {0,0,0,1,1,1,2,2,2,3,3,4,5};

  if (bit == false)
  { // (run accumulation follows the standard MEL update)
    ++melp->run;
    if (melp->run >= melp->threshold)
    { // a complete run of zeros collapses into a single 1 bit
      mel_emit_bit(melp, 1);
      melp->run = 0;
      melp->k = ojph_min(12, melp->k + 1);
      melp->threshold = 1 << mel_exp[melp->k];
    }
  }
  else
  { // a 0 bit followed by the length of the interrupted run
    mel_emit_bit(melp, 0);
    int t = mel_exp[melp->k];
    while (--t >= 0)
      mel_emit_bit(melp, (melp->run >> t) & 1);
    melp->run = 0;
    melp->k = ojph_max(0, melp->k - 1);
    melp->threshold = 1 << mel_exp[melp->k];
  }
}
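// MEL is an adaptive run-length coder: long runs of zeros (quads that
// stay insignificant) grow the state k, so each emitted bit covers an
// ever longer run, up to 1 << mel_exp[12] = 32 zeros per bit; ones
// shrink the state again.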
struct vlc_struct_avx2 {
  ui8* buf;       // data buffer; points at its last byte
  ui32 pos;       // offset back from buf of the next byte to write
  ui32 buf_size;  // buffer size, which must not be exceeded
  ui64 tmp;       // temporary storage of coded bits
  int used_bits;  // number of occupied bits within tmp
  bool last_greater_than_8F; // true if the last written byte was > 0x8F
};
static void vlc_init(vlc_struct_avx2 *vlcp, ui32 buffer_size, ui8 *data)
{
  vlcp->buf = data + buffer_size - 1; // the VLC stream grows backwards
  vlcp->pos = 1;
  vlcp->buf_size = buffer_size;
  // start with a 0xFF byte and a 0xF nibble, the VLC termination marker
  vlcp->buf[0] = 0xFF;
  vlcp->used_bits = 4;
  vlcp->tmp = 0xF;
  vlcp->last_greater_than_8F = true;
}
static void vlc_encode(vlc_struct_avx2 *vlcp, int cwd, int cwd_len)
{
  vlcp->tmp |= (ui64)cwd << vlcp->used_bits;
  vlcp->used_bits += cwd_len;

  while (vlcp->used_bits >= 8)
  {
    ui8 tmp;
    if (unlikely(vlcp->last_greater_than_8F))
    {
      tmp = (ui8)(vlcp->tmp & 0x7F);
      if (likely(tmp != 0x7F))
      { // the full byte can be written as is
        tmp = (ui8)(vlcp->tmp & 0xFF);
        *(vlcp->buf - vlcp->pos) = tmp;
        vlcp->last_greater_than_8F = tmp > 0x8F;
        vlcp->tmp >>= 8;
        vlcp->used_bits -= 8;
      }
      else
      { // stuff: write only 7 bits after a byte greater than 0x8F
        *(vlcp->buf - vlcp->pos) = tmp;
        vlcp->last_greater_than_8F = false;
        vlcp->tmp >>= 7;
        vlcp->used_bits -= 7;
      }
    }
    else
    {
      tmp = (ui8)(vlcp->tmp & 0xFF);
      *(vlcp->buf - vlcp->pos) = tmp;
      vlcp->last_greater_than_8F = tmp > 0x8F;
      vlcp->tmp >>= 8;
      vlcp->used_bits -= 8;
    }
    vlcp->pos++;
  }
}
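// The VLC stream is written backwards from the end of its buffer via
// *(buf - pos); the decoder reads it in reverse. The stuffing rule
// mirrors decoding: after a byte greater than 0x8F, low bits of 0x7F
// force a 7-bit byte, which keeps bit-unstuffing unambiguous.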
static void terminate_mel_vlc(mel_struct *melp, vlc_struct_avx2 *vlcp)
{
  if (vlcp->last_greater_than_8F && (vlcp->tmp & 0x7F) == 0x7F)
  { // flush a stuffed 7-bit byte before attempting the fuse
    *(vlcp->buf - vlcp->pos) = 0x7F;
    vlcp->pos++;
    vlcp->tmp >>= 7;
    vlcp->used_bits -= 7;
  }

  melp->tmp = melp->tmp << melp->remaining_bits;
  int mel_mask = (0xFF << melp->remaining_bits) & 0xFF;
  int vlc_mask = 0xFF >> (8 - vlcp->used_bits);
  if ((mel_mask | vlc_mask) == 0)
    return; // both streams are byte-aligned; nothing left to flush

  if (melp->pos >= melp->buf_size)
    OJPH_ERROR(0x00020003, "mel encoder's buffer is full");
  ui8 vlcp_tmp = (ui8)vlcp->tmp;
  int fuse = melp->tmp | vlcp_tmp;
  if ( ( ((fuse ^ melp->tmp) & mel_mask)
       | ((fuse ^ vlcp_tmp) & vlc_mask) ) == 0
      && (fuse != 0xFF) && vlcp->pos > 1)
  {
    melp->buf[melp->pos++] = (ui8)fuse; // one byte closes both streams
  }
  else
  {
    if (vlcp->pos >= vlcp->buf_size)
      OJPH_ERROR(0x00020004, "vlc encoder's buffer is full");
    melp->buf[melp->pos++] = (ui8)melp->tmp;
    *(vlcp->buf - vlcp->pos) = vlcp_tmp;
    vlcp->pos++;
  }
}
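// MEL grows forward and VLC backward inside one shared buffer; when
// their final partial bytes do not collide (and would not form 0xFF),
// a single fused byte terminates both streams, saving one byte.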
static void ms_init(ms_struct *msp, ui32 buffer_size, ui8 *data)
{
  msp->buf = data;
  msp->pos = 0;
  msp->buf_size = buffer_size;
  msp->max_bits = 8;
  msp->used_bits = 0;
  msp->tmp = 0;
}
static void ms_encode(ms_struct *msp, ui64 cwd, int cwd_len)
{
  while (cwd_len > 0)
  {
    if (msp->pos >= msp->buf_size)
      OJPH_ERROR(0x00020005, "magnitude sign encoder's buffer is full");
    int t = ojph_min(msp->max_bits - msp->used_bits, cwd_len);
    msp->tmp |= ((ui32)(cwd & ((1U << t) - 1))) << msp->used_bits;
    msp->used_bits += t;
    cwd >>= t;
    cwd_len -= t;
    if (msp->used_bits >= msp->max_bits)
    {
      msp->buf[msp->pos++] = (ui8)msp->tmp;
      msp->max_bits = (msp->tmp == 0xFF) ? 7 : 8;
      msp->tmp = 0;
      msp->used_bits = 0;
    }
  }
}
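// Same stuffing discipline as MEL: emitting a 0xFF byte drops max_bits
// to 7 for the next byte, so 0xFF is never followed by a byte > 0x7F.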
static void ms_terminate(ms_struct *msp)
{
  if (msp->used_bits)
  {
    int t = msp->max_bits - msp->used_bits;
    msp->tmp |= (0xFF & ((1U << t) - 1)) << msp->used_bits; // pad with 1s
    msp->used_bits += t;
    if (msp->tmp != 0xFF)
    {
      if (msp->pos >= msp->buf_size)
        OJPH_ERROR(0x00020006, "magnitude sign encoder's buffer is full");
      msp->buf[msp->pos++] = (ui8)msp->tmp;
    }
  }
  else if (msp->max_bits == 7)
    msp->max_bits = 8;
}
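// Termination pads the final byte with ones; a resulting all-ones byte
// is dropped entirely, since the decoder synthesizes trailing one bits.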
#define ZERO _mm256_setzero_si256()
#define ONE  _mm256_set1_epi32(1)

// 32-bit leading-zero count via the float-exponent trick
inline __m256i avx2_lzcnt_epi32(__m256i v)
{
  // keep only the 8 MSBs of each value so the int-to-float conversion
  // cannot round the mantissa up past the leading one
  v = _mm256_andnot_si256(_mm256_srli_epi32(v, 8), v);
  v = _mm256_castps_si256(_mm256_cvtepi32_ps(v)); // exponent = 158 - lzcnt
  v = _mm256_srli_epi32(v, 23);                   // extract the exponent
  v = _mm256_subs_epu16(_mm256_set1_epi32(158), v); // undo the bias
  v = _mm256_min_epi16(v, _mm256_set1_epi32(32));   // v == 0 maps to 32
  return v;
}
// AVX2 has no not-equal compare; invert the equality mask instead
inline __m256i avx2_cmpneq_epi32(__m256i v, __m256i v2)
{
  return _mm256_xor_si256(_mm256_cmpeq_epi32(v, v2),
                          _mm256_set1_epi32((int32_t)0xffffffff));
}
static void proc_pixel(__m256i *src_vec, ui32 p,
                       __m256i *eq_vec, __m256i *s_vec,
                       __m256i &rho_vec, __m256i &e_qmax_vec)
{
  __m256i val_vec[4], _eq_vec[4], _s_vec[4], _rho_vec[4];

  for (ui32 i = 0; i < 4; ++i)
  {
    // shift out the sign bit and align to bit-plane p: val = 2 * mu_p
    val_vec[i] = _mm256_add_epi32(src_vec[i], src_vec[i]);
    val_vec[i] = _mm256_srli_epi32(val_vec[i], (int)p);
    val_vec[i] = _mm256_and_si256(val_vec[i], _mm256_set1_epi32((int)~1u));

    // all-ones in lanes whose sample is significant at this bit-plane
    const __m256i val_notmask = avx2_cmpneq_epi32(val_vec[i], ZERO);

    // exponent e_q = 32 - lzcnt(val - 1)
    val_vec[i] = _mm256_sub_epi32(val_vec[i], ONE);
    _eq_vec[i] = avx2_lzcnt_epi32(val_vec[i]);
    _eq_vec[i] = _mm256_sub_epi32(_mm256_set1_epi32(32), _eq_vec[i]);

    // MagSgn word v_n = 2 * (mu_p - 1) + sign
    val_vec[i] = _mm256_sub_epi32(val_vec[i], ONE);
    _s_vec[i] = _mm256_srli_epi32(src_vec[i], 31);
    _s_vec[i] = _mm256_add_epi32(_s_vec[i], val_vec[i]);

    // zero out insignificant lanes; val becomes a 0/1 significance flag
    _eq_vec[i] = _mm256_and_si256(_eq_vec[i], val_notmask);
    _s_vec[i] = _mm256_and_si256(_s_vec[i], val_notmask);
    val_vec[i] = _mm256_srli_epi32(val_notmask, 31);
  }

  // deinterleave even/odd columns so each output vector holds one sample
  // position of eight consecutive quads
  const __m256i idx = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0);
  __m256i tmp1, tmp2;
  for (ui32 i = 0; i < 2; ++i)
  {
    tmp1 = _mm256_permutevar8x32_epi32(_eq_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(_eq_vec[2 + i], idx);
    eq_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    eq_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));

    tmp1 = _mm256_permutevar8x32_epi32(_s_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(_s_vec[2 + i], idx);
    s_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    s_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));

    tmp1 = _mm256_permutevar8x32_epi32(val_vec[0 + i], idx);
    tmp2 = _mm256_permutevar8x32_epi32(val_vec[2 + i], idx);
    _rho_vec[0 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (0 << 0) + (2 << 4));
    _rho_vec[2 + i] = _mm256_permute2x128_si256(tmp1, tmp2, (1 << 0) + (3 << 4));
  }

  e_qmax_vec = _mm256_max_epi32(eq_vec[0], eq_vec[1]);
  e_qmax_vec = _mm256_max_epi32(e_qmax_vec, eq_vec[2]);
  e_qmax_vec = _mm256_max_epi32(e_qmax_vec, eq_vec[3]);

  // rho = the quad's 4-bit significance pattern
  _rho_vec[1] = _mm256_slli_epi32(_rho_vec[1], 1);
  _rho_vec[2] = _mm256_slli_epi32(_rho_vec[2], 2);
  _rho_vec[3] = _mm256_slli_epi32(_rho_vec[3], 3);
  rho_vec = _mm256_or_si256(_rho_vec[0], _rho_vec[1]);
  rho_vec = _mm256_or_si256(rho_vec, _rho_vec[2]);
  rho_vec = _mm256_or_si256(rho_vec, _rho_vec[3]);
}
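// proc_pixel outputs, one 32-bit lane per quad: eq_vec[0..3] hold the
// four sample exponents, s_vec[0..3] the MagSgn value/sign words,
// rho_vec the 4-bit significance pattern, and e_qmax_vec the largest
// exponent within each quad.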
static void rotate_matrix(__m256i *matrix)
{
  __m256i tmp1 = _mm256_unpacklo_epi32(matrix[0], matrix[1]);
  __m256i tmp2 = _mm256_unpacklo_epi32(matrix[2], matrix[3]);
  __m256i tmp3 = _mm256_unpackhi_epi32(matrix[0], matrix[1]);
  __m256i tmp4 = _mm256_unpackhi_epi32(matrix[2], matrix[3]);

  matrix[0] = _mm256_unpacklo_epi64(tmp1, tmp2);
  matrix[1] = _mm256_unpacklo_epi64(tmp3, tmp4);
  matrix[2] = _mm256_unpackhi_epi64(tmp1, tmp2);
  matrix[3] = _mm256_unpackhi_epi64(tmp3, tmp4);

  tmp1 = _mm256_permute2x128_si256(matrix[0], matrix[2], 0x20);
  matrix[2] = _mm256_permute2x128_si256(matrix[0], matrix[2], 0x31);
  matrix[0] = tmp1;

  tmp1 = _mm256_permute2x128_si256(matrix[1], matrix[3], 0x20);
  matrix[3] = _mm256_permute2x128_si256(matrix[1], matrix[3], 0x31);
  matrix[1] = tmp1;
}
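// rotate_matrix transposes the 4x8 block of 32-bit lanes, turning data
// laid out as "one vector per sample position" into "consecutive lanes
// per quad"; proc_ms_encode relies on this when it fuses sample pairs
// into codewords.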
static void proc_ms_encode(ms_struct *msp, __m256i &tuple_vec,
                           __m256i &uq_vec, __m256i &rho_vec,
                           __m256i *s_vec)
{
  __m256i m_vec[4];

  // m_n = rho_n ? U_q - k_n : 0, where k_n is bit n of the tuple's low
  // nibble (the e_k pattern of the chosen VLC entry)
  auto tmp = _mm256_and_si256(tuple_vec, ONE);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  auto tmp1 = _mm256_and_si256(rho_vec, ONE);
  auto mask = avx2_cmpneq_epi32(tmp1, ZERO);
  m_vec[0] = _mm256_and_si256(mask, tmp);

  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(2));
  tmp = _mm256_srli_epi32(tmp, 1);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(2));
  mask = avx2_cmpneq_epi32(tmp1, ZERO);
  m_vec[1] = _mm256_and_si256(mask, tmp);

  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(4));
  tmp = _mm256_srli_epi32(tmp, 2);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(4));
  mask = avx2_cmpneq_epi32(tmp1, ZERO);
  m_vec[2] = _mm256_and_si256(mask, tmp);

  tmp = _mm256_and_si256(tuple_vec, _mm256_set1_epi32(8));
  tmp = _mm256_srli_epi32(tmp, 3);
  tmp = _mm256_sub_epi32(uq_vec, tmp);
  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(8));
  mask = avx2_cmpneq_epi32(tmp1, ZERO);
  m_vec[3] = _mm256_and_si256(mask, tmp);

  rotate_matrix(m_vec);
  rotate_matrix(s_vec);

  ui32 cwd_len[8], cwd[8];
  for (ui32 i = 0; i < 4; ++i)
  {
    // per sample: emit the lowest m bits of the value/sign word
    _mm256_storeu_si256((__m256i *)cwd_len, m_vec[i]);
    tmp = _mm256_sllv_epi32(ONE, m_vec[i]);
    tmp = _mm256_sub_epi32(tmp, ONE);
    tmp = _mm256_and_si256(tmp, s_vec[i]);
    _mm256_storeu_si256((__m256i*)cwd, tmp);

    for (ui32 j = 0; j < 4; ++j)
    {
      ui32 idx = 2 * j;
      ui64 _cwd = cwd[idx];
      int _cwd_len = (int)cwd_len[idx];
      _cwd |= ((ui64)cwd[idx + 1]) << _cwd_len;
      _cwd_len += (int)cwd_len[idx + 1];
      ms_encode(msp, _cwd, _cwd_len);
    }
  }
}
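// After the transposes, each m_vec[i]/s_vec[i] holds two quads' four
// samples in consecutive lanes, so the inner loop concatenates sample
// pairs (0,1) and (2,3) of each quad into single codewords of up to
// 64 bits before handing them to ms_encode.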
static __m256i cal_eps_vec(__m256i *eq_vec, __m256i &u_q_vec,
                           __m256i &e_qmax_vec)
{
  // eps bit n is set when sample n attains the quad's maximum exponent;
  // the pattern only applies when u_q > 0 (u_off == 1)
  auto u_q_mask = _mm256_cmpgt_epi32(u_q_vec, ZERO);

  auto mask = _mm256_cmpeq_epi32(eq_vec[0], e_qmax_vec);
  auto eps_vec = _mm256_srli_epi32(mask, 31);

  mask = _mm256_cmpeq_epi32(eq_vec[1], e_qmax_vec);
  auto tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 1);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  mask = _mm256_cmpeq_epi32(eq_vec[2], e_qmax_vec);
  tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 2);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  mask = _mm256_cmpeq_epi32(eq_vec[3], e_qmax_vec);
  tmp = _mm256_srli_epi32(mask, 31);
  tmp = _mm256_slli_epi32(tmp, 3);
  eps_vec = _mm256_or_si256(eps_vec, tmp);

  return _mm256_and_si256(u_q_mask, eps_vec);
}
static void update_lep(ui32 x, __m256i &prev_e_val_vec,
                       __m256i *eq_vec, __m256i *e_val_vec,
                       const __m256i left_shift)
{
  // exponent predictor for the row below: max of this quad-row's bottom
  // samples, with the last column carried over to the next vector
  auto tmp = _mm256_permutevar8x32_epi32(eq_vec[3], left_shift);
  tmp = _mm256_insert_epi32(tmp,
      _mm_cvtsi128_si32(_mm256_castsi256_si128(prev_e_val_vec)), 0);
  prev_e_val_vec = _mm256_insert_epi32(ZERO,
      _mm256_extract_epi32(eq_vec[3], 7), 0);
  e_val_vec[x] = _mm256_max_epi32(eq_vec[1], tmp);
}
static void update_lcxp(ui32 x, __m256i &prev_cx_val_vec,
                        __m256i &rho_vec, __m256i *cx_val_vec,
                        const __m256i left_shift)
{
  auto tmp = _mm256_permutevar8x32_epi32(rho_vec, left_shift);
  tmp = _mm256_insert_epi32(tmp,
      _mm_cvtsi128_si32(_mm256_castsi256_si128(prev_cx_val_vec)), 0);
  prev_cx_val_vec = _mm256_insert_epi32(ZERO,
      _mm256_extract_epi32(rho_vec, 7), 0);

  // collect the bottom-row significance bits that form the spatial
  // context for the quads below
  tmp = _mm256_and_si256(tmp, _mm256_set1_epi32(8));
  tmp = _mm256_srli_epi32(tmp, 3);
  auto tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(2));
  tmp1 = _mm256_srli_epi32(tmp1, 1);
  cx_val_vec[x] = _mm256_or_si256(tmp, tmp1);
}
static __m256i cal_tuple(__m256i &cq_vec, __m256i &rho_vec,
                         __m256i &eps_vec, ui32 *vlc_tbl)
{
  // table index = (c_q << 8) + (rho << 4) + eps, as in vlc_init_tables
  auto tmp = _mm256_slli_epi32(cq_vec, 8);
  auto tmp1 = _mm256_slli_epi32(rho_vec, 4);
  tmp = _mm256_add_epi32(tmp, tmp1);
  tmp = _mm256_add_epi32(tmp, eps_vec);
  return _mm256_i32gather_epi32((const int *)vlc_tbl, tmp, 4);
}
static __m256i proc_cq1(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec,
                        const __m256i right_shift)
{
  // initial quad row: the context comes only from the west quad's
  // significance, so x, cx_val_vec, and right_shift go unused here
  auto tmp = _mm256_srli_epi32(rho_vec, 1);
  auto tmp1 = _mm256_and_si256(rho_vec, ONE);
  return _mm256_or_si256(tmp, tmp1);
}
static __m256i proc_cq2(ui32 x, __m256i *cx_val_vec, __m256i &rho_vec,
                        const __m256i right_shift)
{
  // non-initial rows: combine the saved context of the row above
  // (cx_val_vec) with this row's rho
  auto lcxp1_vec = _mm256_permutevar8x32_epi32(cx_val_vec[x], right_shift);
  auto tmp = _mm256_permutevar8x32_epi32(lcxp1_vec, right_shift);
#ifdef OJPH_ARCH_X86_64
  tmp = _mm256_insert_epi64(tmp,
      _mm_cvtsi128_si64(_mm256_castsi256_si128(cx_val_vec[x + 1])), 3);
#elif (defined OJPH_ARCH_I386)
  int lsb = _mm_cvtsi128_si32(_mm256_castsi256_si128(cx_val_vec[x + 1]));
  tmp = _mm256_insert_epi32(tmp, lsb, 6);
  int msb = _mm_extract_epi32(_mm256_castsi256_si128(cx_val_vec[x + 1]), 1);
  tmp = _mm256_insert_epi32(tmp, msb, 7);
#else
  #error Unsupported architecture
#endif
  tmp = _mm256_slli_epi32(tmp, 2);
  auto tmp1 = _mm256_insert_epi32(lcxp1_vec,
      _mm_cvtsi128_si32(_mm256_castsi256_si128(cx_val_vec[x + 1])), 7);
  tmp = _mm256_add_epi32(tmp1, tmp);

  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(4));
  tmp1 = _mm256_srli_epi32(tmp1, 1);
  tmp = _mm256_or_si256(tmp, tmp1);

  tmp1 = _mm256_and_si256(rho_vec, _mm256_set1_epi32(8));
  tmp1 = _mm256_srli_epi32(tmp1, 2);
  return _mm256_or_si256(tmp, tmp1);
}
using fn_proc_cq = __m256i (*)(ui32, __m256i *, __m256i &, const __m256i);
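// proc_cq1/proc_cq2 (and, below, the *_encode1/*_encode2 pairs) share
// one signature so the main loop can switch from initial-row to
// non-initial-row coding by swapping function pointers after the first
// pair of rows.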
static void proc_mel_encode1(mel_struct *melp, __m256i &cq_vec,
                             __m256i &rho_vec, __m256i u_q_vec,
                             ui32 ignore, const __m256i right_shift)
{
  int32_t mel_need_encode[8];
  int32_t mel_need_encode2[8];
  int32_t mel_bit[8];
  int32_t mel_bit2[8];

  // a quad with zero context sends a MEL bit: 1 if it is significant
  _mm256_storeu_si256((__m256i *)mel_need_encode,
                      _mm256_cmpeq_epi32(cq_vec, ZERO));
  _mm256_storeu_si256((__m256i *)mel_bit,
      _mm256_srli_epi32(avx2_cmpneq_epi32(rho_vec, ZERO), 31));

  // initial row only: a pair with both u_q > 0 sends an extra MEL bit,
  // 1 when min(u_q0, u_q1) > 2
  auto tmp = _mm256_permutevar8x32_epi32(u_q_vec, right_shift);
  auto tmp1 = _mm256_min_epi32(u_q_vec, tmp);
  _mm256_storeu_si256((__m256i *)mel_bit2,
      _mm256_srli_epi32(_mm256_cmpgt_epi32(tmp1, _mm256_set1_epi32(2)), 31));

  auto need_encode2 = _mm256_cmpgt_epi32(u_q_vec, ZERO);
  _mm256_storeu_si256((__m256i *)mel_need_encode2,
      _mm256_and_si256(need_encode2, _mm256_cmpgt_epi32(tmp, ZERO)));

  ui32 i_max = 8 - (ignore / 2);
  for (ui32 i = 0; i < i_max; i += 2)
  {
    if (mel_need_encode[i])
      mel_encode(melp, mel_bit[i]);
    if (mel_need_encode[i + 1])
      mel_encode(melp, mel_bit[i + 1]);
    if (mel_need_encode2[i])
      mel_encode(melp, mel_bit2[i]);
  }
}
static void proc_mel_encode2(mel_struct *melp, __m256i &cq_vec,
                             __m256i &rho_vec, __m256i u_q_vec,
                             ui32 ignore, const __m256i right_shift)
{
  // non-initial rows need no u_q MEL bits
  int32_t mel_need_encode[8];
  int32_t mel_bit[8];

  _mm256_storeu_si256((__m256i *)mel_need_encode,
                      _mm256_cmpeq_epi32(cq_vec, ZERO));
  _mm256_storeu_si256((__m256i *)mel_bit,
      _mm256_srli_epi32(avx2_cmpneq_epi32(rho_vec, ZERO), 31));

  ui32 i_max = 8 - (ignore / 2);
  for (ui32 i = 0; i < i_max; ++i)
  {
    if (mel_need_encode[i])
      mel_encode(melp, mel_bit[i]);
  }
}
using fn_proc_mel_encode = void (*)(mel_struct *, __m256i &, __m256i &,
                                    __m256i, ui32, const __m256i);
static void proc_vlc_encode1(vlc_struct_avx2 *vlcp, ui32 *tuple,
                             ui32 *u_q, ui32 ignore)
{
  ui32 i_max = 8 - (ignore / 2);
  for (ui32 i = 0; i < i_max; i += 2)
  {
    // start with the two quads' VLC codewords
    ui32 val = tuple[i + 0] >> 4;
    int size = tuple[i + 0] & 7;
    val |= (tuple[i + 1] >> 4) << size;
    size += tuple[i + 1] & 7;

    if (u_q[i] > 2 && u_q[i + 1] > 2)
    { // the MEL bit said both exceed 2, so code u_q - 2 for both
      val |= (ulvc_cwd_pre[u_q[i] - 2]) << size;
      size += ulvc_cwd_pre_len[u_q[i] - 2];
      val |= (ulvc_cwd_pre[u_q[i + 1] - 2]) << size;
      size += ulvc_cwd_pre_len[u_q[i + 1] - 2];
      val |= (ulvc_cwd_suf[u_q[i] - 2]) << size;
      size += ulvc_cwd_suf_len[u_q[i] - 2];
      val |= (ulvc_cwd_suf[u_q[i + 1] - 2]) << size;
      size += ulvc_cwd_suf_len[u_q[i + 1] - 2];
    }
    else if (u_q[i] > 2 && u_q[i + 1] > 0)
    { // only the first exceeds 2; the second is 1 or 2, coded in 1 bit
      val |= (ulvc_cwd_pre[u_q[i]]) << size;
      size += ulvc_cwd_pre_len[u_q[i]];
      val |= (u_q[i + 1] - 1) << size;
      size += 1;
      val |= (ulvc_cwd_suf[u_q[i]]) << size;
      size += ulvc_cwd_suf_len[u_q[i]];
    }
    else
    {
      val |= (ulvc_cwd_pre[u_q[i]]) << size;
      size += ulvc_cwd_pre_len[u_q[i]];
      val |= (ulvc_cwd_pre[u_q[i + 1]]) << size;
      size += ulvc_cwd_pre_len[u_q[i + 1]];
      val |= (ulvc_cwd_suf[u_q[i]]) << size;
      size += ulvc_cwd_suf_len[u_q[i]];
      val |= (ulvc_cwd_suf[u_q[i + 1]]) << size;
      size += ulvc_cwd_suf_len[u_q[i + 1]];
    }
    vlc_encode(vlcp, (int)val, size);
  }
}
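// Initial-row pairs exploit the MEL "both u_q > 2" bit: when it is set,
// both residuals are coded as u_q - 2; when only the first exceeds 2,
// the second (necessarily 1 or 2) costs a single bit. The two quads'
// prefixes are interleaved before their suffixes, matching the
// decoder's read order.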
static void proc_vlc_encode2(vlc_struct_avx2 *vlcp, ui32 *tuple,
                             ui32 *u_q, ui32 ignore)
{
  ui32 i_max = 8 - (ignore / 2);
  for (ui32 i = 0; i < i_max; i += 2)
  {
    ui32 val = tuple[i + 0] >> 4;
    int size = tuple[i + 0] & 7;
    val |= (tuple[i + 1] >> 4) << size;
    size += tuple[i + 1] & 7;

    // non-initial rows: plain U-VLC for both residuals
    val |= ulvc_cwd_pre[u_q[i]] << size;
    size += ulvc_cwd_pre_len[u_q[i]];
    val |= (ulvc_cwd_pre[u_q[i + 1]]) << size;
    size += ulvc_cwd_pre_len[u_q[i + 1]];
    val |= (ulvc_cwd_suf[u_q[i + 0]]) << size;
    size += ulvc_cwd_suf_len[u_q[i + 0]];
    val |= (ulvc_cwd_suf[u_q[i + 1]]) << size;
    size += ulvc_cwd_suf_len[u_q[i + 1]];

    vlc_encode(vlcp, (int)val, size);
  }
}
using fn_proc_vlc_encode = void (*)(vlc_struct_avx2 *, ui32 *, ui32 *, ui32);
void ojph_encode_codeblock_avx2(ui32 *buf, ui32 missing_msbs,
                                ui32 num_passes, ui32 _width, ui32 height,
                                ui32 stride, ui32 *lengths,
                                ojph::mem_elastic_allocator *elastic,
                                ojph::coded_lists *&coded)
{
  ui32 width = (_width + 15) & ~15u; // round up to whole 16-sample loads
  ui32 ignore = width - _width;      // padded columns to skip at the end
  // 4096 samples x 4 bytes, expanded 16/15 for worst-case bit-stuffing
  const int ms_size = (16384 * 16 + 14) / 15;
  const int mel_vlc_size = 3072;
  const int mel_size = 192;
  const int vlc_size = mel_vlc_size - mel_size;

  // MEL and VLC share one buffer: MEL writes forward from the start,
  // VLC backwards from the end, so they can be emitted as one segment
  ui8 ms_buf[ms_size];
  ui8 mel_vlc_buf[mel_vlc_size];
  ui8 *mel_buf = mel_vlc_buf;
  ui8 *vlc_buf = mel_vlc_buf + mel_size;

  mel_struct mel;
  mel_init(&mel, mel_size, mel_buf);
  vlc_struct_avx2 vlc;
  vlc_init(&vlc, vlc_size, vlc_buf);
  ms_struct ms;
  ms_init(&ms, ms_size, ms_buf);

  // bit-plane shift that drops the missing MSBs from each sample
  const ui32 p = 30 - missing_msbs;
  // lane rotations: right_shift brings lane i+1 into lane i, left_shift
  // does the opposite
  const __m256i right_shift = _mm256_set_epi32(
    0, 7, 6, 5, 4, 3, 2, 1
  );
  const __m256i left_shift = _mm256_set_epi32(
    6, 5, 4, 3, 2, 1, 0, 7
  );

  ui32 n_loop = (width + 15) / 16;

  __m256i e_val_vec[65];
  for (ui32 i = 0; i < 65; ++i)
    e_val_vec[i] = ZERO;
  __m256i prev_e_val_vec = ZERO;

  __m256i cx_val_vec[65];
  __m256i prev_cx_val_vec = ZERO;
  ui32 prev_cq = 0;

  fn_proc_cq proc_cq = proc_cq1;
  fn_proc_mel_encode proc_mel_encode = proc_mel_encode1;
  fn_proc_vlc_encode proc_vlc_encode = proc_vlc_encode1;
  ui32 *vlc_tbl = vlc_tbl0;
  for (ui32 y = 0; y < height; y += 2)
  {
    // carry the previous row-pair's rightmost state into the pad column
    e_val_vec[n_loop] = prev_e_val_vec;
    __m256i tmp = _mm256_and_si256(prev_cx_val_vec, _mm256_set1_epi32(8));
    cx_val_vec[n_loop] = _mm256_srli_epi32(tmp, 3);
    prev_e_val_vec = ZERO;
    prev_cx_val_vec = ZERO;

    ui32 *sp = buf + y * stride;

    for (ui32 x = 0; x < n_loop; ++x)
    {
      __m256i src_vec[4];
      // load two rows of 16 samples; the last iteration may need a
      // partial, zero-padded load, and a missing bottom row loads zeros
      if ((x == (n_loop - 1)) && (_width % 16))
      {
        ui32 tmp_buf[16] = { 0 };
        memcpy(tmp_buf, sp, (_width % 16) * sizeof(ui32));
        src_vec[0] = _mm256_loadu_si256((__m256i*)(tmp_buf));
        src_vec[2] = _mm256_loadu_si256((__m256i*)(tmp_buf + 8));
        if (y + 1 < height)
        {
          memcpy(tmp_buf, sp + stride, (_width % 16) * sizeof(ui32));
          src_vec[1] = _mm256_loadu_si256((__m256i*)(tmp_buf));
          src_vec[3] = _mm256_loadu_si256((__m256i*)(tmp_buf + 8));
        }
        else
        {
          src_vec[1] = ZERO;
          src_vec[3] = ZERO;
        }
      }
      else
      {
        src_vec[0] = _mm256_loadu_si256((__m256i*)(sp));
        src_vec[2] = _mm256_loadu_si256((__m256i*)(sp + 8));
        if (y + 1 < height)
        {
          src_vec[1] = _mm256_loadu_si256((__m256i*)(sp + stride));
          src_vec[3] = _mm256_loadu_si256((__m256i*)(sp + 8 + stride));
        }
        else
        {
          src_vec[1] = ZERO;
          src_vec[3] = ZERO;
        }
      }
      sp += 16;
      __m256i eq_vec[4], s_vec[4];
      __m256i rho_vec, e_qmax_vec;
      proc_pixel(src_vec, p, eq_vec, s_vec, rho_vec, e_qmax_vec);

      // exponent predictor from the row above (left and current quads)
      tmp = _mm256_permutevar8x32_epi32(e_val_vec[x], right_shift);
      tmp = _mm256_insert_epi32(tmp,
          _mm_cvtsi128_si32(_mm256_castsi256_si128(e_val_vec[x + 1])), 7);
      auto max_e_vec = _mm256_max_epi32(tmp, e_val_vec[x]);
      max_e_vec = _mm256_sub_epi32(max_e_vec, ONE);

      // kappa = 1 if rho has at most one set bit, else max(max_e, 1)
      tmp = _mm256_max_epi32(max_e_vec, ONE);
      __m256i tmp1 = _mm256_sub_epi32(rho_vec, ONE);
      tmp1 = _mm256_and_si256(rho_vec, tmp1);
      auto cmp = _mm256_cmpeq_epi32(tmp1, ZERO);
      auto kappa_vec1_ = _mm256_and_si256(cmp, ONE);
      auto kappa_vec2_ = _mm256_and_si256(
          _mm256_xor_si256(cmp, _mm256_set1_epi32((int32_t)0xffffffff)), tmp);
      const __m256i kappa_vec = _mm256_max_epi32(kappa_vec1_, kappa_vec2_);

      // per-quad context, shifted so each lane sees its left neighbor
      tmp = proc_cq(x, cx_val_vec, rho_vec, right_shift);
      auto cq_vec = _mm256_permutevar8x32_epi32(tmp, left_shift);
      cq_vec = _mm256_insert_epi32(cq_vec, (int)prev_cq, 0);
      prev_cq = (ui32)_mm256_extract_epi32(tmp, 7);

      update_lep(x, prev_e_val_vec, eq_vec, e_val_vec, left_shift);
      update_lcxp(x, prev_cx_val_vec, rho_vec, cx_val_vec, left_shift);

      auto uq_vec = _mm256_max_epi32(kappa_vec, e_qmax_vec);
      auto u_q_vec = _mm256_sub_epi32(uq_vec, kappa_vec);

      auto eps_vec = cal_eps_vec(eq_vec, u_q_vec, e_qmax_vec);
      __m256i tuple_vec = cal_tuple(cq_vec, rho_vec, eps_vec, vlc_tbl);
      ui32 _ignore = ((n_loop - 1) == x) ? ignore : 0;

      proc_mel_encode(&mel, cq_vec, rho_vec, u_q_vec, _ignore,
                      right_shift);
      proc_ms_encode(&ms, tuple_vec, uq_vec, rho_vec, s_vec);

      ui32 tuple[8], u_q[8];
      tuple_vec = _mm256_srli_epi32(tuple_vec, 4); // keep cwd and cwd_len
      _mm256_storeu_si256((__m256i*)tuple, tuple_vec);
      _mm256_storeu_si256((__m256i*)u_q, u_q_vec);

      proc_vlc_encode(&vlc, tuple, u_q, _ignore);
    }
    // seed the next row-pair's first context from the leftmost quads
    tmp = _mm256_permutevar8x32_epi32(cx_val_vec[0], right_shift);
    tmp = _mm256_slli_epi32(tmp, 2);
    tmp = _mm256_add_epi32(tmp, cx_val_vec[0]);
    prev_cq = (ui32)_mm_cvtsi128_si32(_mm256_castsi256_si128(tmp));

    // after the first row pair, switch to the non-initial-row variants
    proc_cq = proc_cq2;
    proc_mel_encode = proc_mel_encode2;
    proc_vlc_encode = proc_vlc_encode2;
    vlc_tbl = vlc_tbl1;
  }
  terminate_mel_vlc(&mel, &vlc);
  ms_terminate(&ms);

  // cleanup-pass layout: MagSgn bytes, then MEL, then the
  // backwards-grown VLC bytes
  lengths[0] = mel.pos + vlc.pos + ms.pos;
  elastic->get_buffer(mel.pos + vlc.pos + ms.pos, coded);
  memcpy(coded->buf, ms.buf, ms.pos);
  memcpy(coded->buf + ms.pos, mel.buf, mel.pos);
  memcpy(coded->buf + ms.pos + mel.pos, vlc.buf - vlc.pos + 1, vlc.pos);

  // write Scup (the MEL+VLC suffix length) into the segment's last
  // 12 bits, preserving the top nibble of the second-to-last byte
  ui32 num_bytes = mel.pos + vlc.pos;
  coded->buf[lengths[0] - 1] = (ui8)(num_bytes >> 4);
  coded->buf[lengths[0] - 2] = coded->buf[lengths[0] - 2] & 0xF0;
  coded->buf[lengths[0] - 2] =
      (ui8)(coded->buf[lengths[0] - 2] | (num_bytes & 0xF));
}