1 /*
2 *
3 * Copyright 2015 Rockchip Electronics Co. LTD
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 *      http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17 #include <stdlib.h>
18 
19 #include <string.h>
20 
21 #include "mpp_env.h"
22 #include "mpp_mem.h"
23 #include "mpp_debug.h"
24 #include "mpp_common.h"
25 #include "mpp_bitread.h"
26 #include "mpp_packet_impl.h"
27 #include "mpp_compat_impl.h"
28 
29 #include "vp9data.h"
30 #include "vp9d_codec.h"
31 #include "vp9d_parser.h"
32 #include "mpp_frame_impl.h"
33 
34 /**
35  * Clip a signed integer into the 0..(2^p - 1) range.
36  * @param  a value to clip
37  * @param  p bit position to clip at
38  * @return clipped value
39  */
40 static RK_U32 av_clip_uintp2(RK_S32 a, RK_S32 p)
41 {
42     if (a & ~((1 << p) - 1)) return -a >> 31 & ((1 << p) - 1);
43     else                   return  a;
44 }
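
/*
 * Illustrative values: despite taking a signed input, av_clip_uintp2() clamps
 * to the unsigned p-bit range [0, 2^p - 1]. For p = 8:
 *     av_clip_uintp2(300, 8) == 255   (above the range, clipped to 2^8 - 1)
 *     av_clip_uintp2(-5, 8)  ==   0   (below the range, clipped to 0)
 *     av_clip_uintp2(100, 8) == 100   (already in range, returned unchanged)
 */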
45 
46 
47 RK_U32 vp9d_debug = 0;
48 
49 #define VP9_SYNCCODE 0x498342
50 //#define dump
51 #ifdef dump
52 static FILE *vp9_p_fp = NULL;
53 static FILE *vp9_p_fp1 = NULL;
54 static FILE *vp9_p_fp2 = NULL;
55 static RK_S32 dec_num = 0;
56 static RK_S32 count = 0;
57 #endif
58 
59 #ifndef FASTDIV
60 #   define FASTDIV(a,b) ((RK_U32)((((RK_U64)a) * vpx_inverse[b]) >> 32))
61 #endif /* FASTDIV */
62 
63 /* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
64  * for a>16909558, is an overestimate by less than 1 part in 1<<24 */
65 const RK_U32 vpx_inverse[257] = {
66     0, 4294967295U, 2147483648U, 1431655766, 1073741824,  858993460,  715827883,  613566757,
67     536870912,  477218589,  429496730,  390451573,  357913942,  330382100,  306783379,  286331154,
68     268435456,  252645136,  238609295,  226050911,  214748365,  204522253,  195225787,  186737709,
69     178956971,  171798692,  165191050,  159072863,  153391690,  148102321,  143165577,  138547333,
70     134217728,  130150525,  126322568,  122713352,  119304648,  116080198,  113025456,  110127367,
71     107374183,  104755300,  102261127,   99882961,   97612894,   95443718,   93368855,   91382283,
72     89478486,   87652394,   85899346,   84215046,   82595525,   81037119,   79536432,   78090315,
73     76695845,   75350304,   74051161,   72796056,   71582789,   70409300,   69273667,   68174085,
74     67108864,   66076420,   65075263,   64103990,   63161284,   62245903,   61356676,   60492498,
75     59652324,   58835169,   58040099,   57266231,   56512728,   55778797,   55063684,   54366675,
76     53687092,   53024288,   52377650,   51746594,   51130564,   50529028,   49941481,   49367441,
77     48806447,   48258060,   47721859,   47197443,   46684428,   46182445,   45691142,   45210183,
78     44739243,   44278014,   43826197,   43383509,   42949673,   42524429,   42107523,   41698712,
79     41297763,   40904451,   40518560,   40139882,   39768216,   39403370,   39045158,   38693400,
80     38347923,   38008561,   37675152,   37347542,   37025581,   36709123,   36398028,   36092163,
81     35791395,   35495598,   35204650,   34918434,   34636834,   34359739,   34087043,   33818641,
82     33554432,   33294321,   33038210,   32786010,   32537632,   32292988,   32051995,   31814573,
83     31580642,   31350127,   31122952,   30899046,   30678338,   30460761,   30246249,   30034737,
84     29826162,   29620465,   29417585,   29217465,   29020050,   28825284,   28633116,   28443493,
85     28256364,   28071682,   27889399,   27709467,   27531842,   27356480,   27183338,   27012373,
86     26843546,   26676816,   26512144,   26349493,   26188825,   26030105,   25873297,   25718368,
87     25565282,   25414008,   25264514,   25116768,   24970741,   24826401,   24683721,   24542671,
88     24403224,   24265352,   24129030,   23994231,   23860930,   23729102,   23598722,   23469767,
89     23342214,   23216040,   23091223,   22967740,   22845571,   22724695,   22605092,   22486740,
90     22369622,   22253717,   22139007,   22025474,   21913099,   21801865,   21691755,   21582751,
91     21474837,   21367997,   21262215,   21157475,   21053762,   20951060,   20849356,   20748635,
92     20648882,   20550083,   20452226,   20355296,   20259280,   20164166,   20069941,   19976593,
93     19884108,   19792477,   19701685,   19611723,   19522579,   19434242,   19346700,   19259944,
94     19173962,   19088744,   19004281,   18920561,   18837576,   18755316,   18673771,   18592933,
95     18512791,   18433337,   18354562,   18276457,   18199014,   18122225,   18046082,   17970575,
96     17895698,   17821442,   17747799,   17674763,   17602325,   17530479,   17459217,   17388532,
97     17318417,   17248865,   17179870,   17111424,   17043522,   16976156,   16909321,   16843010,
98     16777216
99 };
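
/*
 * Worked example of the FASTDIV() macro with the reciprocal table above.
 * For a = 1000 and b = 7:
 *     FASTDIV(1000, 7) = ((RK_U64)1000 * vpx_inverse[7]) >> 32
 *                      = (1000 * 613566757) >> 32
 *                      = 142
 * which equals 1000 / 7, as guaranteed for 0 <= a <= 16909558 and 2 <= b <= 256.
 */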
100 
101 static void split_parse_frame(SplitContext_t *ctx, RK_U8 *buf, RK_S32 size)
102 {
103     VP9ParseContext *s = (VP9ParseContext *)ctx->priv_data;
104 
105     if (buf[0] & 0x4) {
106         ctx->key_frame = 0;
107     } else {
108         ctx->key_frame = 1;
109     }
110 
111     if (buf[0] & 0x2) {
112         if (ctx->pts == -1)
113             ctx->pts = s->pts;
114         s->pts = -1;
115     } else {
116         s->pts = ctx->pts;
117         ctx->pts = -1;
118     }
119 
120     (void)size;
121 }
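
/*
 * Background for the bit tests above: for VP9 profiles 0-2 the first header
 * byte is laid out (MSB first) as frame_marker(2), profile(2),
 * show_existing_frame(1), frame_type(1), show_frame(1), error_resilient(1),
 * so buf[0] & 0x4 is frame_type (0 = key frame) and buf[0] & 0x2 is
 * show_frame. Profile 3 inserts an extra reserved bit before these fields,
 * which this quick check does not account for.
 */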
122 
123 RK_S32 vp9d_split_frame(SplitContext_t *ctx,
124                         RK_U8 **out_data, RK_S32 *out_size,
125                         RK_U8 *data, RK_S32 size)
126 {
127     VP9ParseContext *s = (VP9ParseContext *)ctx->priv_data;
128     RK_S32 full_size = size;
129     RK_S32 marker;
130 
131     if (size <= 0) {
132         *out_size = 0;
133         *out_data = data;
134 
135         return 0;
136     }
137 
138     if (s->n_frames > 0) {
139         *out_data = data;
140         *out_size = s->size[--s->n_frames];
141         split_parse_frame(ctx, *out_data, *out_size);
142 
143         return s->n_frames > 0 ? *out_size : size /* i.e. include idx tail */;
144     }
145 
146     marker = data[size - 1];
147     if ((marker & 0xe0) == 0xc0) {
148         RK_S32 nbytes = 1 + ((marker >> 3) & 0x3);
149         RK_S32 n_frames = 1 + (marker & 0x7), idx_sz = 2 + n_frames * nbytes;
150 
151         if (size >= idx_sz && data[size - idx_sz] == marker) {
152             RK_U8 *idx = data + size + 1 - idx_sz;
153             RK_S32 first = 1;
154 
155             switch (nbytes) {
156 #define case_n(a, rd) \
157             case a: \
158                 while (n_frames--) { \
159                     RK_U32 sz = rd; \
160                     idx += a; \
161                     if (sz == 0 || sz > (RK_U32)size) { \
162                         s->n_frames = 0; \
163                         *out_size = size > full_size ? full_size : size; \
164                         *out_data = data; \
165                         mpp_err("Superframe packet size too big: %u > %d\n", \
166                                sz, size); \
167                         return full_size; \
168                     } \
169                     if (first) { \
170                         first = 0; \
171                         *out_data = data; \
172                         *out_size = sz; \
173                         s->n_frames = n_frames; \
174                     } else { \
175                         s->size[n_frames] = sz; \
176                     } \
177                     data += sz; \
178                     size -= sz; \
179                 } \
180                 split_parse_frame(ctx, *out_data, *out_size); \
181                 return *out_size
182 
183                 case_n(1, *idx);
184                 case_n(2, MPP_RL16(idx));
185                 case_n(3, MPP_RL24(idx));
186                 case_n(4, MPP_RL32(idx));
187             }
188         }
189     }
190 
191     *out_data = data;
192     *out_size = size;
193     split_parse_frame(ctx, data, size);
194     return size;
195 }
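
/*
 * Worked example of the superframe index handling above: the index trailer
 * ends with a marker byte 0b110xxyyy, where xx+1 is the number of bytes per
 * frame size and yyy+1 the number of frames. For a trailing marker of 0xc2:
 *     nbytes   = 1 + ((0xc2 >> 3) & 0x3) = 1
 *     n_frames = 1 + (0xc2 & 0x7)        = 3
 *     idx_sz   = 2 + 3 * 1               = 5
 * i.e. a 5-byte index (marker, three 1-byte frame sizes, marker repeated),
 * and vp9d_split_frame() then returns the three frames on successive calls.
 */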
196 
197 MPP_RET vp9d_get_frame_stream(Vp9CodecContext *ctx, RK_U8 *buf, RK_S32 length)
198 {
199     RK_S32 buff_size = 0;
200     RK_U8 *data = NULL;
201     RK_S32 size = 0;
202 
203     data = (RK_U8 *)mpp_packet_get_data(ctx->pkt);
204     size = (RK_S32)mpp_packet_get_size(ctx->pkt);
205 
206     if (length > size) {
207         mpp_free(data);
208         mpp_packet_deinit(&ctx->pkt);
209         buff_size = length + 10 * 1024;
210         data = mpp_malloc(RK_U8, buff_size);
211         mpp_packet_init(&ctx->pkt, (void *)data, length);
212         mpp_packet_set_size(ctx->pkt, buff_size);
213     }
214 
215     memcpy(data, buf, length);
216     mpp_packet_set_length(ctx->pkt, length);
217 
218     return MPP_OK;
219 }
220 
221 MPP_RET vp9d_split_init(Vp9CodecContext *vp9_ctx)
222 {
223     SplitContext_t *ps;
224     VP9ParseContext *sc;
225 
226     ps = (SplitContext_t *)mpp_calloc(SplitContext_t, 1);
227     if (!ps) {
228         mpp_err("vp9 parser malloc fail");
229         return MPP_ERR_NOMEM;
230     }
231 
232     sc = (VP9ParseContext *)mpp_calloc(VP9ParseContext, 1);
233     if (!sc) {
234         mpp_err("vp9 parser context malloc fail");
235         mpp_free(ps);
236         return MPP_ERR_NOMEM;
237     }
238 
239     ps->priv_data = (void*)sc;
240     vp9_ctx->priv_data2 = (void*)ps;
241 
242     return MPP_OK;
243 }
244 
245 MPP_RET vp9d_split_deinit(Vp9CodecContext *vp9_ctx)
246 {
247     SplitContext_t *ps = (SplitContext_t *)vp9_ctx->priv_data2;
248 
249     if (ps)
250         MPP_FREE(ps->priv_data);
251     MPP_FREE(vp9_ctx->priv_data2);
252 
253     return MPP_OK;
254 }
255 
256 static RK_S32 vp9_ref_frame(Vp9CodecContext *ctx, VP9Frame *dst, VP9Frame *src)
257 {
258     VP9Context *s = ctx->priv_data;
259     MppFrameImpl *impl_frm = (MppFrameImpl *)dst->f;
260 
261     if (src->ref == NULL || src->slot_index >= 0x7f) {
262         mpp_err("vp9_ref_frame: source frame is invalid");
263         return -1;
264     }
265     dst->slot_index = src->slot_index;
266     dst->ref = src->ref;
267     dst->ref->invisible = src->ref->invisible;
268     dst->ref->ref_count++;
269     vp9d_dbg(VP9D_DBG_REF, "get prop slot frame %p  count %d", dst->f, dst->ref->ref_count);
270     mpp_buf_slot_get_prop(s->slots, src->slot_index, SLOT_FRAME, &dst->f);
271     impl_frm->buffer = NULL; // the parser does not need to process the hal buffer
272     vp9d_dbg(VP9D_DBG_REF, "get prop slot frame after %p", dst->f);
273     return 0;
274 }
275 
276 static void vp9_unref_frame(VP9Context *s, VP9Frame *f)
277 {
278     if (f->ref->ref_count <= 0 || f->slot_index >= 0x7f) {
279         mpp_err("ref count is already zero");
280         return;
281     }
282     f->ref->ref_count--;
283     if (!f->ref->ref_count) {
284         if (f->slot_index <= 0x7f) {
285             if (f->ref->invisible && !f->ref->is_output) {
286                 MppBuffer framebuf = NULL;
287 
288                 mpp_buf_slot_get_prop(s->slots, f->slot_index, SLOT_BUFFER, &framebuf);
289                 mpp_buffer_put(framebuf);
290                 f->ref->invisible = 0;
291             }
292             mpp_buf_slot_clr_flag(s->slots, f->slot_index, SLOT_CODEC_USE);
293         }
294         mpp_free(f->ref);
295         f->slot_index = 0xff;
296         f->ref = NULL;
297     }
298     f->ref = NULL;
299     return;
300 }
301 
302 
303 static RK_S32 vp9_frame_free(VP9Context *s)
304 {
305     RK_S32 i;
306     for (i = 0; i < 3; i++) {
307         if (s->frames[i].ref) {
308             vp9_unref_frame(s, &s->frames[i]);
309         }
310         mpp_frame_deinit(&s->frames[i].f);
311     }
312     for (i = 0; i < 8; i++) {
313         if (s->refs[i].ref) {
314             vp9_unref_frame(s, &s->refs[i]);
315         }
316         mpp_frame_deinit(&s->refs[i].f);
317     }
318     return 0;
319 }
320 
321 static RK_S32 vp9_frame_init(VP9Context *s)
322 {
323     RK_S32 i;
324     for (i = 0; i < 3; i++) {
325         mpp_frame_init(&s->frames[i].f);
326         if (!s->frames[i].f) {
327             vp9_frame_free(s);
328             mpp_err("Failed to allocate frame buffer %d\n", i);
329             return MPP_ERR_NOMEM;
330         }
331         s->frames[i].slot_index = 0x7f;
332         s->frames[i].ref = NULL;
333     }
334 
335     for (i = 0; i < 8; i++) {
336         mpp_frame_init(&(s->refs[i].f));
337         if (!s->refs[i].f) {
338             vp9_frame_free(s);
339             mpp_err("Failed to allocate frame buffer %d\n", i);
340             return MPP_ERR_NOMEM;
341         }
342         s->refs[i].slot_index = 0x7f;
343         s->refs[i].ref = NULL;
344     }
345     return MPP_OK;
346 }
347 
348 MPP_RET vp9d_parser_init(Vp9CodecContext *vp9_ctx, ParserCfg *init)
349 {
350     VP9Context *s = mpp_calloc(VP9Context, 1);
351     vp9_ctx->priv_data = (void*)s;
352     if (!vp9_ctx->priv_data) {
353         mpp_err("vp9 codec context malloc fail");
354         return MPP_ERR_NOMEM;
355     }
356     vp9_frame_init(s);
357     s->last_bpp = 0;
358     s->filter.sharpness = -1;
359 
360 #ifdef dump
361     count = 0;
362 #endif
363 
364     s->packet_slots = init->packet_slots;
365     s->slots = init->frame_slots;
366     s->cfg = init->cfg;
367     s->hw_info = init->hw_info;
368     mpp_buf_slot_setup(s->slots, 25);
369 
370     mpp_env_get_u32("vp9d_debug", &vp9d_debug, 0);
371 
372     return MPP_OK;
373 }
374 
375 MPP_RET vp9d_parser_deinit(Vp9CodecContext *vp9_ctx)
376 {
377     VP9Context *s = vp9_ctx->priv_data;
378     vp9_frame_free(s);
379     mpp_free(s->c_b);
380     s->c_b_size = 0;
381     MPP_FREE(vp9_ctx->priv_data);
382     return MPP_OK;
383 }
384 
385 static RK_U32 hor_align_64(RK_U32 val)
386 {
387     return MPP_ALIGN(val, 64);
388 }
389 
390 static RK_S32 vp9_alloc_frame(Vp9CodecContext *ctx, VP9Frame *frame)
391 {
392     VP9Context *s = ctx->priv_data;
393     mpp_frame_set_width(frame->f, ctx->width);
394     mpp_frame_set_height(frame->f, ctx->height);
395 
396     mpp_frame_set_hor_stride(frame->f, ctx->width * s->bpp / 8);
397     mpp_frame_set_ver_stride(frame->f, ctx->height);
398     mpp_frame_set_errinfo(frame->f, 0);
399     mpp_frame_set_discard(frame->f, 0);
400     mpp_frame_set_pts(frame->f, s->pts);
401     // set current poc
402     s->cur_poc++;
403     mpp_frame_set_poc(frame->f, s->cur_poc);
404 
405     if (MPP_FRAME_FMT_IS_FBC(s->cfg->base.out_fmt)) {
406         RK_U32 fbc_hdr_stride = MPP_ALIGN(ctx->width, 64);
407 
408         mpp_slots_set_prop(s->slots, SLOTS_HOR_ALIGN, hor_align_64);
409         mpp_frame_set_fmt(frame->f, ctx->pix_fmt | ((s->cfg->base.out_fmt & (MPP_FRAME_FBC_MASK))));
410 
411         if (*compat_ext_fbc_hdr_256_odd)
412             fbc_hdr_stride = MPP_ALIGN(ctx->width, 256) | 256;
413 
414         mpp_frame_set_fbc_hdr_stride(frame->f, fbc_hdr_stride);
415     } else
416         mpp_frame_set_fmt(frame->f, ctx->pix_fmt);
417 
418     if (s->cfg->base.enable_thumbnail && s->hw_info->cap_down_scale)
419         mpp_frame_set_thumbnail_en(frame->f, 1);
420     else
421         mpp_frame_set_thumbnail_en(frame->f, 0);
422 
423     mpp_buf_slot_get_unused(s->slots, &frame->slot_index);
424     mpp_buf_slot_set_prop(s->slots, frame->slot_index, SLOT_FRAME, frame->f);
425     mpp_buf_slot_set_flag(s->slots, frame->slot_index, SLOT_CODEC_USE);
426     mpp_buf_slot_set_flag(s->slots, frame->slot_index, SLOT_HAL_OUTPUT);
427     frame->ref = mpp_calloc(RefInfo, 1);
428     frame->ref->ref_count++;
429     frame->ref->invisible = s->invisible;
430     frame->ref->is_output = 0;
431 
432     return 0;
433 }
434 
435 
436 
437 // for some reason the sign bit is at the end, not the start, of a bit sequence
438 static RK_S32 get_sbits_inv(BitReadCtx_t *gb, RK_S32 n)
439 {
440     RK_S32 value;
441     RK_S32 v;
442     READ_BITS(gb, n, &v);
443     READ_ONEBIT(gb, &value);
444     return value ? -v : v;
445 __BITREAD_ERR:
446     return MPP_ERR_STREAM;
447 }
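
/*
 * Example: get_sbits_inv() reads the magnitude first and the sign bit last.
 * With n = 4, the bit string 0101 followed by a sign bit of 1 decodes to -5;
 * a sign bit of 0 would decode to +5.
 */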
448 
449 static RK_S32 update_size(Vp9CodecContext *ctx, RK_S32 w, RK_S32 h, RK_S32 fmt)
450 {
451     VP9Context *s = ctx->priv_data;
452 
453     if (w == ctx->width && h == ctx->height && ctx->pix_fmt == fmt)
454         return 0;
455 
456     ctx->width   = w;
457     ctx->height  = h;
458     ctx->pix_fmt = fmt;
459     s->sb_cols   = (w + 63) >> 6;
460     s->sb_rows   = (h + 63) >> 6;
461     s->cols      = (w + 7) >> 3;
462     s->rows      = (h + 7) >> 3;
463 
464     // these will be re-allocated a little later
465     if (s->bpp != s->last_bpp) {
466         s->last_bpp = s->bpp;
467     }
468 
469     return 0;
470 }
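
/*
 * Example of the derived sizes for a 1920x1080 frame:
 *     sb_cols = (1920 + 63) >> 6 = 30    64x64 superblock columns
 *     sb_rows = (1080 + 63) >> 6 = 17    64x64 superblock rows
 *     cols    = (1920 + 7)  >> 3 = 240   8x8 block columns
 *     rows    = (1080 + 7)  >> 3 = 135   8x8 block rows
 */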
471 
472 static RK_S32 inv_recenter_nonneg(RK_S32 v, RK_S32 m)
473 {
474     return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
475 }
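
/*
 * Example: inv_recenter_nonneg() maps a small coded value v back to a number
 * near the reference m, alternating below and above it. With m = 10:
 *     v = 0 -> 10, v = 1 -> 9, v = 2 -> 11, v = 3 -> 8, v = 4 -> 12, ...
 * and any v > 2 * m (here v > 20) is returned unchanged.
 */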
476 
477 // differential forward probability updates
478 static RK_S32 update_prob(VpxRangeCoder *c, RK_S32 p, RK_U8 *delta)
479 {
480     static const RK_S32 inv_map_table[255] = {
481         7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
482         189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
483         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
484         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
485         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
486         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
487         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
488         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
489         101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
490         116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
491         131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
492         146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
493         161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
494         177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
495         192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
496         207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
497         222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
498         237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
499         252, 253, 253,
500     };
501     RK_S32 d;
502 
503     /* This code is trying to do a differential probability update. For a
504      * current probability A in the range [1, 255], the difference to a new
505      * probability of any value can be expressed differentially as 1-A,255-A
506      * where some part of this (absolute range) exists both in positive as
507      * well as the negative part, whereas another part only exists in one
508      * half. We're trying to code this shared part differentially, i.e.
509      * times two where the value of the lowest bit specifies the sign, and
510      * the single part is then coded on top of this. This absolute difference
511      * then again has a value of [0,254], but a bigger value in this range
512      * indicates that we're further away from the original value A, so we
513      * can code this as a VLC code, since higher values are increasingly
514      * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
515      * updates vs. the 'fine, exact' updates further down the range, which
516      * adds one extra dimension to this differential update model. */
517 
518     if (!vpx_rac_get(c)) {
519         d = vpx_rac_get_uint(c, 4) + 0;
520     } else if (!vpx_rac_get(c)) {
521         d = vpx_rac_get_uint(c, 4) + 16;
522     } else if (!vpx_rac_get(c)) {
523         d = vpx_rac_get_uint(c, 5) + 32;
524     } else {
525         d = vpx_rac_get_uint(c, 7);
526         if (d >= 65)
527             d = (d << 1) - 65 + vpx_rac_get(c);
528         d += 64;
529         //av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
530     }
531     *delta = d;
532     return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
533            255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
534 }
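
/*
 * Worked example for update_prob() with an old probability of p = 128:
 * a decoded symbol d = 20 gives inv_map_table[20] == 1, so the new value is
 *     1 + inv_recenter_nonneg(1, 127) = 1 + 126 = 127   (a fine step of 1),
 * while d = 0 gives inv_map_table[0] == 7 and
 *     1 + inv_recenter_nonneg(7, 127) = 1 + 123 = 124   (a coarse step of 4),
 * matching the cheap/rough vs. fine/exact split described in the comment above.
 */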
535 
536 static RK_S32 mpp_get_bit1(BitReadCtx_t *gb)
537 {
538     RK_S32 value;
539     READ_ONEBIT(gb, &value);
540     return value;
541 __BITREAD_ERR:
542     return 0;
543 }
544 
545 static RK_S32 mpp_get_bits(BitReadCtx_t *gb, RK_S32 num_bit)
546 {
547     RK_S32 value;
548     READ_BITS(gb, num_bit, &value);
549     return value;
550 __BITREAD_ERR:
551     return 0;
552 }
553 
554 static RK_S32 read_colorspace_details(Vp9CodecContext *ctx)
555 {
556     static const MppFrameColorSpace colorspaces[8] = {
557         MPP_FRAME_SPC_UNSPECIFIED, MPP_FRAME_SPC_BT470BG, MPP_FRAME_SPC_BT709, MPP_FRAME_SPC_SMPTE170M,
558         MPP_FRAME_SPC_SMPTE240M, MPP_FRAME_SPC_BT2020_NCL, MPP_FRAME_SPC_RESERVED, MPP_FRAME_SPC_RGB,
559     };
560     VP9Context *s = ctx->priv_data;
561     RK_S32 res;
562     RK_S32 bits = ctx->profile <= 1 ? 0 : 1 + mpp_get_bit1(&s->gb); // 0:8, 1:10, 2:12
563 
564     vp9d_dbg(VP9D_DBG_HEADER, "bit_depth %d", 8 + bits * 2);
565     s->bpp_index = bits;
566     s->bpp = 8 + bits * 2;
567     s->bytesperpixel = (7 + s->bpp) >> 3;
568     ctx->colorspace = colorspaces[mpp_get_bits(&s->gb, 3)];
569     vp9d_dbg(VP9D_DBG_HEADER, "color_space %d", ctx->colorspace);
570     if (ctx->colorspace == MPP_FRAME_SPC_RGB) { // RGB = profile 1
571 
572         {
573             mpp_err("RGB not supported in profile %d\n", ctx->profile);
574             return MPP_ERR_STREAM;
575         }
576     } else {
577         static const RK_S32 pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
578             {   { -1, MPP_FMT_YUV422SP },
579                 { -1, MPP_FMT_YUV420SP }
580             },
581             {   { -1, MPP_FMT_YUV422SP_10BIT},
582                 { -1, MPP_FMT_YUV420SP_10BIT}
583             },
584             {   { -1, -1 },
585                 { -1, -1 }
586             }
587         };
588         ctx->color_range = mpp_get_bit1(&s->gb) ? MPP_FRAME_RANGE_JPEG : MPP_FRAME_RANGE_MPEG;
589         vp9d_dbg(VP9D_DBG_HEADER, "color_range %d", ctx->color_range);
590         if (ctx->profile & 1) {
591             s->ss_h = mpp_get_bit1(&s->gb);
592             vp9d_dbg(VP9D_DBG_HEADER, "subsampling_x %d", s->ss_h);
593             s->ss_v = mpp_get_bit1(&s->gb);
594             vp9d_dbg(VP9D_DBG_HEADER, "subsampling_y %d", s->ss_v);
595             s->extra_plane = 0;
596             res = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
597             if (res == MPP_FMT_YUV420SP || res < 0) {
598                 mpp_err("YUV FMT %d not supported in profile %d\n", res, ctx->profile);
599                 return MPP_ERR_STREAM;
600             } else if (mpp_get_bit1(&s->gb)) {
601                 s->extra_plane = 1;
602                 vp9d_dbg(VP9D_DBG_HEADER, "has_extra_plane 1");
603                 mpp_err("Profile %d color details reserved bit set\n", ctx->profile);
604                 return  MPP_ERR_STREAM;
605             }
606         } else {
607             s->extra_plane = 0;
608             s->ss_h = s->ss_v = 1;
609             res = pix_fmt_for_ss[bits][1][1];
610         }
611     }
612 
613     return res;
614 }
615 
616 static RK_S32 decode012(BitReadCtx_t *gb)
617 {
618     RK_S32 n;
619     n = mpp_get_bit1(gb);
620     if (n == 0)
621         return 0;
622     else
623         return mpp_get_bit1(gb) + 1;
624 }
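
/*
 * Note: decode012() reads a value in 0..2 with a short prefix code:
 * "0" -> 0, "10" -> 1, "11" -> 2. It is used below to read log2_tile_rows.
 */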
625 
626 static RK_S32 decode_parser_header(Vp9CodecContext *ctx,
627                                    const RK_U8 *data, RK_S32 size, RK_S32 *refo)
628 {
629     VP9Context *s = ctx->priv_data;
630     RK_S32 c, i, j, k, l, m, n, max, size2, res, sharp;
631     RK_U32 w, h;
632     RK_S32 fmt = ctx->pix_fmt;
633     RK_S32 last_invisible;
634     const RK_U8 *data2;
635 #ifdef dump
636     char filename[20] = "data/acoef";
637     if (vp9_p_fp2 != NULL) {
638         fclose(vp9_p_fp2);
639 
640     }
641     sprintf(&filename[10], "%d.bin", dec_num);
642     vp9_p_fp2 = fopen(filename, "wb");
643 #endif
644 
645     /* general header */
646     mpp_set_bitread_ctx(&s->gb, (RK_U8*)data, size);
647     if (mpp_get_bits(&s->gb, 2) != 0x2) { // frame marker
648         mpp_err("Invalid frame marker\n");
649         return MPP_ERR_STREAM;
650     }
651     ctx->profile  = mpp_get_bit1(&s->gb);
652     ctx->profile |= mpp_get_bit1(&s->gb) << 1;
653     if (ctx->profile == 3) ctx->profile += mpp_get_bit1(&s->gb);
654     if (ctx->profile > 3) {
655         mpp_err("Profile %d is not yet supported\n", ctx->profile);
656         return MPP_ERR_STREAM;
657     }
658     vp9d_dbg(VP9D_DBG_HEADER, "profile %d", ctx->profile);
659     s->show_existing_frame = mpp_get_bit1(&s->gb);
660     vp9d_dbg(VP9D_DBG_HEADER, "show_existing_frame %d", s->show_existing_frame);
661     if (s->show_existing_frame) {
662         *refo = mpp_get_bits(&s->gb, 3);
663         vp9d_dbg(VP9D_DBG_HEADER, "frame_to_show %d", *refo);
664         return 0;
665     }
666     s->last_keyframe  = s->keyframe;
667     s->keyframe       = !mpp_get_bit1(&s->gb);
668     vp9d_dbg(VP9D_DBG_HEADER, "frame_type %d", s->keyframe);
669     last_invisible    = s->invisible;
670     s->invisible      = !mpp_get_bit1(&s->gb);
671     vp9d_dbg(VP9D_DBG_HEADER, "show_frame_flag %d", s->invisible);
672     s->errorres       = mpp_get_bit1(&s->gb);
673     vp9d_dbg(VP9D_DBG_HEADER, "error_resilient_mode %d", s->errorres);
674     s->use_last_frame_mvs = !s->errorres && !last_invisible;
675     s->got_keyframes += s->keyframe ? 1 : 0;
676     vp9d_dbg(VP9D_DBG_HEADER, "keyframe=%d, intraonly=%d, got_keyframes=%d\n",
677              s->keyframe, s->intraonly, s->got_keyframes);
678     if (!s->got_keyframes) {
679         mpp_err_f("have not received a keyframe yet.\n");
680         return MPP_ERR_STREAM;
681     }
682     if (s->keyframe) {
683         if (mpp_get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
684             mpp_err("Invalid sync code\n");
685             return MPP_ERR_STREAM;
686         }
687 
688         if ((fmt = read_colorspace_details(ctx)) < 0)
689             return fmt;
690         // for profile 1, here follows the subsampling bits
691         s->refreshrefmask = 0xff;
692         w = mpp_get_bits(&s->gb, 16) + 1;
693         vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
694         h = mpp_get_bits(&s->gb, 16) + 1;
695         vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
696         if (mpp_get_bit1(&s->gb)) {// display size
697             RK_S32 dw, dh;
698             vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
699             dw = mpp_get_bits(&s->gb, 16) + 1;
700             vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
701             dh = mpp_get_bits(&s->gb, 16) + 1;
702             vp9d_dbg(VP9D_DBG_HEADER, "display_size_height %d", dh);
703         } else
704             vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
705     } else {
706         s->intraonly  = s->invisible ? mpp_get_bit1(&s->gb) : 0;
707         vp9d_dbg(VP9D_DBG_HEADER, "intra_only %d", s->intraonly);
708         s->resetctx   = s->errorres ? 0 : mpp_get_bits(&s->gb, 2);
709         vp9d_dbg(VP9D_DBG_HEADER, "reset_frame_context_value %d", s->resetctx);
710         if (s->intraonly) {
711             if (mpp_get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
712                 mpp_err("Invalid sync code\n");
713                 return MPP_ERR_STREAM;
714             }
715             if (ctx->profile == 1) {
716                 if ((fmt = read_colorspace_details(ctx)) < 0)
717                     return fmt;
718             } else {
719                 s->ss_h = s->ss_v = 1;
720                 s->bpp = 8;
721                 s->bpp_index = 0;
722                 s->bytesperpixel = 1;
723                 fmt = MPP_FMT_YUV420SP;
724                 ctx->colorspace = MPP_FRAME_SPC_BT470BG;
725                 ctx->color_range = MPP_FRAME_RANGE_JPEG;
726             }
727             s->refreshrefmask = mpp_get_bits(&s->gb, 8);
728             vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_flags %d", s->refreshrefmask);
729             w = mpp_get_bits(&s->gb, 16) + 1;
730             vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
731             h = mpp_get_bits(&s->gb, 16) + 1;
732             vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
733             if (mpp_get_bit1(&s->gb)) {// display size
734                 RK_S32 dw, dh;
735                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
736                 dw = mpp_get_bits(&s->gb, 16) + 1;
737                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
738                 dh = mpp_get_bits(&s->gb, 16) + 1;
739                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_height %d", dh);
740             } else
741                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
742         } else {
743             s->refreshrefmask = mpp_get_bits(&s->gb, 8);
744             vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_flags %d", s->refreshrefmask);
745             s->refidx[0]      = mpp_get_bits(&s->gb, 3);
746             s->signbias[0]    = mpp_get_bit1(&s->gb) && !s->errorres;
747             s->refidx[1]      = mpp_get_bits(&s->gb, 3);
748             s->signbias[1]    = mpp_get_bit1(&s->gb) && !s->errorres;
749             s->refidx[2]      = mpp_get_bits(&s->gb, 3);
750             s->signbias[2]    = mpp_get_bit1(&s->gb) && !s->errorres;
751             vp9d_dbg(VP9D_DBG_HEADER, "ref_idx %d %d %d",
752                      s->refidx[0], s->refidx[1], s->refidx[2]);
753             vp9d_dbg(VP9D_DBG_HEADER, "ref_idx_ref_frame_sign_bias %d %d %d",
754                      s->signbias[0], s->signbias[1], s->signbias[2]);
755             if (!s->refs[s->refidx[0]].ref ||
756                 !s->refs[s->refidx[1]].ref ||
757                 !s->refs[s->refidx[2]].ref ) {
758                 mpp_err("Not all references are available\n");
759                 //return -1;//AVERROR_INVALIDDATA;
760             }
761             if (mpp_get_bit1(&s->gb)) {
762 
763                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 0");
764                 w = mpp_frame_get_width(s->refs[s->refidx[0]].f);
765                 h = mpp_frame_get_height(s->refs[s->refidx[0]].f);
766             } else if (mpp_get_bit1(&s->gb)) {
767                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 2");
768                 w = mpp_frame_get_width(s->refs[s->refidx[1]].f);
769                 h = mpp_frame_get_height(s->refs[s->refidx[1]].f);
770             } else if (mpp_get_bit1(&s->gb)) {
771                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 1");
772                 w = mpp_frame_get_width(s->refs[s->refidx[2]].f);
773                 h = mpp_frame_get_height(s->refs[s->refidx[2]].f);
774             } else {
775                 w = mpp_get_bits(&s->gb, 16) + 1;
776                 vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
777                 h = mpp_get_bits(&s->gb, 16) + 1;
778                 vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
779             }
780             if (w == 0 || h == 0) {
781                 mpp_err("ref frame w:%d h:%d\n", w, h);
782                 return -1;
783             }
784             // Note that in this code, "CUR_FRAME" is actually before we
785             // have formally allocated a frame, and thus actually represents
786             // the _last_ frame
787             s->use_last_frame_mvs &= mpp_frame_get_width(s->frames[CUR_FRAME].f) == w &&
788                                      mpp_frame_get_height(s->frames[CUR_FRAME].f) == h;
789             if (mpp_get_bit1(&s->gb)) {// display size
790                 RK_S32 dw, dh;
791                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
792                 dw = mpp_get_bits(&s->gb, 16) + 1;
793                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
794                 dh = mpp_get_bits(&s->gb, 16) + 1;
795                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_height %d", dh);
796             } else
797                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
798             s->highprecisionmvs = mpp_get_bit1(&s->gb);
799             vp9d_dbg(VP9D_DBG_HEADER, "allow_high_precision_mv %d", s->highprecisionmvs);
800             s->filtermode = mpp_get_bit1(&s->gb) ? FILTER_SWITCHABLE :
801                             mpp_get_bits(&s->gb, 2);
802             vp9d_dbg(VP9D_DBG_HEADER, "filtermode %d", s->filtermode);
803             s->allowcompinter = (s->signbias[0] != s->signbias[1] ||
804                                  s->signbias[0] != s->signbias[2]);
805             if (s->allowcompinter) {
806                 if (s->signbias[0] == s->signbias[1]) {
807                     s->fixcompref    = 2;
808                     s->varcompref[0] = 0;
809                     s->varcompref[1] = 1;
810                 } else if (s->signbias[0] == s->signbias[2]) {
811                     s->fixcompref    = 1;
812                     s->varcompref[0] = 0;
813                     s->varcompref[1] = 2;
814                 } else {
815                     s->fixcompref    = 0;
816                     s->varcompref[0] = 1;
817                     s->varcompref[1] = 2;
818                 }
819             }
820 
821             for (i = 0; i < 3; i++) {
822                 RK_U32 refw = mpp_frame_get_width(s->refs[s->refidx[i]].f);
823                 RK_U32 refh = mpp_frame_get_height(s->refs[s->refidx[i]].f);
824                 RK_S32 reffmt = mpp_frame_get_fmt(s->refs[s->refidx[i]].f) & MPP_FRAME_FMT_MASK;
825 
826                 vp9d_dbg(VP9D_DBG_REF, "ref get width frame slot %p", s->refs[s->refidx[i]].f);
827                 if (reffmt != fmt) {
828                     /* mpp_err("Ref pixfmt (%s) did not match current frame (%s)",
829                            av_get_pix_fmt_name(ref->format),
830                            av_get_pix_fmt_name(fmt)); */
831                     //return -1;//AVERROR_INVALIDDATA;
832                 } else if (refw == w && refh == h) {
833                     s->mvscale[i][0] = (refw << 14) / w;
834                     s->mvscale[i][1] = (refh << 14) / h;
835                 } else {
836                     if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
837                         mpp_err("Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
838                                 refw, refh, w, h);
839                         return MPP_ERR_VALUE;
840                     }
841                     s->mvscale[i][0] = (refw << 14) / w;
842                     s->mvscale[i][1] = (refh << 14) / h;
843                     s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
844                     s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
845                 }
846             }
847         }
848     }
849 
850     s->refreshctx   = s->errorres ? 0 : mpp_get_bit1(&s->gb);
851     vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_context_flag %d", s->refreshctx);
852     s->parallelmode = s->errorres ? 1 : mpp_get_bit1(&s->gb);
853     vp9d_dbg(VP9D_DBG_HEADER, "frame_parallel_decoding_mode %d", s->parallelmode);
854     s->framectxid   = c = mpp_get_bits(&s->gb, 2);
855     vp9d_dbg(VP9D_DBG_HEADER, "frame_context_idx %d", s->framectxid);
856 
857     /* loopfilter header data */
858     if (s->keyframe || s->errorres || s->intraonly) {
859         // reset loopfilter defaults
860         s->lf_delta.ref[0] = 1;
861         s->lf_delta.ref[1] = 0;
862         s->lf_delta.ref[2] = -1;
863         s->lf_delta.ref[3] = -1;
864         s->lf_delta.mode[0] = 0;
865         s->lf_delta.mode[1] = 0;
866     }
867     s->filter.level = mpp_get_bits(&s->gb, 6);
868     vp9d_dbg(VP9D_DBG_HEADER, "filter_level %d", s->filter.level);
869     sharp = mpp_get_bits(&s->gb, 3);
870     vp9d_dbg(VP9D_DBG_HEADER, "sharpness_level %d", sharp);
871     // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
872     // the old cache values since they are still valid
873     if (s->filter.sharpness != sharp)
874         memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut));
875     s->filter.sharpness = sharp;
876 
877     if ((s->lf_delta.enabled = mpp_get_bit1(&s->gb))) {
878         vp9d_dbg(VP9D_DBG_HEADER, "mode_ref_delta_enabled 1");
879         if ((s->lf_delta.update = mpp_get_bit1(&s->gb))) {
880             vp9d_dbg(VP9D_DBG_HEADER, "mode_ref_delta_update 1");
881             for (i = 0; i < 4; i++) {
882                 if (mpp_get_bit1(&s->gb))
883                     s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
884                 vp9d_dbg(VP9D_DBG_HEADER, "ref_deltas %d", s->lf_delta.ref[i]);
885             }
886             for (i = 0; i < 2; i++) {
887                 if (mpp_get_bit1(&s->gb))
888                     s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
889                 vp9d_dbg(VP9D_DBG_HEADER, "mode_deltas %d", s->lf_delta.mode[i]);
890             }
891         }
892     }
893 
894     /* quantization header data */
895     s->yac_qi      = mpp_get_bits(&s->gb, 8);
896     vp9d_dbg(VP9D_DBG_HEADER, "base_qindex %d", s->yac_qi);
897     s->ydc_qdelta  = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
898     vp9d_dbg(VP9D_DBG_HEADER, "ydc_qdelta %d", s->ydc_qdelta);
899     s->uvdc_qdelta = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
900     vp9d_dbg(VP9D_DBG_HEADER, "uvdc_qdelta %d", s->uvdc_qdelta);
901     s->uvac_qdelta = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
902     vp9d_dbg(VP9D_DBG_HEADER, "uvac_qdelta %d", s->uvac_qdelta);
903     s->lossless    = s->yac_qi == 0 && s->ydc_qdelta == 0 &&
904                      s->uvdc_qdelta == 0 && s->uvac_qdelta == 0;
905 
906     /* segmentation header info */
907     s->segmentation.update_map = 0;
908     s->segmentation.ignore_refmap = 0;
909     if ((s->segmentation.enabled = mpp_get_bit1(&s->gb))) {
910         vp9d_dbg(VP9D_DBG_HEADER, "segmentation_enabled 1");
911         if ((s->segmentation.update_map = mpp_get_bit1(&s->gb))) {
912             vp9d_dbg(VP9D_DBG_HEADER, "update_map 1");
913             for (i = 0; i < 7; i++) {
914                 s->prob.seg[i] = mpp_get_bit1(&s->gb) ?
915                                  mpp_get_bits(&s->gb, 8) : 255;
916                 vp9d_dbg(VP9D_DBG_HEADER, "tree_probs %d value 0x%x", i, s->prob.seg[i]);
917             }
918             s->segmentation.temporal = mpp_get_bit1(&s->gb);
919             if (s->segmentation.temporal) {
920                 vp9d_dbg(VP9D_DBG_HEADER, "temporal_update 1");
921                 for (i = 0; i < 3; i++) {
922                     s->prob.segpred[i] = mpp_get_bit1(&s->gb) ?
923                                          mpp_get_bits(&s->gb, 8) : 255;
924                     vp9d_dbg(VP9D_DBG_HEADER, "pred_probs %d %d", i, s->prob.segpred[i]);
925                 }
926             } else {
927                 for (i = 0; i < 3; i++)
928                     s->prob.segpred[i] = 0xff;
929             }
930         }
931         if ((!s->segmentation.update_map || s->segmentation.temporal) &&
932             (w !=  mpp_frame_get_width(s->frames[CUR_FRAME].f) ||
933              h !=  mpp_frame_get_height(s->frames[CUR_FRAME].f))) {
934             /* av_log(ctx, AV_LOG_WARNING,
935                    "Reference segmap (temp=%d,update=%d) enabled on size-change!\n",
936                    s->segmentation.temporal, s->segmentation.update_map);
937                 s->segmentation.ignore_refmap = 1; */
938             //return -1;//AVERROR_INVALIDDATA;
939         }
940 
941         if (mpp_get_bit1(&s->gb)) {
942             vp9d_dbg(VP9D_DBG_HEADER, "update_data 1");
943             s->segmentation.absolute_vals = mpp_get_bit1(&s->gb);
944             vp9d_dbg(VP9D_DBG_HEADER, "abs_delta %d", s->segmentation.absolute_vals);
945             for (i = 0; i < 8; i++) {
946                 if ((s->segmentation.feat[i].q_enabled = mpp_get_bit1(&s->gb)))
947                     s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
948                 vp9d_dbg(VP9D_DBG_HEADER, "frame_qp_delta %d", s->segmentation.feat[i].q_val);
949                 if ((s->segmentation.feat[i].lf_enabled = mpp_get_bit1(&s->gb)))
950                     s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
951                 vp9d_dbg(VP9D_DBG_HEADER, "frame_loopfilter_value %d %d", i, s->segmentation.feat[i].lf_val);
952                 if ((s->segmentation.feat[i].ref_enabled = mpp_get_bit1(&s->gb)))
953                     s->segmentation.feat[i].ref_val = mpp_get_bits(&s->gb, 2);
954                 vp9d_dbg(VP9D_DBG_HEADER, "frame_reference_info %d %d", i, s->segmentation.feat[i].ref_val);
955                 s->segmentation.feat[i].skip_enabled = mpp_get_bit1(&s->gb);
956                 vp9d_dbg(VP9D_DBG_HEADER, "frame_skip %d %d", i, s->segmentation.feat[i].skip_enabled);
957             }
958         }
959     } else {
960         vp9d_dbg(VP9D_DBG_HEADER, "segmentation_enabled 0");
961         s->segmentation.feat[0].q_enabled    = 0;
962         s->segmentation.feat[0].lf_enabled   = 0;
963         s->segmentation.feat[0].skip_enabled = 0;
964         s->segmentation.feat[0].ref_enabled  = 0;
965     }
966 
967     // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
968     for (i = 0; i < (s->segmentation.enabled ? 8 : 1); i++) {
969         RK_S32 qyac, qydc, quvac, quvdc, lflvl, sh;
970 
971         if (s->segmentation.feat[i].q_enabled) {
972             if (s->segmentation.absolute_vals)
973                 qyac = s->segmentation.feat[i].q_val;
974             else
975                 qyac = s->yac_qi + s->segmentation.feat[i].q_val;
976         } else {
977             qyac  = s->yac_qi;
978         }
979         qydc  = av_clip_uintp2(qyac + s->ydc_qdelta, 8);
980         quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8);
981         quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8);
982         qyac  = av_clip_uintp2(qyac, 8);
983 
984         s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[s->bpp_index][qydc];
985         s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[s->bpp_index][qyac];
986         s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[s->bpp_index][quvdc];
987         s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[s->bpp_index][quvac];
988 
989         sh = s->filter.level >= 32;
990         if (s->segmentation.feat[i].lf_enabled) {
991             if (s->segmentation.absolute_vals)
992                 lflvl = av_clip_uintp2(s->segmentation.feat[i].lf_val, 6);
993             else
994                 lflvl = av_clip_uintp2(s->filter.level + s->segmentation.feat[i].lf_val, 6);
995         } else {
996             lflvl  = s->filter.level;
997         }
998         if (s->lf_delta.enabled) {
999             s->segmentation.feat[i].lflvl[0][0] =
1000                 s->segmentation.feat[i].lflvl[0][1] =
1001                     av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6);
1002             for (j = 1; j < 4; j++) {
1003                 s->segmentation.feat[i].lflvl[j][0] =
1004                     av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
1005                                              s->lf_delta.mode[0]) * (1 << sh)), 6);
1006                 s->segmentation.feat[i].lflvl[j][1] =
1007                     av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
1008                                              s->lf_delta.mode[1]) * (1 << sh)), 6);
1009             }
1010         } else {
1011             memset(s->segmentation.feat[i].lflvl, lflvl,
1012                    sizeof(s->segmentation.feat[i].lflvl));
1013         }
1014     }
1015 
1016     /* tiling info */
1017     if ((res = update_size(ctx, w, h, fmt)) < 0) {
1018         mpp_err("Failed to initialize decoder for %dx%d @ %d\n", w, h, fmt);
1019         return res;
1020     }
1021 
1022     for (s->tiling.log2_tile_cols = 0;
1023          (s->sb_cols >> s->tiling.log2_tile_cols) > 64;
1024          s->tiling.log2_tile_cols++) ;
1025     for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
1026     max = MPP_MAX(0, max - 1);
1027     while ((RK_U32)max > s->tiling.log2_tile_cols) {
1028         if (mpp_get_bit1(&s->gb)) {
1029             s->tiling.log2_tile_cols++;
1030             vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_col_end_flag 1");
1031         } else {
1032             vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_col_end_flag 0");
1033             break;
1034         }
1035     }
1036     s->tiling.log2_tile_rows = decode012(&s->gb);
1037     vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_rows %d", s->tiling.log2_tile_rows);
1038     s->tiling.tile_rows = 1 << s->tiling.log2_tile_rows;
1039     if (s->tiling.tile_cols != (1U << s->tiling.log2_tile_cols)) {
1040         s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols;
1041         {
1042             RK_U32 min_size = sizeof(VpxRangeCoder) * s->tiling.tile_cols;
1043             if (min_size > s->c_b_size) {
1044                 s->c_b = (VpxRangeCoder *)mpp_malloc(RK_U8, min_size);
1045                 s->c_b_size = min_size;
1046             }
1047         }
1048         if (!s->c_b) {
1049             mpp_err("Ran out of memory during range coder init\n");
1050             return MPP_ERR_NOMEM;
1051         }
1052     }
1053 
1054     if (s->keyframe || s->errorres ||
1055         (s->intraonly && s->resetctx == 3)) {
1056         s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
1057                                                   s->prob_ctx[3].p = vp9_default_probs;
1058         memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
1059                sizeof(vp9_default_coef_probs));
1060         memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
1061                sizeof(vp9_default_coef_probs));
1062         memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
1063                sizeof(vp9_default_coef_probs));
1064         memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
1065                sizeof(vp9_default_coef_probs));
1066     } else if (s->intraonly && s->resetctx == 2) {
1067         s->prob_ctx[c].p = vp9_default_probs;
1068         memcpy(s->prob_ctx[c].coef, vp9_default_coef_probs,
1069                sizeof(vp9_default_coef_probs));
1070     }
1071     if (s->keyframe || s->errorres || s->intraonly)
1072         s->framectxid = c = 0;
1073 
1074     // the next 16 bits are the size of the rest of the header (arith-coded)
1075     size2 = mpp_get_bits(&s->gb, 16);
1076     vp9d_dbg(VP9D_DBG_HEADER, "first_partition_size %d", size2);
1077     data2 = mpp_align_get_bits(&s->gb);
1078     vp9d_dbg(VP9D_DBG_HEADER, "offset %d", data2 - data);
1079     if (size2 > size - (data2 - data)) {
1080         mpp_err("Invalid compressed header size\n");
1081         return MPP_ERR_STREAM;
1082     }
1083     vpx_init_range_decoder(&s->c, data2, size2);
1084     if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
1085         mpp_err("Marker bit was set\n");
1086         return MPP_ERR_STREAM;
1087     }
1088 
1089     if (s->keyframe || s->intraonly) {
1090         memset(s->counts.coef, 0, sizeof(s->counts.coef));
1091         memset(s->counts.eob,  0, sizeof(s->counts.eob));
1092     } else {
1093         memset(&s->counts, 0, sizeof(s->counts));
1094     }
1095     // FIXME is it faster to not copy here, but do it down in the fw updates
1096     // as explicit copies if the fw update is missing (and skip the copy upon
1097     // fw update)?
1098     s->prob.p = s->prob_ctx[c].p;
1099     memset(&s->prob_flag_delta, 0, sizeof(s->prob_flag_delta));
1100     // txfm updates
1101     if (s->lossless) {
1102         s->txfmmode = TX_4X4;
1103     } else {
1104         s->txfmmode = vpx_rac_get_uint(&s->c, 2);
1105         if (s->txfmmode == 3)
1106             s->txfmmode += vpx_rac_get(&s->c);
1107 
1108         if (s->txfmmode == TX_SWITCHABLE) {
1109             for (i = 0; i < 2; i++) {
1110 
1111                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1112                     s->prob_flag_delta.p_flag.tx8p[i] = 1;
1113                     s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i],
1114                                                     &s->prob_flag_delta.p_delta.tx8p[i]);
1115                 }
1116 
1117             }
1118             for (i = 0; i < 2; i++)
1119                 for (j = 0; j < 2; j++) {
1120 
1121                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1122                         s->prob_flag_delta.p_flag.tx16p[i][j] = 1;
1123                         s->prob.p.tx16p[i][j] =
1124                             update_prob(&s->c, s->prob.p.tx16p[i][j],
1125                                         &s->prob_flag_delta.p_delta.tx16p[i][j]);
1126                     }
1127                 }
1128             for (i = 0; i < 2; i++)
1129                 for (j = 0; j < 3; j++) {
1130 
1131                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1132                         s->prob_flag_delta.p_flag.tx32p[i][j] = 1;
1133                         s->prob.p.tx32p[i][j] =
1134                             update_prob(&s->c, s->prob.p.tx32p[i][j],
1135                                         &s->prob_flag_delta.p_delta.tx32p[i][j]);
1136                     }
1137                 }
1138         }
1139     }
1140 
1141     // coef updates
1142     for (i = 0; i < 4; i++) {
1143         RK_U8 (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
1144         if (vpx_rac_get(&s->c)) {
1145             for (j = 0; j < 2; j++)
1146                 for (k = 0; k < 2; k++)
1147                     for (l = 0; l < 6; l++)
1148                         for (m = 0; m < 6; m++) {
1149                             RK_U8 *p = s->prob.coef[i][j][k][l][m];
1150                             RK_U8 *p_flag = s->prob_flag_delta.coef_flag[i][j][k][l][m];
1151                             RK_U8 *p_delta = s->prob_flag_delta.coef_delta[i][j][k][l][m];
1152                             RK_U8 *r = ref[j][k][l][m];
1153                             if (l == 0 && m >= 3) // dc only has 3 pt
1154                                 break;
1155                             for (n = 0; n < 3; n++) {
1156                                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1157                                     p_flag[n] = 1;
1158                                     p[n] = update_prob(&s->c, r[n], &p_delta[n]);
1159                                 } else {
1160                                     p_flag[n] = 0;
1161                                     p[n] = r[n];
1162                                 }
1163                             }
1164                         }
1165         } else {
1166             for (j = 0; j < 2; j++)
1167                 for (k = 0; k < 2; k++)
1168                     for (l = 0; l < 6; l++)
1169                         for (m = 0; m < 6; m++) {
1170                             RK_U8 *p = s->prob.coef[i][j][k][l][m];
1171                             RK_U8 *r = ref[j][k][l][m];
1172                             if (m >= 3 && l == 0) // dc only has 3 pt
1173                                 break;
1174                             memcpy(p, r, 3);
1175                         }
1176         }
1177         if (s->txfmmode == (RK_U32)i)
1178             break;
1179     }
1180 
1181     // mode updates
1182     for (i = 0; i < 3; i++) {
1183 
1184         if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1185             s->prob_flag_delta.p_flag.skip[i] = 1;
1186             s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i],
1187                                             &s->prob_flag_delta.p_delta.skip[i]);
1188         }
1189     }
1190 
1191     if (!s->keyframe && !s->intraonly) {
1192         for (i = 0; i < 7; i++)
1193             for (j = 0; j < 3; j++) {
1194                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1195                     s->prob_flag_delta.p_flag.mv_mode[i][j] = 1;
1196                     s->prob.p.mv_mode[i][j] =
1197                         update_prob(&s->c, s->prob.p.mv_mode[i][j],
1198                                     &s->prob_flag_delta.p_delta.mv_mode[i][j]);
1199                 }
1200             }
1201 
1202         if (s->filtermode == FILTER_SWITCHABLE)
1203             for (i = 0; i < 4; i++)
1204                 for (j = 0; j < 2; j++) {
1205                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1206                         s->prob_flag_delta.p_flag.filter[i][j] = 1;
1207                         s->prob.p.filter[i][j] =
1208                             update_prob(&s->c, s->prob.p.filter[i][j],
1209                                         &s->prob_flag_delta.p_delta.filter[i][j]);
1210                     }
1211                 }
1212 
1213         for (i = 0; i < 4; i++) {
1214 
1215             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1216                 s->prob_flag_delta.p_flag.intra[i] = 1;
1217                 s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i],
1218                                                  &s->prob_flag_delta.p_delta.intra[i]);
1219             }
1220 
1221         }
1222 
1223         if (s->allowcompinter) {
1224             s->comppredmode = vpx_rac_get(&s->c);
1225             if (s->comppredmode)
1226                 s->comppredmode += vpx_rac_get(&s->c);
1227             if (s->comppredmode == PRED_SWITCHABLE)
1228                 for (i = 0; i < 5; i++) {
1229                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1230                         s->prob_flag_delta.p_flag.comp[i] = 1;
1231                         s->prob.p.comp[i] =
1232                             update_prob(&s->c, s->prob.p.comp[i],
1233                                         &s->prob_flag_delta.p_delta.comp[i]);
1234                     }
1235                 }
1236         } else {
1237             s->comppredmode = PRED_SINGLEREF;
1238         }
1239 
1240         if (s->comppredmode != PRED_COMPREF) {
1241             for (i = 0; i < 5; i++) {
1242                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1243                     s->prob_flag_delta.p_flag.single_ref[i][0] = 1;
1244                     s->prob.p.single_ref[i][0] =
1245                         update_prob(&s->c, s->prob.p.single_ref[i][0],
1246                                     &s->prob_flag_delta.p_delta.single_ref[i][0]);
1247                 }
1248                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1249                     s->prob_flag_delta.p_flag.single_ref[i][1] = 1;
1250                     s->prob.p.single_ref[i][1] =
1251                         update_prob(&s->c, s->prob.p.single_ref[i][1],
1252                                     &s->prob_flag_delta.p_delta.single_ref[i][1]);
1253                 }
1254             }
1255         }
1256 
1257         if (s->comppredmode != PRED_SINGLEREF) {
1258             for (i = 0; i < 5; i++) {
1259                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1260                     s->prob_flag_delta.p_flag.comp_ref[i] = 1;
1261                     s->prob.p.comp_ref[i] =
1262                         update_prob(&s->c, s->prob.p.comp_ref[i],
1263                                     &s->prob_flag_delta.p_delta.comp_ref[i]);
1264                 }
1265             }
1266         }
1267 
1268         for (i = 0; i < 4; i++)
1269             for (j = 0; j < 9; j++) {
1270                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1271                     s->prob_flag_delta.p_flag.y_mode[i][j] = 1;
1272                     s->prob.p.y_mode[i][j] =
1273                         update_prob(&s->c, s->prob.p.y_mode[i][j],
1274                                     &s->prob_flag_delta.p_delta.y_mode[i][j]);
1275                 }
1276             }
1277 
1278         for (i = 0; i < 4; i++)
1279             for (j = 0; j < 4; j++)
1280                 for (k = 0; k < 3; k++) {
1281                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1282                         s->prob_flag_delta.p_flag.partition[3 - i][j][k] = 1;
1283                         s->prob.p.partition[3 - i][j][k] =
1284                             update_prob(&s->c, s->prob.p.partition[3 - i][j][k],
1285                                         &s->prob_flag_delta.p_delta.partition[3 - i][j][k]);
1286                     }
1287                 }
1288 
1289         // mv fields don't use the update_prob subexp model for some reason
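        // (each updated mv probability below is read as a raw 7-bit value v
        //  and stored as (v << 1) | 1, i.e. 2*v + 1, so the result is always
        //  odd and never zero; e.g. v = 53 yields a probability of 107)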
1290         for (i = 0; i < 3; i++) {
1291             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1292                 s->prob_flag_delta.p_flag.mv_joint[i]   = 1;
1293                 s->prob_flag_delta.p_delta.mv_joint[i]  =
1294                     s->prob.p.mv_joint[i]   = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1295             }
1296         }
1297 
1298         for (i = 0; i < 2; i++) {
1299             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1300                 s->prob_flag_delta.p_flag.mv_comp[i].sign   = 1;
1301                 s->prob_flag_delta.p_delta.mv_comp[i].sign  =
1302                     s->prob.p.mv_comp[i].sign   = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1303             }
1304 
1305             for (j = 0; j < 10; j++)
1306                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1307                     s->prob_flag_delta.p_flag.mv_comp[i].classes[j]  = 1;
1308                     s->prob_flag_delta.p_delta.mv_comp[i].classes[j] =
1309                         s->prob.p.mv_comp[i].classes[j]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1310                 }
1311 
1312             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1313                 s->prob_flag_delta.p_flag.mv_comp[i].class0  = 1;
1314                 s->prob_flag_delta.p_delta.mv_comp[i].class0 =
1315                     s->prob.p.mv_comp[i].class0  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1316             }
1317 
1318             for (j = 0; j < 10; j++)
1319                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1320                     s->prob_flag_delta.p_flag.mv_comp[i].bits[j]  = 1;
1321                     s->prob_flag_delta.p_delta.mv_comp[i].bits[j] =
1322                         s->prob.p.mv_comp[i].bits[j]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1323                 }
1324         }
1325 
1326         for (i = 0; i < 2; i++) {
1327             for (j = 0; j < 2; j++)
1328                 for (k = 0; k < 3; k++)
1329                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1330                         s->prob_flag_delta.p_flag.mv_comp[i].class0_fp[j][k]  = 1;
1331                         s->prob_flag_delta.p_delta.mv_comp[i].class0_fp[j][k] =
1332                             s->prob.p.mv_comp[i].class0_fp[j][k]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1333                     }
1334 
1335             for (j = 0; j < 3; j++)
1336                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1337                     s->prob_flag_delta.p_flag.mv_comp[i].fp[j]  = 1;
1338                     s->prob_flag_delta.p_delta.mv_comp[i].fp[j] =
1339                         s->prob.p.mv_comp[i].fp[j]  =
1340                             (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1341                 }
1342         }
1343 
1344         if (s->highprecisionmvs) {
1345             for (i = 0; i < 2; i++) {
1346                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1347                     s->prob_flag_delta.p_flag.mv_comp[i].class0_hp  = 1;
1348                     s->prob_flag_delta.p_delta.mv_comp[i].class0_hp =
1349                         s->prob.p.mv_comp[i].class0_hp  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1350                 }
1351 
1352                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1353                     s->prob_flag_delta.p_flag.mv_comp[i].hp  = 1;
1354                     s->prob_flag_delta.p_delta.mv_comp[i].hp =
1355                         s->prob.p.mv_comp[i].hp  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1356                 }
1357             }
1358         }
1359     }
1360 
1361     return (RK_S32)((data2 - data) + size2);
1362 }
1363 
1364 static void adapt_prob(RK_U8 *p, RK_U32 ct0, RK_U32 ct1,
1365                        RK_S32 max_count, RK_S32 update_factor)
1366 {
1367     RK_U32 ct = ct0 + ct1, p2, p1;
1368 
1369     if (!ct)
1370         return;
1371 
1372     p1 = *p;
1373     p2 = ((ct0 << 8) + (ct >> 1)) / ct;
1374     p2 = mpp_clip(p2, 1, 255);
1375     ct = MPP_MIN(ct, (RK_U32)max_count);
1376     update_factor = FASTDIV(update_factor * ct, max_count);
1377 
1378     // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
1379     *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
1380 }
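
/*
 * Illustrative sketch, not part of the decoder: adapting a probability of 128
 * with counts {ct0 = 3, ct1 = 1}, max_count = 24 and update_factor = 112 (the
 * uf = 112 coefficient case in adapt_probs() below) works out as
 *   p2 = ((3 << 8) + (4 >> 1)) / 4              = 192
 *   uf = FASTDIV(112 * 4, 24)                   = 18
 *   *p = 128 + (((192 - 128) * 18 + 128) >> 8)  = 133
 */
#if 0
static void adapt_prob_example(void)
{
    RK_U8 p = 128;

    adapt_prob(&p, 3, 1, 24, 112);  /* p is now 133 */
}
#endif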
1381 
1382 static void adapt_probs(VP9Context *s)
1383 {
1384     RK_S32 i, j, k, l, m;
1385     prob_context *p = &s->prob_ctx[s->framectxid].p;
1386     RK_S32 uf = (s->keyframe || s->intraonly || !s->last_keyframe) ? 112 : 128;
1387 
1388     // coefficients
1389     for (i = 0; i < 4; i++)
1390         for (j = 0; j < 2; j++)
1391             for (k = 0; k < 2; k++)
1392                 for (l = 0; l < 6; l++)
1393                     for (m = 0; m < 6; m++) {
1394                         RK_U8 *pp = s->prob_ctx[s->framectxid].coef[i][j][k][l][m];
1395                         RK_U32 *e = s->counts.eob[i][j][k][l][m];
1396                         RK_U32 *c = s->counts.coef[i][j][k][l][m];
1397 
1398                         if (l == 0 && m >= 3) // dc only has 3 pt
1399                             break;
1400                         /*  if(i == 0 && j == 0 && k== 1 && l == 0){
1401                              mpp_log("e[0] = 0x%x e[1] = 0x%x c[0] = 0x%x c[1] = 0x%x c[2] = 0x%x \n",
1402                              e[0],e[1],c[0],c[1],c[2]);
1403                              mpp_log("pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1404                           }*/
1405                         adapt_prob(&pp[0], e[0], e[1], 24, uf);
1406                         adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
1407                         adapt_prob(&pp[2], c[1], c[2], 24, uf);
1408                         /* if(i == 0 && j == 0 && k== 1 && l == 0){
1409                             mpp_log("after pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1410                          }*/
1411                     }
1412 #ifdef dump
1413     fwrite(&s->counts, 1, sizeof(s->counts), vp9_p_fp);
1414     fflush(vp9_p_fp);
1415 #endif
1416 
1417     if (s->keyframe || s->intraonly) {
1418         memcpy(p->skip,  s->prob.p.skip,  sizeof(p->skip));
1419         memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
1420         memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
1421         memcpy(p->tx8p,  s->prob.p.tx8p,  sizeof(p->tx8p));
1422         return;
1423     }
1424 
1425     // skip flag
1426     for (i = 0; i < 3; i++)
1427         adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
1428 
1429     // intra/inter flag
1430     for (i = 0; i < 4; i++)
1431         adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
1432 
1433     // comppred flag
1434     if (s->comppredmode == PRED_SWITCHABLE) {
1435         for (i = 0; i < 5; i++)
1436             adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
1437     }
1438 
1439     // reference frames
1440     if (s->comppredmode != PRED_SINGLEREF) {
1441         for (i = 0; i < 5; i++)
1442             adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
1443                        s->counts.comp_ref[i][1], 20, 128);
1444     }
1445 
1446     if (s->comppredmode != PRED_COMPREF) {
1447         for (i = 0; i < 5; i++) {
1448             RK_U8 *pp = p->single_ref[i];
1449             RK_U32 (*c)[2] = s->counts.single_ref[i];
1450 
1451             adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
1452             adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
1453         }
1454     }
1455 
1456     // block partitioning
1457     for (i = 0; i < 4; i++)
1458         for (j = 0; j < 4; j++) {
1459             RK_U8 *pp = p->partition[i][j];
1460             RK_U32 *c = s->counts.partition[i][j];
1461             // mpp_log("before pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1462             // mpp_log("before c[0] = 0x%x c[1] = 0x%x c[2] = 0x%x",c[0],c[1],c[2]);
1463             adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1464             adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1465             adapt_prob(&pp[2], c[2], c[3], 20, 128);
1466             // mpp_log(" after pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1467         }
1468 
1469     // tx size
1470     if (s->txfmmode == TX_SWITCHABLE) {
1471         for (i = 0; i < 2; i++) {
1472             RK_U32 *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
1473 
1474             adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
1475             adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
1476             adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
1477             adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
1478             adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
1479             adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
1480         }
1481     }
1482 
1483     // interpolation filter
1484     if (s->filtermode == FILTER_SWITCHABLE) {
1485         for (i = 0; i < 4; i++) {
1486             RK_U8 *pp = p->filter[i];
1487             RK_U32 *c = s->counts.filter[i];
1488 
1489             adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
1490             adapt_prob(&pp[1], c[1], c[2], 20, 128);
1491         }
1492     }
1493 
1494     // inter modes
1495     for (i = 0; i < 7; i++) {
1496         RK_U8 *pp = p->mv_mode[i];
1497         RK_U32 *c = s->counts.mv_mode[i];
1498 
1499         adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
1500         adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
1501         adapt_prob(&pp[2], c[1], c[3], 20, 128);
1502     }
1503 
1504     // mv joints
1505     {
1506         RK_U8 *pp = p->mv_joint;
1507         RK_U32 *c = s->counts.mv_joint;
1508 
1509         adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1510         adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1511         adapt_prob(&pp[2], c[2], c[3], 20, 128);
1512     }
1513 
1514     // mv components
1515     for (i = 0; i < 2; i++) {
1516         RK_U8 *pp;
1517         RK_U32 *c, (*c2)[2], sum;
1518 
1519         adapt_prob(&p->mv_comp[i].sign, s->counts.sign[i][0],
1520                    s->counts.sign[i][1], 20, 128);
1521 
1522         pp = p->mv_comp[i].classes;
1523         c = s->counts.classes[i];
1524         sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
1525         adapt_prob(&pp[0], c[0], sum, 20, 128);
1526         sum -= c[1];
1527         adapt_prob(&pp[1], c[1], sum, 20, 128);
1528         sum -= c[2] + c[3];
1529         adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
1530         adapt_prob(&pp[3], c[2], c[3], 20, 128);
1531         sum -= c[4] + c[5];
1532         adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
1533         adapt_prob(&pp[5], c[4], c[5], 20, 128);
1534         sum -= c[6];
1535         adapt_prob(&pp[6], c[6], sum, 20, 128);
1536         adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
1537         adapt_prob(&pp[8], c[7], c[8], 20, 128);
1538         adapt_prob(&pp[9], c[9], c[10], 20, 128);
1539 
1540         adapt_prob(&p->mv_comp[i].class0, s->counts.class0[i][0],
1541                    s->counts.class0[i][1], 20, 128);
1542         pp = p->mv_comp[i].bits;
1543         c2 = s->counts.bits[i];
1544         for (j = 0; j < 10; j++)
1545             adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);
1546 
1547         for (j = 0; j < 2; j++) {
1548             pp = p->mv_comp[i].class0_fp[j];
1549             c = s->counts.class0_fp[i][j];
1550             adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1551             adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1552             adapt_prob(&pp[2], c[2], c[3], 20, 128);
1553         }
1554         pp = p->mv_comp[i].fp;
1555         c = s->counts.fp[i];
1556         adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1557         adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1558         adapt_prob(&pp[2], c[2], c[3], 20, 128);
1559 
1560         if (s->highprecisionmvs) {
1561             adapt_prob(&p->mv_comp[i].class0_hp, s->counts.class0_hp[i][0],
1562                        s->counts.class0_hp[i][1], 20, 128);
1563             adapt_prob(&p->mv_comp[i].hp, s->counts.hp[i][0],
1564                        s->counts.hp[i][1], 20, 128);
1565         }
1566     }
1567 
1568     // y intra modes
1569     for (i = 0; i < 4; i++) {
1570         RK_U8 *pp = p->y_mode[i];
1571         RK_U32 *c = s->counts.y_mode[i], sum, s2;
1572 
1573         sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
1574         adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
1575         sum -= c[TM_VP8_PRED];
1576         adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
1577         sum -= c[VERT_PRED];
1578         adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
1579         s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
1580         sum -= s2;
1581         adapt_prob(&pp[3], s2, sum, 20, 128);
1582         s2 -= c[HOR_PRED];
1583         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
1584         adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
1585         sum -= c[DIAG_DOWN_LEFT_PRED];
1586         adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
1587         sum -= c[VERT_LEFT_PRED];
1588         adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
1589         adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
1590     }
1591 
1592     // uv intra modes
1593     for (i = 0; i < 10; i++) {
1594         RK_U8 *pp = p->uv_mode[i];
1595         RK_U32 *c = s->counts.uv_mode[i], sum, s2;
1596 
1597         sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
1598         adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
1599         sum -= c[TM_VP8_PRED];
1600         adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
1601         sum -= c[VERT_PRED];
1602         adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
1603         s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
1604         sum -= s2;
1605         adapt_prob(&pp[3], s2, sum, 20, 128);
1606         s2 -= c[HOR_PRED];
1607         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
1608         adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
1609         sum -= c[DIAG_DOWN_LEFT_PRED];
1610         adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
1611         sum -= c[VERT_LEFT_PRED];
1612         adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
1613         adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
1614     }
1615 #if 0 //def dump
1616     fwrite(s->counts.y_mode, 1, sizeof(s->counts.y_mode), vp9_p_fp1);
1617     fwrite(s->counts.uv_mode, 1, sizeof(s->counts.uv_mode), vp9_p_fp1);
1618     fflush(vp9_p_fp1);
1619 #endif
1620 }
1621 
1622 
1623 RK_S32 vp9_parser_frame(Vp9CodecContext *ctx, HalDecTask *task)
1624 {
1625 
1626     const RK_U8 *data = NULL;
1627     RK_S32 size = 0;
1628     VP9Context *s = (VP9Context *)ctx->priv_data;
1629     RK_S32 res, i, ref = 0;
1630 
1631     vp9d_dbg(VP9D_DBG_FUNCTION, "%s", __FUNCTION__);
1632     task->valid = -1;
1633 #ifdef dump
1634     dec_num++;
1635 #endif
1636     data = (const RK_U8 *)mpp_packet_get_pos(ctx->pkt);
1637     size = (RK_S32)mpp_packet_get_length(ctx->pkt);
1638 
1639     s->pts = mpp_packet_get_pts(ctx->pkt);
1640 
1641     vp9d_dbg(VP9D_DBG_HEADER, "data size %d", size);
1642     if (size <= 0) {
1643         return MPP_OK;
1644     }
1645     if ((res = decode_parser_header(ctx, data, size, &ref)) < 0) {
1646         return res;
1647     } else if (res == 0) {
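        /*
         * A return value of 0 from decode_parser_header() appears to be the
         * show-existing-frame case: nothing new is decoded and the referenced
         * frame is simply re-queued for display with the current pts.
         */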
1648         if (!s->refs[ref].ref) {
1649             //mpp_err("Requested reference %d not available\n", ref);
1650             return -1;//AVERROR_INVALIDDATA;
1651         }
1652         {
1653             MppFrame frame = NULL;
1654 
1655             mpp_buf_slot_get_prop(s->slots, s->refs[ref].slot_index, SLOT_FRAME_PTR, &frame);
1656             mpp_frame_set_pts(frame, s->pts);
1657             mpp_buf_slot_set_flag(s->slots, s->refs[ref].slot_index, SLOT_QUEUE_USE);
1658             mpp_buf_slot_enqueue(s->slots, s->refs[ref].slot_index, QUEUE_DISPLAY);
1659             s->refs[ref].ref->is_output = 1;
1660         }
1661 
1662         mpp_log("out repeat num %d", s->outframe_num++);
1663         return size;
1664     }
1665     data += res;
1666     size -= res;
1667 
1668     if (s->frames[REF_FRAME_MVPAIR].ref)
1669         vp9_unref_frame(s, &s->frames[REF_FRAME_MVPAIR]);
1670 
1671     if (!s->intraonly && !s->keyframe && !s->errorres && s->frames[CUR_FRAME].ref) {
1672         if ((res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_MVPAIR], &s->frames[CUR_FRAME])) < 0)
1673             return res;
1674     }
1675 
1676     if (s->frames[CUR_FRAME].ref)
1677         vp9_unref_frame(s, &s->frames[CUR_FRAME]);
1678 
1679     if ((res = vp9_alloc_frame(ctx, &s->frames[CUR_FRAME])) < 0)
1680         return res;
1681 
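    /*
     * In frame-parallel mode the backward adaptation in vp9_parser_update()
     * is skipped, so the frame context is refreshed here from the
     * forward-updated probabilities instead; coefficient contexts are only
     * copied for tx sizes up to the current txfmmode.
     */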
1682     if (s->refreshctx && s->parallelmode) {
1683         RK_S32 j, k, l, m;
1684 
1685         for (i = 0; i < 4; i++) {
1686             for (j = 0; j < 2; j++)
1687                 for (k = 0; k < 2; k++)
1688                     for (l = 0; l < 6; l++)
1689                         for (m = 0; m < 6; m++)
1690                             memcpy(s->prob_ctx[s->framectxid].coef[i][j][k][l][m],
1691                                    s->prob.coef[i][j][k][l][m], 3);
1692             if ((RK_S32)s->txfmmode == i)
1693                 break;
1694         }
1695         s->prob_ctx[s->framectxid].p = s->prob.p;
1696     }
1697 
1698     vp9d_parser2_syntax(ctx);
1699 
1700     task->syntax.data = (void*)&ctx->pic_params;
1701     task->syntax.number = 1;
1702     task->valid = 1;
1703     task->output = s->frames[CUR_FRAME].slot_index;
1704     task->input_packet = ctx->pkt;
1705 
1706     for (i = 0; i < 3; i++) {
1707         if (s->refs[s->refidx[i]].slot_index < 0x7f) {
1708             MppFrame mframe = NULL;
1709             mpp_buf_slot_set_flag(s->slots, s->refs[s->refidx[i]].slot_index, SLOT_HAL_INPUT);
1710             task->refer[i] = s->refs[s->refidx[i]].slot_index;
1711             mpp_buf_slot_get_prop(s->slots, task->refer[i], SLOT_FRAME_PTR, &mframe);
1712             if (mframe && !s->keyframe && !s->intraonly)
1713                 task->flags.ref_err |= mpp_frame_get_errinfo(mframe);
1714         } else {
1715             task->refer[i] = -1;
1716         }
1717     }
1718 
1719     vp9d_dbg(VP9D_DBG_REF, "ref_errinfo=%d\n", task->flags.ref_err);
1720     if (s->eos) {
1721         task->flags.eos = 1;
1722     }
1723 
1724     if (!s->invisible) {
1725         mpp_buf_slot_set_flag(s->slots,  s->frames[CUR_FRAME].slot_index, SLOT_QUEUE_USE);
1726         mpp_buf_slot_enqueue(s->slots, s->frames[CUR_FRAME].slot_index, QUEUE_DISPLAY);
1727     }
1728     vp9d_dbg(VP9D_DBG_REF, "s->refreshrefmask = %d s->frames[CUR_FRAME] = %d",
1729              s->refreshrefmask, s->frames[CUR_FRAME].slot_index);
1730     for (i = 0; i < 3; i++) {
1731         if (s->refs[s->refidx[i]].ref != NULL) {
1732             vp9d_dbg(VP9D_DBG_REF, "ref buf select %d", s->refs[s->refidx[i]].slot_index);
1733         }
1734     }
1735     // ref frame setup
1736     for (i = 0; i < 8; i++) {
1737         vp9d_dbg(VP9D_DBG_REF, "s->refreshrefmask = 0x%x", s->refreshrefmask);
1738         res = 0;
1739         if (s->refreshrefmask & (1 << i)) {
1740             if (s->refs[i].ref)
1741                 vp9_unref_frame(s, &s->refs[i]);
1742             vp9d_dbg(VP9D_DBG_REF, "update ref index in %d", i);
1743             res = vp9_ref_frame(ctx, &s->refs[i], &s->frames[CUR_FRAME]);
1744         }
1745 
1746         if (s->refs[i].ref)
1747             vp9d_dbg(VP9D_DBG_REF, "s->refs[%d] = %d", i, s->refs[i].slot_index);
1748         if (res < 0)
1749             return 0;
1750     }
1751     return 0;
1752 }
1753 
1754 MPP_RET vp9d_paser_reset(Vp9CodecContext *ctx)
1755 {
1756     RK_S32 i;
1757     VP9Context *s = ctx->priv_data;
1758     SplitContext_t *ps = (SplitContext_t *)ctx->priv_data2;
1759     VP9ParseContext *pc = ps ? (VP9ParseContext *)ps->priv_data : NULL;
1760 
1761     s->got_keyframes = 0;
1762     s->cur_poc = 0;
1763     for (i = 0; i < 3; i++) {
1764         if (s->frames[i].ref) {
1765             vp9_unref_frame(s, &s->frames[i]);
1766         }
1767     }
1768     for (i = 0; i < 8; i++) {
1769         if (s->refs[i].ref) {
1770             vp9_unref_frame(s, &s->refs[i]);
1771         }
1772     }
1773     if (pc) memset(pc, 0, sizeof(VP9ParseContext));
1774 
1775     s->eos = 0;
1776     if (ps) {
1777         ps->eos = 0;
1778     }
1779     return MPP_OK;
1780 }
1781 static void inv_count_data(VP9Context *s)
1782 {
1783     RK_U32 partition_probs[4][4][4];
1784     RK_U32 count_uv[10][10];
1785     RK_U32 count_y_mode[4][10];
1786     RK_U32 *dst_uv = NULL;
1787     RK_S32 i, j;
1788 
1789     /*
1790                syntax                  hardware
1791          *++++ 64x64 ++++*       *++++  8x8  ++++*
1792          *++++ 32x32 ++++*       *++++ 16x16 ++++*
1793          *++++ 16x16 ++++*       *++++ 32x32 ++++*
1794          *++++  8x8  ++++*       *++++ 64x64 ++++*
1795      */
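    /*
     * Per the sketch above the hardware counters run from 8x8 up to 64x64
     * while the software tables go from 64x64 down to 8x8, so the four rows
     * are simply reversed below (row 0 <-> row 3, row 1 <-> row 2); each row
     * is 4 contexts x 4 symbols of RK_U32, i.e. the 64 bytes per memcpy().
     */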
1796 
1797     memcpy(&partition_probs, s->counts.partition, sizeof(s->counts.partition));
1798     j = 0;
1799     for (i = 3; i >= 0; i--) {
1800         memcpy(&s->counts.partition[j], &partition_probs[i], 64);
1801         j++;
1802     }
1803     if (!(s->keyframe || s->intraonly)) {
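        /*
         * The hardware reports intra-mode counters in spec order
         * (dc, v, h, d45, d135, d117, d153, d207, d63, tm) while this decoder
         * indexes modes as (v, h, dc, d45, d135, d117, d153, d63, d207, tm)
         * (cf. the uv_mode table further down), so each row of y_mode counts
         * gets the column permutation 0->2, 1->0, 2->1, 7->8, 8->7 below;
         * the rows themselves are block-size groups and stay in place.
         */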
1804         memcpy(count_y_mode, s->counts.y_mode, sizeof(s->counts.y_mode));
1805         for (i = 0; i < 4; i++) {
1806             RK_U32 value = 0;
1807             for (j = 0; j < 10; j++) {
1808                 value = count_y_mode[i][j];
1809                 if (j == 0)
1810                     s->counts.y_mode[i][2] = value;
1811                 else if (j == 1)
1812                     s->counts.y_mode[i][0] = value;
1813                 else if (j == 2)
1814                     s->counts.y_mode[i][1] = value;
1815                 else if (j == 7)
1816                     s->counts.y_mode[i][8] = value;
1817                 else if (j == 8)
1818                     s->counts.y_mode[i][7] = value;
1819                 else
1820                     s->counts.y_mode[i][j] = value;
1821 
1822             }
1823         }
1824 
1825 
1826         memcpy(count_uv, s->counts.uv_mode, sizeof(s->counts.uv_mode));
1827 
1828         /* convert uv_mode counts from the hardware order to the order the software expects */
1829         /*
1830               syntax                hardware
1831          *++++ v    ++++*     *++++ dc   ++++*
1832          *++++ h    ++++*     *++++ v    ++++*
1833          *++++ dc   ++++*     *++++ h    ++++*
1834          *++++ d45  ++++*     *++++ d45  ++++*
1835          *++++ d135 ++++*     *++++ d135 ++++*
1836          *++++ d117 ++++*     *++++ d117 ++++*
1837          *++++ d153 ++++*     *++++ d153 ++++*
1838          *++++ d63  ++++*     *++++ d207 ++++*
1839          *++++ d207 ++++*     *++++ d63  ++++*
1840          *++++ tm   ++++*     *++++ tm   ++++*
1841         */
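        /*
         * For uv_mode the row index is the co-located luma mode, so the same
         * permutation (0->2, 1->0, 2->1, 7->8, 8->7) is applied both to the
         * row that is selected and to the entries within each row.
         */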
1842         for (i = 0; i < 10; i++) {
1843             RK_U32 *src_uv = (RK_U32 *)(count_uv[i]);
1844             RK_U32 value = 0;
1845             if (i == 0) {
1846                 dst_uv = s->counts.uv_mode[2]; //dc
1847             } else if ( i == 1) {
1848                 dst_uv = s->counts.uv_mode[0]; //v
1849             }  else if ( i == 2) {
1850                 dst_uv = s->counts.uv_mode[1]; //h
1851             }  else if ( i == 7) {
1852                 dst_uv = s->counts.uv_mode[8]; //d207
1853             } else if (i == 8) {
1854                 dst_uv = s->counts.uv_mode[7]; //d63
1855             } else {
1856                 dst_uv = s->counts.uv_mode[i];
1857             }
1858             for (j = 0; j < 10; j++) {
1859                 value = src_uv[j];
1860                 if (j == 0)
1861                     dst_uv[2] = value;
1862                 else if (j == 1)
1863                     dst_uv[0] = value;
1864                 else if (j == 2)
1865                     dst_uv[1] = value;
1866                 else if (j == 7)
1867                     dst_uv[8] = value;
1868                 else if (j == 8)
1869                     dst_uv[7] = value;
1870                 else
1871                     dst_uv[j] = value;
1872             }
1873 
1874         }
1875     }
1876 }
1877 
1878 void vp9_parser_update(Vp9CodecContext *ctx, void *count_info)
1879 {
1880     VP9Context *s = ctx->priv_data;
1881 
1882 #ifdef dump
1883     char filename[20] = "data/pcout";
1884     char filename1[20] = "data/uppor";
1885     if (vp9_p_fp != NULL) {
1886         fclose(vp9_p_fp);
1887 
1888     }
1889     if (vp9_p_fp1 != NULL) {
1890         fclose(vp9_p_fp1);
1891 
1892     }
1893     sprintf(&filename[10], "%d.bin", count);
1894     sprintf(&filename1[10], "%d.bin", count);
1895     mpp_log("filename %s", filename);
1896     vp9_p_fp = fopen(filename, "wb");
1897     vp9_p_fp1 = fopen(filename1, "wb");
1898 #endif
1899     //update count from hardware
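    /*
     * count_info is the counter block produced by the hardware; it is copied
     * into s->counts, reordered into the software layout by inv_count_data()
     * and, when the frame context is to be refreshed and parallel mode is
     * off, fed to adapt_probs() to update the current frame context.
     */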
1900     if (count_info != NULL) {
1901 
1902         memcpy((void *)&s->counts, count_info, sizeof(s->counts));
1903 
1904         if (s->refreshctx && !s->parallelmode) {
1905 #ifdef dump
1906             count++;
1907 #endif
1908             inv_count_data(s);
1909             adapt_probs(s);
1910 
1911         }
1912     }
1913 
1914     return;
1915 }
1916