xref: /rockchip-linux_mpp/mpp/codec/dec/vp9/vp9d_parser.c (revision 437bfbeb9567cca9cd9080e3f6954aa9d6a94f18)
1 /*
2 *
3 * Copyright 2015 Rockchip Electronics Co. LTD
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 *      http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17 #include <stdlib.h>
18 
19 #include <string.h>
20 
21 #include "mpp_env.h"
22 #include "mpp_mem.h"
23 #include "mpp_debug.h"
24 #include "mpp_common.h"
25 #include "mpp_bitread.h"
26 #include "mpp_packet_impl.h"
27 #include "mpp_compat_impl.h"
28 
29 #include "vp9data.h"
30 #include "vp9d_codec.h"
31 #include "vp9d_parser.h"
32 #include "mpp_frame_impl.h"
33 
34 /**
35  * Clip a signed integer into the -(2^p),(2^p-1) range.
36  * @param  a value to clip
37  * @param  p bit position to clip at
38  * @return clipped value
39  */
av_clip_uintp2(RK_S32 a,RK_S32 p)40 static   RK_U32 av_clip_uintp2(RK_S32 a, RK_S32 p)
41 {
42     if (a & ~((1 << p) - 1)) return -a >> 31 & ((1 << p) - 1);
43     else                   return  a;
44 }
45 
46 RK_U32 vp9d_debug = 0;
47 
48 #define VP9_SYNCCODE 0x498342
49 //#define dump
50 #ifdef dump
51 static FILE *vp9_p_fp = NULL;
52 static FILE *vp9_p_fp1 = NULL;
53 static FILE *vp9_p_fp2 = NULL;
54 static RK_S32 dec_num = 0;
55 static RK_S32 count = 0;
56 #endif
57 
58 #ifndef FASTDIV
59 #   define FASTDIV(a,b) ((RK_U32)((((RK_U64)a) * vpx_inverse[b]) >> 32))
60 #endif /* FASTDIV */
61 
62 /* a*inverse[b]>>32 == a/b for all 0<=a<=16909558 && 2<=b<=256
63  * for a>16909558, is an overestimate by less than 1 part in 1<<24 */
64 const RK_U32 vpx_inverse[257] = {
65     0, 4294967295U, 2147483648U, 1431655766, 1073741824,  858993460,  715827883,  613566757,
66     536870912,  477218589,  429496730,  390451573,  357913942,  330382100,  306783379,  286331154,
67     268435456,  252645136,  238609295,  226050911,  214748365,  204522253,  195225787,  186737709,
68     178956971,  171798692,  165191050,  159072863,  153391690,  148102321,  143165577,  138547333,
69     134217728,  130150525,  126322568,  122713352,  119304648,  116080198,  113025456,  110127367,
70     107374183,  104755300,  102261127,   99882961,   97612894,   95443718,   93368855,   91382283,
71     89478486,   87652394,   85899346,   84215046,   82595525,   81037119,   79536432,   78090315,
72     76695845,   75350304,   74051161,   72796056,   71582789,   70409300,   69273667,   68174085,
73     67108864,   66076420,   65075263,   64103990,   63161284,   62245903,   61356676,   60492498,
74     59652324,   58835169,   58040099,   57266231,   56512728,   55778797,   55063684,   54366675,
75     53687092,   53024288,   52377650,   51746594,   51130564,   50529028,   49941481,   49367441,
76     48806447,   48258060,   47721859,   47197443,   46684428,   46182445,   45691142,   45210183,
77     44739243,   44278014,   43826197,   43383509,   42949673,   42524429,   42107523,   41698712,
78     41297763,   40904451,   40518560,   40139882,   39768216,   39403370,   39045158,   38693400,
79     38347923,   38008561,   37675152,   37347542,   37025581,   36709123,   36398028,   36092163,
80     35791395,   35495598,   35204650,   34918434,   34636834,   34359739,   34087043,   33818641,
81     33554432,   33294321,   33038210,   32786010,   32537632,   32292988,   32051995,   31814573,
82     31580642,   31350127,   31122952,   30899046,   30678338,   30460761,   30246249,   30034737,
83     29826162,   29620465,   29417585,   29217465,   29020050,   28825284,   28633116,   28443493,
84     28256364,   28071682,   27889399,   27709467,   27531842,   27356480,   27183338,   27012373,
85     26843546,   26676816,   26512144,   26349493,   26188825,   26030105,   25873297,   25718368,
86     25565282,   25414008,   25264514,   25116768,   24970741,   24826401,   24683721,   24542671,
87     24403224,   24265352,   24129030,   23994231,   23860930,   23729102,   23598722,   23469767,
88     23342214,   23216040,   23091223,   22967740,   22845571,   22724695,   22605092,   22486740,
89     22369622,   22253717,   22139007,   22025474,   21913099,   21801865,   21691755,   21582751,
90     21474837,   21367997,   21262215,   21157475,   21053762,   20951060,   20849356,   20748635,
91     20648882,   20550083,   20452226,   20355296,   20259280,   20164166,   20069941,   19976593,
92     19884108,   19792477,   19701685,   19611723,   19522579,   19434242,   19346700,   19259944,
93     19173962,   19088744,   19004281,   18920561,   18837576,   18755316,   18673771,   18592933,
94     18512791,   18433337,   18354562,   18276457,   18199014,   18122225,   18046082,   17970575,
95     17895698,   17821442,   17747799,   17674763,   17602325,   17530479,   17459217,   17388532,
96     17318417,   17248865,   17179870,   17111424,   17043522,   16976156,   16909321,   16843010,
97     16777216
98 };
99 
split_parse_frame(SplitContext_t * ctx,RK_U8 * buf,RK_S32 size)100 static void split_parse_frame(SplitContext_t *ctx, RK_U8 *buf, RK_S32 size)
101 {
102     VP9ParseContext *s = (VP9ParseContext *)ctx->priv_data;
103 
104     if (buf[0] & 0x4) {
105         ctx->key_frame = 0;
106     } else {
107         ctx->key_frame = 1;
108     }
109 
110     if (buf[0] & 0x2) {
111         if (ctx->pts == -1)
112             ctx->pts = s->pts;
113         s->pts = -1;
114     } else {
115         s->pts = ctx->pts;
116         ctx->pts = -1;
117     }
118 
119     (void)size;
120 }
121 
vp9d_split_frame(SplitContext_t * ctx,RK_U8 ** out_data,RK_S32 * out_size,RK_U8 * data,RK_S32 size)122 RK_S32 vp9d_split_frame(SplitContext_t *ctx,
123                         RK_U8 **out_data, RK_S32 *out_size,
124                         RK_U8 *data, RK_S32 size)
125 {
126     VP9ParseContext *s = (VP9ParseContext *)ctx->priv_data;
127     RK_S32 full_size = size;
128     RK_S32 marker;
129 
130     if (size <= 0) {
131         *out_size = 0;
132         *out_data = data;
133 
134         return 0;
135     }
136 
137     if (s->n_frames > 0) {
138         *out_data = data;
139         *out_size = s->size[--s->n_frames];
140         split_parse_frame(ctx, *out_data, *out_size);
141 
142         return s->n_frames > 0 ? *out_size : size /* i.e. include idx tail */;
143     }
144 
145     marker = data[size - 1];
146     if ((marker & 0xe0) == 0xc0) {
147         RK_S32 nbytes = 1 + ((marker >> 3) & 0x3);
148         RK_S32 n_frames = 1 + (marker & 0x7), idx_sz = 2 + n_frames * nbytes;
149 
150         if (size >= idx_sz && data[size - idx_sz] == marker) {
151             RK_U8 *idx = data + size + 1 - idx_sz;
152             RK_S32 first = 1;
153 
154             switch (nbytes) {
155 #define case_n(a, rd) \
156             case a: \
157                 while (n_frames--) { \
158                     RK_U32 sz = rd; \
159                     idx += a; \
160                     if (sz == 0 || sz > (RK_U32)size) { \
161                         s->n_frames = 0; \
162                         *out_size = size > full_size ? full_size : size; \
163                         *out_data = data; \
164                         mpp_err("Superframe packet size too big: %u > %d\n", \
165                                sz, size); \
166                         return full_size; \
167                     } \
168                     if (first) { \
169                         first = 0; \
170                         *out_data = data; \
171                         *out_size = sz; \
172                         s->n_frames = n_frames; \
173                     } else { \
174                         s->size[n_frames] = sz; \
175                     } \
176                     data += sz; \
177                     size -= sz; \
178                 } \
179                 split_parse_frame(ctx, *out_data, *out_size); \
180                 return *out_size
181 
182                 case_n(1, *idx);
183                 case_n(2, MPP_RL16(idx));
184                 case_n(3, MPP_RL24(idx));
185                 case_n(4, MPP_RL32(idx));
186             }
187         }
188     }
189 
190     *out_data = data;
191     *out_size = size;
192     split_parse_frame(ctx, data, size);
193     return size;
194 }
195 
vp9d_get_frame_stream(Vp9CodecContext * ctx,RK_U8 * buf,RK_S32 length)196 MPP_RET vp9d_get_frame_stream(Vp9CodecContext *ctx, RK_U8 *buf, RK_S32 length)
197 {
198     RK_S32 buff_size = 0;
199     RK_U8 *data = NULL;
200     RK_S32 size = 0;
201 
202     data = (RK_U8 *)mpp_packet_get_data(ctx->pkt);
203     size = (RK_S32)mpp_packet_get_size(ctx->pkt);
204 
205     if (length > size) {
206         mpp_free(data);
207         mpp_packet_deinit(&ctx->pkt);
208         buff_size = length + 10 * 1024;
209         data = mpp_malloc(RK_U8, buff_size);
210         mpp_packet_init(&ctx->pkt, (void *)data, length);
211         mpp_packet_set_size(ctx->pkt, buff_size);
212     }
213 
214     memcpy(data, buf, length);
215     mpp_packet_set_length(ctx->pkt, length);
216 
217     return MPP_OK;
218 }
219 
vp9d_split_init(Vp9CodecContext * vp9_ctx)220 MPP_RET vp9d_split_init(Vp9CodecContext *vp9_ctx)
221 {
222     SplitContext_t *ps;
223     VP9ParseContext *sc;
224 
225     ps = (SplitContext_t *)mpp_calloc(SplitContext_t, 1);
226     if (!ps) {
227         mpp_err("vp9 parser malloc fail");
228         return MPP_ERR_NOMEM;
229     }
230 
231     sc = (VP9ParseContext *)mpp_calloc(VP9ParseContext, 1);
232     if (!sc) {
233         mpp_err("vp9 parser context malloc fail");
234         mpp_free(ps);
235         return MPP_ERR_NOMEM;
236     }
237 
238     ps->priv_data = (void*)sc;
239     vp9_ctx->priv_data2 = (void*)ps;
240 
241     return MPP_OK;
242 }
243 
vp9d_split_deinit(Vp9CodecContext * vp9_ctx)244 MPP_RET vp9d_split_deinit(Vp9CodecContext *vp9_ctx)
245 {
246     SplitContext_t *ps = (SplitContext_t *)vp9_ctx->priv_data2;
247 
248     if (ps)
249         MPP_FREE(ps->priv_data);
250     MPP_FREE(vp9_ctx->priv_data2);
251 
252     return MPP_OK;
253 }
254 
vp9_ref_frame(Vp9CodecContext * ctx,VP9Frame * dst,VP9Frame * src)255 static RK_S32 vp9_ref_frame(Vp9CodecContext *ctx, VP9Frame *dst, VP9Frame *src)
256 {
257     VP9Context *s = ctx->priv_data;
258     MppFrameImpl *impl_frm = (MppFrameImpl *)dst->f;
259 
260     if (src->ref == NULL || src->slot_index >= 0x7f) {
261         mpp_err("vp9_ref_frame is vaild");
262         return -1;
263     }
264     dst->slot_index = src->slot_index;
265     dst->ref = src->ref;
266     dst->ref->invisible = src->ref->invisible;
267     dst->ref->ref_count++;
268     vp9d_dbg(VP9D_DBG_REF, "get prop slot frame %p  count %d", dst->f, dst->ref->ref_count);
269     mpp_buf_slot_get_prop(s->slots, src->slot_index, SLOT_FRAME, &dst->f);
270     impl_frm->buffer = NULL; //parser no need process hal buf
271     vp9d_dbg(VP9D_DBG_REF, "get prop slot frame after %p", dst->f);
272     return 0;
273 }
274 
vp9_unref_frame(VP9Context * s,VP9Frame * f)275 static void vp9_unref_frame( VP9Context *s, VP9Frame *f)
276 {
277     if (f->ref->ref_count <= 0 || f->slot_index >= 0x7f) {
278         mpp_err("ref count alreay is zero");
279         return;
280     }
281     f->ref->ref_count--;
282     if (!f->ref->ref_count) {
283         if (f->slot_index <= 0x7f) {
284             if (f->ref->invisible && !f->ref->is_output) {
285                 MppBuffer framebuf = NULL;
286 
287                 mpp_buf_slot_get_prop(s->slots, f->slot_index, SLOT_BUFFER, &framebuf);
288                 mpp_buffer_put(framebuf);
289                 f->ref->invisible = 0;
290             }
291             mpp_buf_slot_clr_flag(s->slots, f->slot_index, SLOT_CODEC_USE);
292         }
293         mpp_free(f->ref);
294         f->slot_index = 0xff;
295         f->ref = NULL;
296     }
297     f->ref = NULL;
298     return;
299 }
300 
301 
vp9_frame_free(VP9Context * s)302 static  RK_S32 vp9_frame_free(VP9Context *s)
303 {
304     RK_S32 i;
305     for (i = 0; i < 3; i++) {
306         if (s->frames[i].ref) {
307             vp9_unref_frame(s, &s->frames[i]);
308         }
309         mpp_frame_deinit(&s->frames[i].f);
310     }
311     for (i = 0; i < 8; i++) {
312         if (s->refs[i].ref) {
313             vp9_unref_frame(s, &s->refs[i]);
314         }
315         mpp_frame_deinit(&s->refs[i].f);
316     }
317     return 0;
318 }
319 
vp9_frame_init(VP9Context * s)320 static RK_S32 vp9_frame_init(VP9Context *s)
321 {
322     RK_S32 i;
323     for (i = 0; i < 3; i++) {
324         mpp_frame_init(&s->frames[i].f);
325         if (!s->frames[i].f) {
326             vp9_frame_free(s);
327             mpp_err("Failed to allocate frame buffer %d\n", i);
328             return MPP_ERR_NOMEM;
329         }
330         s->frames[i].slot_index = 0x7f;
331         s->frames[i].ref = NULL;
332     }
333 
334     for (i = 0; i < 8; i++) {
335         mpp_frame_init(&(s->refs[i].f));
336         if (!s->refs[i].f) {
337             vp9_frame_free(s);
338             mpp_err("Failed to allocate frame buffer %d\n", i);
339             return MPP_ERR_NOMEM;
340         }
341         s->refs[i].slot_index = 0x7f;
342         s->refs[i].ref = NULL;
343     }
344     return MPP_OK;
345 }
346 
vp9d_parser_init(Vp9CodecContext * vp9_ctx,ParserCfg * init)347 MPP_RET vp9d_parser_init(Vp9CodecContext *vp9_ctx, ParserCfg *init)
348 {
349     VP9Context *s = mpp_calloc(VP9Context, 1);
350     vp9_ctx->priv_data = (void*)s;
351     if (!vp9_ctx->priv_data) {
352         mpp_err("vp9 codec context malloc fail");
353         return MPP_ERR_NOMEM;
354     }
355     vp9_frame_init(s);
356     s->last_bpp = 0;
357     s->filter.sharpness = -1;
358 
359 #ifdef dump
360     count = 0;
361 #endif
362 
363     s->packet_slots = init->packet_slots;
364     s->slots = init->frame_slots;
365     s->cfg = init->cfg;
366     s->hw_info = init->hw_info;
367     mpp_buf_slot_setup(s->slots, 25);
368 
369     mpp_env_get_u32("vp9d_debug", &vp9d_debug, 0);
370 
371     return MPP_OK;
372 }
373 
vp9d_parser_deinit(Vp9CodecContext * vp9_ctx)374 MPP_RET vp9d_parser_deinit(Vp9CodecContext *vp9_ctx)
375 {
376     VP9Context *s = vp9_ctx->priv_data;
377     vp9_frame_free(s);
378     mpp_free(s->c_b);
379     s->c_b_size = 0;
380     MPP_FREE(vp9_ctx->priv_data);
381     return MPP_OK;
382 }
383 
vp9_alloc_frame(Vp9CodecContext * ctx,VP9Frame * frame)384 static RK_S32 vp9_alloc_frame(Vp9CodecContext *ctx, VP9Frame *frame)
385 {
386     VP9Context *s = ctx->priv_data;
387     mpp_frame_set_width(frame->f, ctx->width);
388     mpp_frame_set_height(frame->f, ctx->height);
389 
390     mpp_frame_set_hor_stride(frame->f, 0);
391     mpp_frame_set_ver_stride(frame->f, 0);
392     mpp_frame_set_errinfo(frame->f, 0);
393     mpp_frame_set_discard(frame->f, 0);
394     mpp_frame_set_pts(frame->f, s->pts);
395     mpp_frame_set_dts(frame->f, s->dts);
396     // set current poc
397     s->cur_poc++;
398     mpp_frame_set_poc(frame->f, s->cur_poc);
399 
400     if (MPP_FRAME_FMT_IS_FBC(s->cfg->base.out_fmt)) {
401         RK_U32 fbc_hdr_stride = mpp_align_64(ctx->width);
402 
403         mpp_slots_set_prop(s->slots, SLOTS_HOR_ALIGN, mpp_align_64);
404         mpp_frame_set_fmt(frame->f, ctx->pix_fmt | ((s->cfg->base.out_fmt & (MPP_FRAME_FBC_MASK))));
405 
406         if (*compat_ext_fbc_hdr_256_odd)
407             fbc_hdr_stride = mpp_align_256_odd(ctx->width);
408 
409         mpp_frame_set_fbc_hdr_stride(frame->f, fbc_hdr_stride);
410     } else {
411         if (mpp_get_soc_type() == ROCKCHIP_SOC_RK3576)
412             mpp_slots_set_prop(s->slots, SLOTS_HOR_ALIGN, mpp_align_128_odd_plus_64);
413         else
414             mpp_slots_set_prop(s->slots, SLOTS_HOR_ALIGN, mpp_align_256_odd);
415         mpp_slots_set_prop(s->slots, SLOTS_VER_ALIGN, mpp_align_64);
416         if (MPP_FRAME_FMT_IS_TILE(s->cfg->base.out_fmt))
417             mpp_frame_set_fmt(frame->f, ctx->pix_fmt | ((s->cfg->base.out_fmt & (MPP_FRAME_TILE_FLAG))));
418         else
419             mpp_frame_set_fmt(frame->f, ctx->pix_fmt);
420     }
421 
422     if (s->cfg->base.enable_thumbnail && s->hw_info->cap_down_scale)
423         mpp_frame_set_thumbnail_en(frame->f, s->cfg->base.enable_thumbnail);
424     else
425         mpp_frame_set_thumbnail_en(frame->f, 0);
426 
427     mpp_buf_slot_get_unused(s->slots, &frame->slot_index);
428     mpp_buf_slot_set_prop(s->slots, frame->slot_index, SLOT_FRAME, frame->f);
429     mpp_buf_slot_set_flag(s->slots, frame->slot_index, SLOT_CODEC_USE);
430     mpp_buf_slot_set_flag(s->slots, frame->slot_index, SLOT_HAL_OUTPUT);
431     frame->ref = mpp_calloc(RefInfo, 1);
432     frame->ref->ref_count++;
433     frame->ref->invisible = s->invisible;
434     frame->ref->is_output = 0;
435 
436     return 0;
437 }
438 
439 
440 
441 // for some reason the sign bit is at the end, not the start, of a bit sequence
get_sbits_inv(BitReadCtx_t * gb,RK_S32 n)442 static RK_S32 get_sbits_inv(BitReadCtx_t *gb, RK_S32 n)
443 {
444     RK_S32 value;
445     RK_S32 v;
446     READ_BITS(gb, n, &v);
447     READ_ONEBIT(gb, &value);
448     return value ? -v : v;
449 __BITREAD_ERR:
450     return MPP_ERR_STREAM;
451 }
452 
update_size(Vp9CodecContext * ctx,RK_S32 w,RK_S32 h,RK_S32 fmt)453 static RK_S32 update_size(Vp9CodecContext *ctx, RK_S32 w, RK_S32 h, RK_S32 fmt)
454 {
455     VP9Context *s = ctx->priv_data;
456 
457     if (w == ctx->width && h == ctx->height && ctx->pix_fmt == fmt)
458         return 0;
459 
460     ctx->width   = w;
461     ctx->height  = h;
462     ctx->pix_fmt = fmt;
463     s->sb_cols   = (w + 63) >> 6;
464     s->sb_rows   = (h + 63) >> 6;
465     s->cols      = (w + 7) >> 3;
466     s->rows      = (h + 7) >> 3;
467 
468     // these will be re-allocated a little later
469     if (s->bpp != s->last_bpp) {
470         s->last_bpp = s->bpp;
471     }
472 
473     return 0;
474 }
475 
inv_recenter_nonneg(RK_S32 v,RK_S32 m)476 static RK_S32 inv_recenter_nonneg(RK_S32 v, RK_S32 m)
477 {
478     return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
479 }
480 
481 // differential forward probability updates
update_prob(VpxRangeCoder * c,RK_S32 p,RK_U8 * delta)482 static RK_S32 update_prob(VpxRangeCoder *c, RK_S32 p, RK_U8 *delta)
483 {
484     static const RK_S32 inv_map_table[255] = {
485         7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
486         189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
487         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
488         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
489         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
490         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
491         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
492         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
493         101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
494         116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
495         131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
496         146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
497         161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
498         177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
499         192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
500         207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
501         222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
502         237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
503         252, 253, 253,
504     };
505     RK_S32 d;
506 
507     /* This code is trying to do a differential probability update. For a
508      * current probability A in the range [1, 255], the difference to a new
509      * probability of any value can be expressed differentially as 1-A,255-A
510      * where some part of this (absolute range) exists both in positive as
511      * well as the negative part, whereas another part only exists in one
512      * half. We're trying to code this shared part differentially, i.e.
513      * times two where the value of the lowest bit specifies the sign, and
514      * the single part is then coded on top of this. This absolute difference
515      * then again has a value of [0,254], but a bigger value in this range
516      * indicates that we're further away from the original value A, so we
517      * can code this as a VLC code, since higher values are increasingly
518      * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
519      * updates vs. the 'fine, exact' updates further down the range, which
520      * adds one extra dimension to this differential update model. */
521 
522     if (!vpx_rac_get(c)) {
523         d = vpx_rac_get_uint(c, 4) + 0;
524     } else if (!vpx_rac_get(c)) {
525         d = vpx_rac_get_uint(c, 4) + 16;
526     } else if (!vpx_rac_get(c)) {
527         d = vpx_rac_get_uint(c, 5) + 32;
528     } else {
529         d = vpx_rac_get_uint(c, 7);
530         if (d >= 65)
531             d = (d << 1) - 65 + vpx_rac_get(c);
532         d += 64;
533         //av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
534     }
535     *delta = d;
536     return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
537            255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
538 }
539 
mpp_get_bit1(BitReadCtx_t * gb)540 static RK_S32 mpp_get_bit1(BitReadCtx_t *gb)
541 {
542     RK_S32 value;
543     READ_ONEBIT(gb, &value);
544     return value;
545 __BITREAD_ERR:
546     return 0;
547 }
548 
mpp_get_bits(BitReadCtx_t * gb,RK_S32 num_bit)549 static RK_S32 mpp_get_bits(BitReadCtx_t *gb, RK_S32 num_bit)
550 {
551     RK_S32 value;
552     READ_BITS(gb, num_bit, &value);
553     return value;
554 __BITREAD_ERR:
555     return 0;
556 }
557 
read_colorspace_details(Vp9CodecContext * ctx)558 static RK_S32 read_colorspace_details(Vp9CodecContext *ctx)
559 {
560     static const MppFrameColorSpace colorspaces[8] = {
561         MPP_FRAME_SPC_UNSPECIFIED, MPP_FRAME_SPC_BT470BG, MPP_FRAME_SPC_BT709, MPP_FRAME_SPC_SMPTE170M,
562         MPP_FRAME_SPC_SMPTE240M, MPP_FRAME_SPC_BT2020_NCL, MPP_FRAME_SPC_RESERVED, MPP_FRAME_SPC_RGB,
563     };
564     VP9Context *s = ctx->priv_data;
565     RK_S32 res;
566     RK_S32 bits = ctx->profile <= 1 ? 0 : 1 + mpp_get_bit1(&s->gb); // 0:8, 1:10, 2:12
567 
568     vp9d_dbg(VP9D_DBG_HEADER, "bit_depth %d", 8 + bits * 2);
569     s->bpp_index = bits;
570     s->bpp = 8 + bits * 2;
571     s->bytesperpixel = (7 + s->bpp) >> 3;
572     ctx->colorspace = colorspaces[mpp_get_bits(&s->gb, 3)];
573     vp9d_dbg(VP9D_DBG_HEADER, "color_space %d", ctx->colorspace);
574     if (ctx->colorspace == MPP_FRAME_SPC_RGB) { // RGB = profile 1
575 
576         {
577             mpp_err("RGB not supported in profile %d\n", ctx->profile);
578             return MPP_ERR_STREAM;
579         }
580     } else {
581         static const RK_S32 pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
582             {   { -1, MPP_FMT_YUV422SP },
583                 { -1, MPP_FMT_YUV420SP }
584             },
585             {   { -1, MPP_FMT_YUV422SP_10BIT},
586                 { -1, MPP_FMT_YUV420SP_10BIT}
587             },
588             {   { -1, -1 },
589                 { -1, -1 }
590             }
591         };
592         ctx->color_range = mpp_get_bit1(&s->gb) ? MPP_FRAME_RANGE_JPEG : MPP_FRAME_RANGE_MPEG;
593         vp9d_dbg(VP9D_DBG_HEADER, "color_range %d", ctx->color_range);
594         if (ctx->profile & 1) {
595             s->ss_h = mpp_get_bit1(&s->gb);
596             vp9d_dbg(VP9D_DBG_HEADER, "subsampling_x %d", s->ss_h);
597             s->ss_v = mpp_get_bit1(&s->gb);
598             vp9d_dbg(VP9D_DBG_HEADER, "subsampling_y %d", s->ss_v);
599             s->extra_plane = 0;
600             res = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
601             if (res == MPP_FMT_YUV420SP || res < 0) {
602                 mpp_err("YUV FMT %d not supported in profile %d\n", res, ctx->profile);
603                 return MPP_ERR_STREAM;
604             } else if (mpp_get_bit1(&s->gb)) {
605                 s->extra_plane = 1;
606                 vp9d_dbg(VP9D_DBG_HEADER, "has_extra_plane 1");
607                 mpp_err("Profile %d color details reserved bit set\n", ctx->profile);
608                 return  MPP_ERR_STREAM;
609             }
610         } else {
611             s->extra_plane = 0;
612             s->ss_h = s->ss_v = 1;
613             res = pix_fmt_for_ss[bits][1][1];
614         }
615     }
616 
617     return res;
618 }
619 
decode012(BitReadCtx_t * gb)620 static RK_S32 decode012(BitReadCtx_t *gb)
621 {
622     RK_S32 n;
623     n = mpp_get_bit1(gb);
624     if (n == 0)
625         return 0;
626     else
627         return mpp_get_bit1(gb) + 1;
628 }
629 
decode_parser_header(Vp9CodecContext * ctx,const RK_U8 * data,RK_S32 size,RK_S32 * refo)630 static RK_S32 decode_parser_header(Vp9CodecContext *ctx,
631                                    const RK_U8 *data, RK_S32 size, RK_S32 *refo)
632 {
633     VP9Context *s = ctx->priv_data;
634     RK_S32 c, i, j, k, l, m, n, max, size2, res, sharp;
635     RK_U32 w, h;
636     RK_S32 fmt = ctx->pix_fmt;
637     RK_S32 last_invisible;
638     const RK_U8 *data2;
639 
640 #ifdef dump
641     char filename[20] = "data/acoef";
642     if (vp9_p_fp2 != NULL) {
643         fclose(vp9_p_fp2);
644 
645     }
646     sprintf(&filename[10], "%d.bin", dec_num);
647     vp9_p_fp2 = fopen(filename, "wb");
648 #endif
649 
650     /* general header */
651     mpp_set_bitread_ctx(&s->gb, (RK_U8*)data, size);
652     if (mpp_get_bits(&s->gb, 2) != 0x2) { // frame marker
653         mpp_err("Invalid frame marker\n");
654         return MPP_ERR_STREAM;
655     }
656 
657     ctx->profile  = mpp_get_bit1(&s->gb);
658     ctx->profile |= mpp_get_bit1(&s->gb) << 1;
659 
660     if (ctx->profile == 3) ctx->profile += mpp_get_bit1(&s->gb);
661     if (ctx->profile > 3) {
662         mpp_err("Profile %d is not yet supported\n", ctx->profile);
663         return MPP_ERR_STREAM;
664     }
665 
666     vp9d_dbg(VP9D_DBG_HEADER, "profile %d", ctx->profile);
667     s->show_existing_frame = mpp_get_bit1(&s->gb);
668     vp9d_dbg(VP9D_DBG_HEADER, "show_existing_frame %d", s->show_existing_frame);
669 
670     if (s->show_existing_frame) {
671         *refo = mpp_get_bits(&s->gb, 3);
672         vp9d_dbg(VP9D_DBG_HEADER, "frame_to_show %d", *refo);
673         return 0;
674     }
675 
676     s->last_keyframe  = s->keyframe;
677     s->keyframe       = !mpp_get_bit1(&s->gb);
678     vp9d_dbg(VP9D_DBG_HEADER, "frame_type %d", s->keyframe);
679     last_invisible    = s->invisible;
680     s->invisible      = !mpp_get_bit1(&s->gb);
681     vp9d_dbg(VP9D_DBG_HEADER, "show_frame_flag %d", s->invisible);
682     s->errorres       = mpp_get_bit1(&s->gb);
683     vp9d_dbg(VP9D_DBG_HEADER, "error_resilient_mode %d", s->errorres);
684     s->use_last_frame_mvs = !s->errorres && !last_invisible;
685     s->got_keyframes += s->keyframe ? 1 : 0;
686     vp9d_dbg(VP9D_DBG_HEADER, "keyframe=%d, intraonly=%d, got_keyframes=%d\n",
687              s->keyframe, s->intraonly, s->got_keyframes);
688 
689     if (!s->got_keyframes) {
690         mpp_err_f("have not got keyframe.\n");
691         return MPP_ERR_STREAM;
692     }
693 
694     /* set mvscale=16 default */
695     for (i = 0; i < 3; i++) {
696         s->mvscale[i][0] = 16;
697         s->mvscale[i][1] = 16;
698     }
699 
700     if (s->keyframe) {
701         if (mpp_get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
702             mpp_err("Invalid sync code\n");
703             return MPP_ERR_STREAM;
704         }
705 
706         if ((fmt = read_colorspace_details(ctx)) < 0)
707             return fmt;
708         // for profile 1, here follows the subsampling bits
709         s->refreshrefmask = 0xff;
710         w = mpp_get_bits(&s->gb, 16) + 1;
711         vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
712         h = mpp_get_bits(&s->gb, 16) + 1;
713         vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
714         if (mpp_get_bit1(&s->gb)) {// display size
715             RK_S32 dw, dh;
716             vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
717             dw = mpp_get_bits(&s->gb, 16) + 1;
718             vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
719             dh = mpp_get_bits(&s->gb, 16) + 1;
720             vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dh);
721         } else
722             vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
723     } else {
724         s->intraonly  = s->invisible ? mpp_get_bit1(&s->gb) : 0;
725         vp9d_dbg(VP9D_DBG_HEADER, "intra_only %d", s->intraonly);
726         s->resetctx   = s->errorres ? 0 : mpp_get_bits(&s->gb, 2);
727         vp9d_dbg(VP9D_DBG_HEADER, "reset_frame_context_value %d", s->resetctx);
728         if (s->intraonly) {
729             if (mpp_get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
730                 mpp_err("Invalid sync code\n");
731                 return MPP_ERR_STREAM;
732             }
733             if (ctx->profile == 1) {
734                 if ((fmt = read_colorspace_details(ctx)) < 0)
735                     return fmt;
736             } else {
737                 s->ss_h = s->ss_v = 1;
738                 s->bpp = 8;
739                 s->bpp_index = 0;
740                 s->bytesperpixel = 1;
741                 fmt = MPP_FMT_YUV420SP;
742                 ctx->colorspace = MPP_FRAME_SPC_BT470BG;
743                 ctx->color_range = MPP_FRAME_RANGE_JPEG;
744             }
745             s->refreshrefmask = mpp_get_bits(&s->gb, 8);
746             vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_flags %d", s->refreshrefmask);
747             w = mpp_get_bits(&s->gb, 16) + 1;
748             vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
749             h = mpp_get_bits(&s->gb, 16) + 1;
750             vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
751             if (mpp_get_bit1(&s->gb)) {// display size
752                 RK_S32 dw, dh;
753                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
754                 dw = mpp_get_bits(&s->gb, 16) + 1;
755                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
756                 dh = mpp_get_bits(&s->gb, 16) + 1;
757                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dh);
758             } else
759                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
760         } else {
761             s->refreshrefmask = mpp_get_bits(&s->gb, 8);
762             vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_flags %d", s->refreshrefmask);
763             s->refidx[0]      = mpp_get_bits(&s->gb, 3);
764             s->signbias[0]    = mpp_get_bit1(&s->gb) && !s->errorres;
765             s->refidx[1]      = mpp_get_bits(&s->gb, 3);
766             s->signbias[1]    = mpp_get_bit1(&s->gb) && !s->errorres;
767             s->refidx[2]      = mpp_get_bits(&s->gb, 3);
768             s->signbias[2]    = mpp_get_bit1(&s->gb) && !s->errorres;
769             vp9d_dbg(VP9D_DBG_HEADER, "ref_idx %d %d %d",
770                      s->refidx[0], s->refidx[1], s->refidx[2]);
771             vp9d_dbg(VP9D_DBG_HEADER, "ref_idx_ref_frame_sign_bias %d %d %d",
772                      s->signbias[0], s->signbias[1], s->signbias[2]);
773             if (!s->refs[s->refidx[0]].ref ||
774                 !s->refs[s->refidx[1]].ref ||
775                 !s->refs[s->refidx[2]].ref ) {
776                 mpp_err("Not all references are available\n");
777                 //return -1;//AVERROR_INVALIDDATA;
778             }
779             if (mpp_get_bit1(&s->gb)) {
780 
781                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 0");
782                 w = mpp_frame_get_width(s->refs[s->refidx[0]].f);
783                 h = mpp_frame_get_height(s->refs[s->refidx[0]].f);
784             } else if (mpp_get_bit1(&s->gb)) {
785                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 2");
786                 w = mpp_frame_get_width(s->refs[s->refidx[1]].f);
787                 h = mpp_frame_get_height(s->refs[s->refidx[1]].f);
788             } else if (mpp_get_bit1(&s->gb)) {
789                 vp9d_dbg(VP9D_DBG_HEADER, "ref_flag 1");
790                 w = mpp_frame_get_width(s->refs[s->refidx[2]].f);
791                 h = mpp_frame_get_height(s->refs[s->refidx[2]].f);
792             } else {
793                 w = mpp_get_bits(&s->gb, 16) + 1;
794                 vp9d_dbg(VP9D_DBG_HEADER, "frame_size_width %d", w);
795                 h = mpp_get_bits(&s->gb, 16) + 1;
796                 vp9d_dbg(VP9D_DBG_HEADER, "frame_size_height %d", h);
797             }
798             if (w == 0 || h == 0) {
799                 mpp_err("ref frame w:%d h:%d\n", w, h);
800                 return -1;
801             }
802             // Note that in this code, "CUR_FRAME" is actually before we
803             // have formally allocated a frame, and thus actually represents
804             // the _last_ frame
805             s->use_last_frame_mvs &= mpp_frame_get_width(s->frames[CUR_FRAME].f) == w &&
806                                      mpp_frame_get_height(s->frames[CUR_FRAME].f) == h;
807             if (mpp_get_bit1(&s->gb)) {// display size
808                 RK_S32 dw, dh;
809                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 1);
810                 dw = mpp_get_bits(&s->gb, 16) + 1;
811                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dw);
812                 dh = mpp_get_bits(&s->gb, 16) + 1;
813                 vp9d_dbg(VP9D_DBG_HEADER, "display_size_width %d", dh);
814             } else
815                 vp9d_dbg(VP9D_DBG_HEADER, "display_info_flag %d", 0);
816             s->highprecisionmvs = mpp_get_bit1(&s->gb);
817             vp9d_dbg(VP9D_DBG_HEADER, "allow_high_precision_mv %d", s->highprecisionmvs);
818             s->filtermode = mpp_get_bit1(&s->gb) ? FILTER_SWITCHABLE :
819                             mpp_get_bits(&s->gb, 2);
820             vp9d_dbg(VP9D_DBG_HEADER, "filtermode %d", s->filtermode);
821             s->allowcompinter = (s->signbias[0] != s->signbias[1] ||
822                                  s->signbias[0] != s->signbias[2]);
823             if (s->allowcompinter) {
824                 if (s->signbias[0] == s->signbias[1]) {
825                     s->fixcompref    = 2;
826                     s->varcompref[0] = 0;
827                     s->varcompref[1] = 1;
828                 } else if (s->signbias[0] == s->signbias[2]) {
829                     s->fixcompref    = 1;
830                     s->varcompref[0] = 0;
831                     s->varcompref[1] = 2;
832                 } else {
833                     s->fixcompref    = 0;
834                     s->varcompref[0] = 1;
835                     s->varcompref[1] = 2;
836                 }
837             }
838 
839             for (i = 0; i < 3; i++) {
840                 RK_U32 refw = mpp_frame_get_width(s->refs[s->refidx[i]].f);
841                 RK_U32 refh = mpp_frame_get_height(s->refs[s->refidx[i]].f);
842                 RK_S32 reffmt = mpp_frame_get_fmt(s->refs[s->refidx[i]].f) & MPP_FRAME_FMT_MASK;
843 
844                 vp9d_dbg(VP9D_DBG_REF, "ref get width frame slot %p", s->refs[s->refidx[i]].f);
845                 if (reffmt != fmt) {
846                     /* mpp_err("Ref pixfmt (%s) did not match current frame (%s)",
847                            av_get_pix_fmt_name(ref->format),
848                            av_get_pix_fmt_name(fmt)); */
849                     //return -1;//AVERROR_INVALIDDATA;
850                 } else if (refw == w && refh == h) {
851                     s->mvscale[i][0] = (refw << 14) / w;
852                     s->mvscale[i][1] = (refh << 14) / h;
853                 } else {
854                     if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
855                         mpp_err("Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
856                                 refw, refh, w, h);
857                         return MPP_ERR_VALUE;
858                     }
859                     s->mvscale[i][0] = (refw << 14) / w;
860                     s->mvscale[i][1] = (refh << 14) / h;
861                     s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
862                     s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
863                 }
864             }
865         }
866     }
867 
868     s->refreshctx   = s->errorres ? 0 : mpp_get_bit1(&s->gb);
869     vp9d_dbg(VP9D_DBG_HEADER, "refresh_frame_context_flag %d", s->refreshctx);
870     s->parallelmode = s->errorres ? 1 : mpp_get_bit1(&s->gb);
871     vp9d_dbg(VP9D_DBG_HEADER, "frame_parallel_decoding_mode %d", s->parallelmode);
872     s->framectxid   = c = mpp_get_bits(&s->gb, 2);
873     vp9d_dbg(VP9D_DBG_HEADER, "frame_context_idx %d", s->framectxid);
874 
875     /* loopfilter header data */
876     if (s->keyframe || s->errorres || s->intraonly) {
877         // reset loopfilter defaults
878         s->lf_delta.ref[0] = 1;
879         s->lf_delta.ref[1] = 0;
880         s->lf_delta.ref[2] = -1;
881         s->lf_delta.ref[3] = -1;
882         s->lf_delta.mode[0] = 0;
883         s->lf_delta.mode[1] = 0;
884     }
885     s->filter.level = mpp_get_bits(&s->gb, 6);
886     vp9d_dbg(VP9D_DBG_HEADER, "filter_level %d", s->filter.level);
887     sharp = mpp_get_bits(&s->gb, 3);
888     vp9d_dbg(VP9D_DBG_HEADER, "sharpness_level %d", sharp);
889     // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
890     // the old cache values since they are still valid
891     if (s->filter.sharpness != sharp)
892         memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut));
893     s->filter.sharpness = sharp;
894 
895     if ((s->lf_delta.enabled = mpp_get_bit1(&s->gb))) {
896         vp9d_dbg(VP9D_DBG_HEADER, "mode_ref_delta_enabled 1");
897         if ((s->lf_delta.update = mpp_get_bit1(&s->gb))) {
898             vp9d_dbg(VP9D_DBG_HEADER, "mode_ref_delta_update 1");
899             for (i = 0; i < 4; i++) {
900                 if (mpp_get_bit1(&s->gb))
901                     s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
902                 vp9d_dbg(VP9D_DBG_HEADER, "ref_deltas %d", s->lf_delta.ref[i]);
903             }
904             for (i = 0; i < 2; i++) {
905                 if (mpp_get_bit1(&s->gb))
906                     s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
907                 vp9d_dbg(VP9D_DBG_HEADER, "mode_deltas %d", s->lf_delta.mode[i]);
908             }
909         }
910     }
911 
912     /* quantization header data */
913     s->yac_qi      = mpp_get_bits(&s->gb, 8);
914     vp9d_dbg(VP9D_DBG_HEADER, "base_qindex %d", s->yac_qi);
915     s->ydc_qdelta  = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
916     vp9d_dbg(VP9D_DBG_HEADER, "ydc_qdelta %d", s->ydc_qdelta);
917     s->uvdc_qdelta = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
918     vp9d_dbg(VP9D_DBG_HEADER, "uvdc_qdelta %d", s->uvdc_qdelta);
919     s->uvac_qdelta = mpp_get_bit1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
920     vp9d_dbg(VP9D_DBG_HEADER, "uvac_qdelta %d", s->uvac_qdelta);
921     s->lossless    = s->yac_qi == 0 && s->ydc_qdelta == 0 &&
922                      s->uvdc_qdelta == 0 && s->uvac_qdelta == 0;
923 
924     /* segmentation header info */
925     s->segmentation.update_map = 0;
926     s->segmentation.ignore_refmap = 0;
927     if ((s->segmentation.enabled = mpp_get_bit1(&s->gb))) {
928         vp9d_dbg(VP9D_DBG_HEADER, "segmentation_enabled 1");
929         if ((s->segmentation.update_map = mpp_get_bit1(&s->gb))) {
930             vp9d_dbg(VP9D_DBG_HEADER, "update_map 1");
931             for (i = 0; i < 7; i++) {
932                 s->prob.seg[i] = mpp_get_bit1(&s->gb) ?
933                                  mpp_get_bits(&s->gb, 8) : 255;
934                 vp9d_dbg(VP9D_DBG_HEADER, "tree_probs %d value 0x%x", i, s->prob.seg[i]);
935             }
936             s->segmentation.temporal = mpp_get_bit1(&s->gb);
937             if (s->segmentation.temporal) {
938                 vp9d_dbg(VP9D_DBG_HEADER, "tempora_update 1");
939                 for (i = 0; i < 3; i++) {
940                     s->prob.segpred[i] = mpp_get_bit1(&s->gb) ?
941                                          mpp_get_bits(&s->gb, 8) : 255;
942                     vp9d_dbg(VP9D_DBG_HEADER, "pred_probs %d", i, s->prob.segpred[i]);
943                 }
944             } else {
945                 for (i = 0; i < 3; i++)
946                     s->prob.segpred[i] = 0xff;
947             }
948         }
949         if ((!s->segmentation.update_map || s->segmentation.temporal) &&
950             (w !=  mpp_frame_get_width(s->frames[CUR_FRAME].f) ||
951              h !=  mpp_frame_get_height(s->frames[CUR_FRAME].f))) {
952             /* av_log(ctx, AV_LOG_WARNING,
953                    "Reference segmap (temp=%d,update=%d) enabled on size-change!\n",
954                    s->segmentation.temporal, s->segmentation.update_map);
955                 s->segmentation.ignore_refmap = 1; */
956             //return -1;//AVERROR_INVALIDDATA;
957         }
958 
959         if (mpp_get_bit1(&s->gb)) {
960             vp9d_dbg(VP9D_DBG_HEADER, "update_data 1");
961             s->segmentation.absolute_vals = mpp_get_bit1(&s->gb);
962             vp9d_dbg(VP9D_DBG_HEADER, "abs_delta %d", s->segmentation.absolute_vals);
963             for (i = 0; i < 8; i++) {
964                 if ((s->segmentation.feat[i].q_enabled = mpp_get_bit1(&s->gb)))
965                     s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
966                 vp9d_dbg(VP9D_DBG_HEADER, "frame_qp_delta %d", s->segmentation.feat[i].q_val);
967                 if ((s->segmentation.feat[i].lf_enabled = mpp_get_bit1(&s->gb)))
968                     s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
969                 vp9d_dbg(VP9D_DBG_HEADER, "frame_loopfilter_value %d", i, s->segmentation.feat[i].lf_val);
970                 if ((s->segmentation.feat[i].ref_enabled = mpp_get_bit1(&s->gb)))
971                     s->segmentation.feat[i].ref_val = mpp_get_bits(&s->gb, 2);
972                 vp9d_dbg(VP9D_DBG_HEADER, "frame_reference_info %d", i, s->segmentation.feat[i].ref_val);
973                 s->segmentation.feat[i].skip_enabled = mpp_get_bit1(&s->gb);
974                 vp9d_dbg(VP9D_DBG_HEADER, "frame_skip %d", i, s->segmentation.feat[i].skip_enabled);
975             }
976         }
977     } else {
978         vp9d_dbg(VP9D_DBG_HEADER, "segmentation_enabled 0");
979         s->segmentation.feat[0].q_enabled    = 0;
980         s->segmentation.feat[0].lf_enabled   = 0;
981         s->segmentation.feat[0].skip_enabled = 0;
982         s->segmentation.feat[0].ref_enabled  = 0;
983     }
984 
985     // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
986     for (i = 0; i < (s->segmentation.enabled ? 8 : 1); i++) {
987         RK_S32 qyac, qydc, quvac, quvdc, lflvl, sh;
988 
989         if (s->segmentation.feat[i].q_enabled) {
990             if (s->segmentation.absolute_vals)
991                 qyac = s->segmentation.feat[i].q_val;
992             else
993                 qyac = s->yac_qi + s->segmentation.feat[i].q_val;
994         } else {
995             qyac  = s->yac_qi;
996         }
997         qydc  = av_clip_uintp2(qyac + s->ydc_qdelta, 8);
998         quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8);
999         quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8);
1000         qyac  = av_clip_uintp2(qyac, 8);
1001 
1002         s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[s->bpp_index][qydc];
1003         s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[s->bpp_index][qyac];
1004         s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[s->bpp_index][quvdc];
1005         s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[s->bpp_index][quvac];
1006 
1007         sh = s->filter.level >= 32;
1008         if (s->segmentation.feat[i].lf_enabled) {
1009             if (s->segmentation.absolute_vals)
1010                 lflvl = av_clip_uintp2(s->segmentation.feat[i].lf_val, 6);
1011             else
1012                 lflvl = av_clip_uintp2(s->filter.level + s->segmentation.feat[i].lf_val, 6);
1013         } else {
1014             lflvl  = s->filter.level;
1015         }
1016         if (s->lf_delta.enabled) {
1017             s->segmentation.feat[i].lflvl[0][0] =
1018                 s->segmentation.feat[i].lflvl[0][1] =
1019                     av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6);
1020             for (j = 1; j < 4; j++) {
1021                 s->segmentation.feat[i].lflvl[j][0] =
1022                     av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
1023                                              s->lf_delta.mode[0]) * (1 << sh)), 6);
1024                 s->segmentation.feat[i].lflvl[j][1] =
1025                     av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
1026                                              s->lf_delta.mode[1]) * (1 << sh)), 6);
1027             }
1028         } else {
1029             memset(s->segmentation.feat[i].lflvl, lflvl,
1030                    sizeof(s->segmentation.feat[i].lflvl));
1031         }
1032     }
1033 
1034     /* tiling info */
1035     if ((res = update_size(ctx, w, h, fmt)) < 0) {
1036         mpp_err("Failed to initialize decoder for %dx%d @ %d\n", w, h, fmt);
1037         return res;
1038     }
1039 
1040     for (s->tiling.log2_tile_cols = 0;
1041          (s->sb_cols >> s->tiling.log2_tile_cols) > 64;
1042          s->tiling.log2_tile_cols++) ;
1043     for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
1044     max = MPP_MAX(0, max - 1);
1045     while ((RK_U32)max > s->tiling.log2_tile_cols) {
1046         if (mpp_get_bit1(&s->gb)) {
1047             s->tiling.log2_tile_cols++;
1048             vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_col_end_flag 1");
1049         } else {
1050             vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_col_end_flag 0");
1051             break;
1052         }
1053     }
1054     s->tiling.log2_tile_rows = decode012(&s->gb);
1055     vp9d_dbg(VP9D_DBG_HEADER, "log2_tile_rows %d", s->tiling.log2_tile_rows);
1056     s->tiling.tile_rows = 1 << s->tiling.log2_tile_rows;
1057     if (s->tiling.tile_cols != (1U << s->tiling.log2_tile_cols)) {
1058         s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols;
1059         {
1060             RK_U32 min_size = sizeof(VpxRangeCoder) * s->tiling.tile_cols;
1061             if (min_size > s->c_b_size) {
1062                 s->c_b = (VpxRangeCoder *)mpp_malloc(RK_U8, min_size);
1063                 s->c_b_size = min_size;
1064             }
1065         }
1066         if (!s->c_b) {
1067             mpp_err("Ran out of memory during range coder init\n");
1068             return MPP_ERR_NOMEM;
1069         }
1070     }
1071 
1072     if (s->keyframe || s->errorres ||
1073         (s->intraonly && s->resetctx == 3)) {
1074         s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
1075                                                   s->prob_ctx[3].p = vp9_default_probs;
1076         memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
1077                sizeof(vp9_default_coef_probs));
1078         memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
1079                sizeof(vp9_default_coef_probs));
1080         memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
1081                sizeof(vp9_default_coef_probs));
1082         memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
1083                sizeof(vp9_default_coef_probs));
1084     } else if (s->intraonly && s->resetctx == 2) {
1085         s->prob_ctx[c].p = vp9_default_probs;
1086         memcpy(s->prob_ctx[c].coef, vp9_default_coef_probs,
1087                sizeof(vp9_default_coef_probs));
1088     }
1089     if (s->keyframe || s->errorres || s->intraonly)
1090         s->framectxid = c = 0;
1091 
1092     // next 16 bits is size of the rest of the header (arith-coded)
1093     size2 = mpp_get_bits(&s->gb, 16);
1094     vp9d_dbg(VP9D_DBG_HEADER, "first_partition_size %d", size2);
1095     s->first_partition_size = size2;
1096     data2 = mpp_align_get_bits(&s->gb);
1097     vp9d_dbg(VP9D_DBG_HEADER, "offset %d", data2 - data);
1098     s->uncompress_head_size_in_byte = data2 - data;
1099     if (size2 > size - (data2 - data)) {
1100         mpp_err("Invalid compressed header size\n");
1101         return MPP_ERR_STREAM;
1102     }
1103     vpx_init_range_decoder(&s->c, data2, size2);
1104     if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
1105         mpp_err("Marker bit was set\n");
1106         return MPP_ERR_STREAM;
1107     }
1108 
1109     if (s->keyframe || s->intraonly) {
1110         memset(s->counts.coef, 0, sizeof(s->counts.coef));
1111         memset(s->counts.eob,  0, sizeof(s->counts.eob));
1112     } else {
1113         memset(&s->counts, 0, sizeof(s->counts));
1114     }
1115     // FIXME is it faster to not copy here, but do it down in the fw updates
1116     // as explicit copies if the fw update is missing (and skip the copy upon
1117     // fw update)?
1118     s->prob.p = s->prob_ctx[c].p;
1119     memset(&s->prob_flag_delta, 0, sizeof(s->prob_flag_delta));
1120     // txfm updates
1121     if (s->lossless) {
1122         s->txfmmode = TX_4X4;
1123     } else {
1124         s->txfmmode = vpx_rac_get_uint(&s->c, 2);
1125         if (s->txfmmode == 3)
1126             s->txfmmode += vpx_rac_get(&s->c);
1127 
1128         if (s->txfmmode == TX_SWITCHABLE) {
1129             for (i = 0; i < 2; i++) {
1130 
1131                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1132                     s->prob_flag_delta.p_flag.tx8p[i] = 1;
1133                     s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i],
1134                                                     &s->prob_flag_delta.p_delta.tx8p[i]);
1135                 }
1136 
1137             }
1138             for (i = 0; i < 2; i++)
1139                 for (j = 0; j < 2; j++) {
1140 
1141                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1142                         s->prob_flag_delta.p_flag.tx16p[i][j] = 1;
1143                         s->prob.p.tx16p[i][j] =
1144                             update_prob(&s->c, s->prob.p.tx16p[i][j],
1145                                         &s->prob_flag_delta.p_delta.tx16p[i][j]);
1146                     }
1147                 }
1148             for (i = 0; i < 2; i++)
1149                 for (j = 0; j < 3; j++) {
1150 
1151                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1152                         s->prob_flag_delta.p_flag.tx32p[i][j] = 1;
1153                         s->prob.p.tx32p[i][j] =
1154                             update_prob(&s->c, s->prob.p.tx32p[i][j],
1155                                         &s->prob_flag_delta.p_delta.tx32p[i][j]);
1156                     }
1157                 }
1158         }
1159     }
1160 
1161     // coef updates
1162     for (i = 0; i < 4; i++) {
1163         RK_U8 (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
1164         if (vpx_rac_get(&s->c)) {
1165             for (j = 0; j < 2; j++)
1166                 for (k = 0; k < 2; k++)
1167                     for (l = 0; l < 6; l++)
1168                         for (m = 0; m < 6; m++) {
1169                             RK_U8 *p = s->prob.coef[i][j][k][l][m];
1170                             RK_U8 *p_flag = s->prob_flag_delta.coef_flag[i][j][k][l][m];
1171                             RK_U8 *p_delta = s->prob_flag_delta.coef_delta[i][j][k][l][m];
1172                             RK_U8 *r = ref[j][k][l][m];
1173                             if (l == 0 && m >= 3) // dc only has 3 pt
1174                                 break;
1175                             for (n = 0; n < 3; n++) {
1176                                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1177                                     p_flag[n] = 1;
1178                                     p[n] = update_prob(&s->c, r[n], &p_delta[n]);
1179                                 } else {
1180                                     p_flag[n] = 0;
1181                                     p[n] = r[n];
1182                                 }
1183                             }
1184                         }
1185         } else {
1186             for (j = 0; j < 2; j++)
1187                 for (k = 0; k < 2; k++)
1188                     for (l = 0; l < 6; l++)
1189                         for (m = 0; m < 6; m++) {
1190                             RK_U8 *p = s->prob.coef[i][j][k][l][m];
1191                             RK_U8 *r = ref[j][k][l][m];
1192                             if (m >= 3 && l == 0) // dc only has 3 pt
1193                                 break;
1194                             memcpy(p, r, 3);
1195                         }
1196         }
1197         if (s->txfmmode == (RK_U32)i)
1198             break;
1199     }
1200 
1201     // mode updates
1202     for (i = 0; i < 3; i++) {
1203 
1204         if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1205             s->prob_flag_delta.p_flag.skip[i] = 1;
1206             s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i],
1207                                             &s->prob_flag_delta.p_delta.skip[i]);
1208         }
1209     }
1210 
1211     if (!s->keyframe && !s->intraonly) {
1212         for (i = 0; i < 7; i++)
1213             for (j = 0; j < 3; j++) {
1214                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1215                     s->prob_flag_delta.p_flag.mv_mode[i][j] = 1;
1216                     s->prob.p.mv_mode[i][j] =
1217                         update_prob(&s->c, s->prob.p.mv_mode[i][j],
1218                                     &s->prob_flag_delta.p_delta.mv_mode[i][j]);
1219                 }
1220             }
1221 
1222         if (s->filtermode == FILTER_SWITCHABLE)
1223             for (i = 0; i < 4; i++)
1224                 for (j = 0; j < 2; j++) {
1225                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1226                         s->prob_flag_delta.p_flag.filter[i][j] = 1;
1227                         s->prob.p.filter[i][j] =
1228                             update_prob(&s->c, s->prob.p.filter[i][j],
1229                                         &s->prob_flag_delta.p_delta.filter[i][j]);
1230                     }
1231                 }
1232 
1233         for (i = 0; i < 4; i++) {
1234 
1235             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1236                 s->prob_flag_delta.p_flag.intra[i] = 1;
1237                 s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i],
1238                                                  &s->prob_flag_delta.p_delta.intra[i]);
1239             }
1240 
1241         }
1242 
1243         if (s->allowcompinter) {
1244             s->comppredmode = vpx_rac_get(&s->c);
1245             if (s->comppredmode)
1246                 s->comppredmode += vpx_rac_get(&s->c);
1247             if (s->comppredmode == PRED_SWITCHABLE)
1248                 for (i = 0; i < 5; i++) {
1249                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1250                         s->prob_flag_delta.p_flag.comp[i] = 1;
1251                         s->prob.p.comp[i] =
1252                             update_prob(&s->c, s->prob.p.comp[i],
1253                                         &s->prob_flag_delta.p_delta.comp[i]);
1254                     }
1255                 }
1256         } else {
1257             s->comppredmode = PRED_SINGLEREF;
1258         }
1259 
1260         if (s->comppredmode != PRED_COMPREF) {
1261             for (i = 0; i < 5; i++) {
1262                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1263                     s->prob_flag_delta.p_flag.single_ref[i][0] = 1;
1264                     s->prob.p.single_ref[i][0] =
1265                         update_prob(&s->c, s->prob.p.single_ref[i][0],
1266                                     &s->prob_flag_delta.p_delta.single_ref[i][0]);
1267                 }
1268                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1269                     s->prob_flag_delta.p_flag.single_ref[i][1] = 1;
1270                     s->prob.p.single_ref[i][1] =
1271                         update_prob(&s->c, s->prob.p.single_ref[i][1],
1272                                     &s->prob_flag_delta.p_delta.single_ref[i][1]);
1273                 }
1274             }
1275         }
1276 
1277         if (s->comppredmode != PRED_SINGLEREF) {
1278             for (i = 0; i < 5; i++) {
1279                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1280                     s->prob_flag_delta.p_flag.comp_ref[i] = 1;
1281                     s->prob.p.comp_ref[i] =
1282                         update_prob(&s->c, s->prob.p.comp_ref[i],
1283                                     &s->prob_flag_delta.p_delta.comp_ref[i]);
1284                 }
1285             }
1286         }
1287 
1288         for (i = 0; i < 4; i++)
1289             for (j = 0; j < 9; j++) {
1290                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1291                     s->prob_flag_delta.p_flag.y_mode[i][j] = 1;
1292                     s->prob.p.y_mode[i][j] =
1293                         update_prob(&s->c, s->prob.p.y_mode[i][j],
1294                                     &s->prob_flag_delta.p_delta.y_mode[i][j]);
1295                 }
1296             }
1297 
1298         for (i = 0; i < 4; i++)
1299             for (j = 0; j < 4; j++)
1300                 for (k = 0; k < 3; k++) {
1301                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1302                         s->prob_flag_delta.p_flag.partition[3 - i][j][k] = 1;
1303                         s->prob.p.partition[3 - i][j][k] =
1304                             update_prob(&s->c, s->prob.p.partition[3 - i][j][k],
1305                                         &s->prob_flag_delta.p_delta.partition[3 - i][j][k]);
1306                     }
1307                 }
1308 
1309         // mv fields don't use the update_prob subexp model for some reason
1310         for (i = 0; i < 3; i++) {
1311             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1312                 s->prob_flag_delta.p_flag.mv_joint[i]   = 1;
1313                 s->prob_flag_delta.p_delta.mv_joint[i]  =
1314                     s->prob.p.mv_joint[i]   = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1315             }
1316         }
1317 
1318         for (i = 0; i < 2; i++) {
1319             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1320                 s->prob_flag_delta.p_flag.mv_comp[i].sign   = 1;
1321                 s->prob_flag_delta.p_delta.mv_comp[i].sign  =
1322                     s->prob.p.mv_comp[i].sign   = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1323             }
1324 
1325             for (j = 0; j < 10; j++)
1326                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1327                     s->prob_flag_delta.p_flag.mv_comp[i].classes[j]  = 1;
1328                     s->prob_flag_delta.p_delta.mv_comp[i].classes[j] =
1329                         s->prob.p.mv_comp[i].classes[j]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1330                 }
1331 
1332             if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1333                 s->prob_flag_delta.p_flag.mv_comp[i].class0  = 1;
1334                 s->prob_flag_delta.p_delta.mv_comp[i].class0 =
1335                     s->prob.p.mv_comp[i].class0  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1336             }
1337 
1338             for (j = 0; j < 10; j++)
1339                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1340                     s->prob_flag_delta.p_flag.mv_comp[i].bits[j]  = 1;
1341                     s->prob_flag_delta.p_delta.mv_comp[i].bits[j] =
1342                         s->prob.p.mv_comp[i].bits[j]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1343                 }
1344         }
1345 
1346         for (i = 0; i < 2; i++) {
1347             for (j = 0; j < 2; j++)
1348                 for (k = 0; k < 3; k++)
1349                     if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1350                         s->prob_flag_delta.p_flag.mv_comp[i].class0_fp[j][k]  = 1;
1351                         s->prob_flag_delta.p_delta.mv_comp[i].class0_fp[j][k] =
1352                             s->prob.p.mv_comp[i].class0_fp[j][k]  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1353                     }
1354 
1355             for (j = 0; j < 3; j++)
1356                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1357                     s->prob_flag_delta.p_flag.mv_comp[i].fp[j]  = 1;
1358                     s->prob_flag_delta.p_delta.mv_comp[i].fp[j] =
1359                         s->prob.p.mv_comp[i].fp[j]  =
1360                             (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1361                 }
1362         }
1363 
1364         if (s->highprecisionmvs) {
1365             for (i = 0; i < 2; i++) {
1366                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1367                     s->prob_flag_delta.p_flag.mv_comp[i].class0_hp  = 1;
1368                     s->prob_flag_delta.p_delta.mv_comp[i].class0_hp =
1369                         s->prob.p.mv_comp[i].class0_hp  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1370                 }
1371 
1372                 if (vpx_rac_get_prob_branchy(&s->c, 252)) {
1373                     s->prob_flag_delta.p_flag.mv_comp[i].hp  = 1;
1374                     s->prob_flag_delta.p_delta.mv_comp[i].hp =
1375                         s->prob.p.mv_comp[i].hp  = (vpx_rac_get_uint(&s->c, 7) << 1) | 1;
1376                 }
1377             }
1378         }
1379     }
1380 
1381     return (RK_S32)((data2 - data) + size2);
1382 }
1383 
adapt_prob(RK_U8 * p,RK_U32 ct0,RK_U32 ct1,RK_S32 max_count,RK_S32 update_factor)1384 static void adapt_prob(RK_U8 *p, RK_U32 ct0, RK_U32 ct1,
1385                        RK_S32 max_count, RK_S32 update_factor)
1386 {
1387     RK_U32 ct = ct0 + ct1, p2, p1;
1388 
1389     if (!ct)
1390         return;
1391 
1392     p1 = *p;
1393     p2 = ((ct0 << 8) + (ct >> 1)) / ct;
1394     p2 = mpp_clip(p2, 1, 255);
1395     ct = MPP_MIN(ct, (RK_U32)max_count);
1396     update_factor = FASTDIV(update_factor * ct, max_count);
1397 
1398     // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
1399     *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
1400 }
1401 
adapt_probs(VP9Context * s)1402 static void adapt_probs(VP9Context *s)
1403 {
1404     RK_S32 i, j, k, l, m;
1405     prob_context *p = &s->prob_ctx[s->framectxid].p;
1406     RK_S32 uf = (s->keyframe || s->intraonly || !s->last_keyframe) ? 112 : 128;
1407 
1408     // coefficients
1409     for (i = 0; i < 4; i++)
1410         for (j = 0; j < 2; j++)
1411             for (k = 0; k < 2; k++)
1412                 for (l = 0; l < 6; l++)
1413                     for (m = 0; m < 6; m++) {
1414                         RK_U8 *pp = s->prob_ctx[s->framectxid].coef[i][j][k][l][m];
1415                         RK_U32 *e = s->counts.eob[i][j][k][l][m];
1416                         RK_U32 *c = s->counts.coef[i][j][k][l][m];
1417 
1418                         if (l == 0 && m >= 3) // dc only has 3 pt
1419                             break;
1420                         /*  if(i == 0 && j == 0 && k== 1 && l == 0){
1421                              mpp_log("e[0] = 0x%x e[1] = 0x%x c[0] = 0x%x c[1] = 0x%x c[2] = 0x%x \n",
1422                              e[0],e[1],c[0],c[1],c[2]);
1423                              mpp_log("pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1424                           }*/
1425                         adapt_prob(&pp[0], e[0], e[1], 24, uf);
1426                         adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
1427                         adapt_prob(&pp[2], c[1], c[2], 24, uf);
1428                         /* if(i == 0 && j == 0 && k== 1 && l == 0){
1429                             mpp_log("after pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1430                          }*/
1431                     }
1432 #ifdef dump
1433     fwrite(&s->counts, 1, sizeof(s->counts), vp9_p_fp);
1434     fflush(vp9_p_fp);
1435 #endif
1436 
1437     if (s->keyframe || s->intraonly) {
1438         memcpy(p->skip,  s->prob.p.skip,  sizeof(p->skip));
1439         memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
1440         memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
1441         memcpy(p->tx8p,  s->prob.p.tx8p,  sizeof(p->tx8p));
1442         return;
1443     }
1444 
1445     // skip flag
1446     for (i = 0; i < 3; i++)
1447         adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
1448 
1449     // intra/inter flag
1450     for (i = 0; i < 4; i++)
1451         adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
1452 
1453     // comppred flag
1454     if (s->comppredmode == PRED_SWITCHABLE) {
1455         for (i = 0; i < 5; i++)
1456             adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
1457     }
1458 
1459     // reference frames
1460     if (s->comppredmode != PRED_SINGLEREF) {
1461         for (i = 0; i < 5; i++)
1462             adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
1463                        s->counts.comp_ref[i][1], 20, 128);
1464     }
1465 
1466     if (s->comppredmode != PRED_COMPREF) {
1467         for (i = 0; i < 5; i++) {
1468             RK_U8 *pp = p->single_ref[i];
1469             RK_U32 (*c)[2] = s->counts.single_ref[i];
1470 
1471             adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
1472             adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
1473         }
1474     }
1475 
1476     // block partitioning
1477     for (i = 0; i < 4; i++)
1478         for (j = 0; j < 4; j++) {
1479             RK_U8 *pp = p->partition[i][j];
1480             RK_U32 *c = s->counts.partition[i][j];
1481             // mpp_log("befor pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1482             // mpp_log("befor c[0] = 0x%x c[1] = 0x%x c[2] = 0x%x",c[0],c[1],c[2]);
1483             adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1484             adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1485             adapt_prob(&pp[2], c[2], c[3], 20, 128);
1486             // mpp_log(" after pp[0] = 0x%x pp[1] = 0x%x pp[2] = 0x%x",pp[0],pp[1],pp[2]);
1487         }
1488 
1489     // tx size
1490     if (s->txfmmode == TX_SWITCHABLE) {
1491         for (i = 0; i < 2; i++) {
1492             RK_U32 *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
1493 
1494             adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
1495             adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
1496             adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
1497             adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
1498             adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
1499             adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
1500         }
1501     }
1502 
1503     // interpolation filter
1504     if (s->filtermode == FILTER_SWITCHABLE) {
1505         for (i = 0; i < 4; i++) {
1506             RK_U8 *pp = p->filter[i];
1507             RK_U32 *c = s->counts.filter[i];
1508 
1509             adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
1510             adapt_prob(&pp[1], c[1], c[2], 20, 128);
1511         }
1512     }
1513 
1514     // inter modes
1515     for (i = 0; i < 7; i++) {
1516         RK_U8 *pp = p->mv_mode[i];
1517         RK_U32 *c = s->counts.mv_mode[i];
1518 
1519         adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
1520         adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
1521         adapt_prob(&pp[2], c[1], c[3], 20, 128);
1522     }
1523 
1524     // mv joints
1525     {
1526         RK_U8 *pp = p->mv_joint;
1527         RK_U32 *c = s->counts.mv_joint;
1528 
1529         adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1530         adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1531         adapt_prob(&pp[2], c[2], c[3], 20, 128);
1532     }
1533 
1534     // mv components
1535     for (i = 0; i < 2; i++) {
1536         RK_U8 *pp;
1537         RK_U32 *c, (*c2)[2], sum;
1538 
1539         adapt_prob(&p->mv_comp[i].sign, s->counts.sign[i][0],
1540                    s->counts.sign[i][1], 20, 128);
1541 
1542         pp = p->mv_comp[i].classes;
1543         c = s->counts.classes[i];
1544         sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
1545         adapt_prob(&pp[0], c[0], sum, 20, 128);
1546         sum -= c[1];
1547         adapt_prob(&pp[1], c[1], sum, 20, 128);
1548         sum -= c[2] + c[3];
1549         adapt_prob(&pp[2], c[2] + c[3], sum, 20, 128);
1550         adapt_prob(&pp[3], c[2], c[3], 20, 128);
1551         sum -= c[4] + c[5];
1552         adapt_prob(&pp[4], c[4] + c[5], sum, 20, 128);
1553         adapt_prob(&pp[5], c[4], c[5], 20, 128);
1554         sum -= c[6];
1555         adapt_prob(&pp[6], c[6], sum, 20, 128);
1556         adapt_prob(&pp[7], c[7] + c[8], c[9] + c[10], 20, 128);
1557         adapt_prob(&pp[8], c[7], c[8], 20, 128);
1558         adapt_prob(&pp[9], c[9], c[10], 20, 128);
1559 
1560         adapt_prob(&p->mv_comp[i].class0, s->counts.class0[i][0],
1561                    s->counts.class0[i][1], 20, 128);
1562         pp = p->mv_comp[i].bits;
1563         c2 = s->counts.bits[i];
1564         for (j = 0; j < 10; j++)
1565             adapt_prob(&pp[j], c2[j][0], c2[j][1], 20, 128);
1566 
1567         for (j = 0; j < 2; j++) {
1568             pp = p->mv_comp[i].class0_fp[j];
1569             c = s->counts.class0_fp[i][j];
1570             adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1571             adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1572             adapt_prob(&pp[2], c[2], c[3], 20, 128);
1573         }
1574         pp = p->mv_comp[i].fp;
1575         c = s->counts.fp[i];
1576         adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
1577         adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
1578         adapt_prob(&pp[2], c[2], c[3], 20, 128);
1579 
1580         if (s->highprecisionmvs) {
1581             adapt_prob(&p->mv_comp[i].class0_hp, s->counts.class0_hp[i][0],
1582                        s->counts.class0_hp[i][1], 20, 128);
1583             adapt_prob(&p->mv_comp[i].hp, s->counts.hp[i][0],
1584                        s->counts.hp[i][1], 20, 128);
1585         }
1586     }
1587 
1588     // y intra modes
1589     for (i = 0; i < 4; i++) {
1590         RK_U8 *pp = p->y_mode[i];
1591         RK_U32 *c = s->counts.y_mode[i], sum, s2;
1592 
1593         sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
1594         adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
1595         sum -= c[TM_VP8_PRED];
1596         adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
1597         sum -= c[VERT_PRED];
1598         adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
1599         s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
1600         sum -= s2;
1601         adapt_prob(&pp[3], s2, sum, 20, 128);
1602         s2 -= c[HOR_PRED];
1603         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
1604         adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
1605         sum -= c[DIAG_DOWN_LEFT_PRED];
1606         adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
1607         sum -= c[VERT_LEFT_PRED];
1608         adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
1609         adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
1610     }
1611 
1612     // uv intra modes
1613     for (i = 0; i < 10; i++) {
1614         RK_U8 *pp = p->uv_mode[i];
1615         RK_U32 *c = s->counts.uv_mode[i], sum, s2;
1616 
1617         sum = c[0] + c[1] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
1618         adapt_prob(&pp[0], c[DC_PRED], sum, 20, 128);
1619         sum -= c[TM_VP8_PRED];
1620         adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
1621         sum -= c[VERT_PRED];
1622         adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
1623         s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
1624         sum -= s2;
1625         adapt_prob(&pp[3], s2, sum, 20, 128);
1626         s2 -= c[HOR_PRED];
1627         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
1628         adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
1629         sum -= c[DIAG_DOWN_LEFT_PRED];
1630         adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
1631         sum -= c[VERT_LEFT_PRED];
1632         adapt_prob(&pp[7], c[VERT_LEFT_PRED], sum, 20, 128);
1633         adapt_prob(&pp[8], c[HOR_DOWN_PRED], c[HOR_UP_PRED], 20, 128);
1634     }
1635 #if 0 //def dump
1636     fwrite(s->counts.y_mode, 1, sizeof(s->counts.y_mode), vp9_p_fp1);
1637     fwrite(s->counts.uv_mode, 1, sizeof(s->counts.uv_mode), vp9_p_fp1);
1638     fflush(vp9_p_fp1);
1639 #endif
1640 }
1641 
1642 
vp9_parser_frame(Vp9CodecContext * ctx,HalDecTask * task)1643 RK_S32 vp9_parser_frame(Vp9CodecContext *ctx, HalDecTask *task)
1644 {
1645 
1646     const RK_U8 *data = NULL;
1647     RK_S32 size = 0;
1648     VP9Context *s = (VP9Context *)ctx->priv_data;
1649     RK_S32 res, i, ref = 0;
1650 
1651     vp9d_dbg(VP9D_DBG_FUNCTION, "%s", __FUNCTION__);
1652     task->valid = -1;
1653 #ifdef dump
1654     dec_num++;
1655 #endif
1656     data = (const RK_U8 *)mpp_packet_get_pos(ctx->pkt);
1657     size = (RK_S32)mpp_packet_get_length(ctx->pkt);
1658 
1659     s->pts = mpp_packet_get_pts(ctx->pkt);
1660     s->dts = mpp_packet_get_dts(ctx->pkt);
1661 
1662     vp9d_dbg(VP9D_DBG_HEADER, "data size %d", size);
1663     if (size <= 0) {
1664         return MPP_OK;
1665     }
1666     if ((res = decode_parser_header(ctx, data, size, &ref)) < 0) {
1667         return res;
1668     } else if (res == 0) {
1669         if (!s->refs[ref].ref) {
1670             //mpp_err("Requested reference %d not available\n", ref);
1671             return -1;//AVERROR_INVALIDDATA;
1672         }
1673         {
1674             MppFrame frame = NULL;
1675 
1676             mpp_buf_slot_get_prop(s->slots, s->refs[ref].slot_index, SLOT_FRAME_PTR, &frame);
1677             mpp_frame_set_pts(frame, s->pts);
1678             mpp_frame_set_dts(frame, s->dts);
1679             mpp_buf_slot_set_flag(s->slots, s->refs[ref].slot_index, SLOT_QUEUE_USE);
1680             mpp_buf_slot_enqueue(s->slots, s->refs[ref].slot_index, QUEUE_DISPLAY);
1681             s->refs[ref].ref->is_output = 1;
1682         }
1683 
1684         mpp_log("out repeat num %d", s->outframe_num++);
1685         return size;
1686     }
1687     data += res;
1688     size -= res;
1689 
1690     if (s->frames[REF_FRAME_MVPAIR].ref)
1691         vp9_unref_frame(s, &s->frames[REF_FRAME_MVPAIR]);
1692 
1693     if (!s->intraonly && !s->keyframe && !s->errorres && s->frames[CUR_FRAME].ref) {
1694         if ((res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_MVPAIR], &s->frames[CUR_FRAME])) < 0)
1695             return res;
1696     }
1697 
1698     if (s->frames[CUR_FRAME].ref)
1699         vp9_unref_frame(s, &s->frames[CUR_FRAME]);
1700 
1701     if ((res = vp9_alloc_frame(ctx, &s->frames[CUR_FRAME])) < 0)
1702         return res;
1703 
1704     if (s->refreshctx && s->parallelmode) {
1705         RK_S32 j, k, l, m;
1706 
1707         for (i = 0; i < 4; i++) {
1708             for (j = 0; j < 2; j++)
1709                 for (k = 0; k < 2; k++)
1710                     for (l = 0; l < 6; l++)
1711                         for (m = 0; m < 6; m++)
1712                             memcpy(s->prob_ctx[s->framectxid].coef[i][j][k][l][m],
1713                                    s->prob.coef[i][j][k][l][m], 3);
1714             if ((RK_S32)s->txfmmode == i)
1715                 break;
1716         }
1717         s->prob_ctx[s->framectxid].p = s->prob.p;
1718     }
1719 
1720     vp9d_parser2_syntax(ctx);
1721 
1722     task->syntax.data = (void*)&ctx->pic_params;
1723     task->syntax.number = 1;
1724     task->valid = 1;
1725     task->output = s->frames[CUR_FRAME].slot_index;
1726     task->input_packet = ctx->pkt;
1727 
1728     for (i = 0; i < 3; i++) {
1729         if (s->refs[s->refidx[i]].slot_index < 0x7f) {
1730             MppFrame mframe = NULL;
1731             mpp_buf_slot_set_flag(s->slots, s->refs[s->refidx[i]].slot_index, SLOT_HAL_INPUT);
1732             task->refer[i] = s->refs[s->refidx[i]].slot_index;
1733             mpp_buf_slot_get_prop(s->slots, task->refer[i], SLOT_FRAME_PTR, &mframe);
1734             if (mframe && !s->keyframe && !s->intraonly)
1735                 task->flags.ref_err |= mpp_frame_get_errinfo(mframe);
1736         } else {
1737             task->refer[i] = -1;
1738         }
1739     }
1740 
1741     vp9d_dbg(VP9D_DBG_REF, "ref_errinfo=%d\n", task->flags.ref_err);
1742     if (s->eos) {
1743         task->flags.eos = 1;
1744     }
1745 
1746     if (!s->invisible) {
1747         mpp_buf_slot_set_flag(s->slots,  s->frames[CUR_FRAME].slot_index, SLOT_QUEUE_USE);
1748         mpp_buf_slot_enqueue(s->slots, s->frames[CUR_FRAME].slot_index, QUEUE_DISPLAY);
1749     }
1750     vp9d_dbg(VP9D_DBG_REF, "s->refreshrefmask = %d s->frames[CUR_FRAME] = %d",
1751              s->refreshrefmask, s->frames[CUR_FRAME].slot_index);
1752     for (i = 0; i < 3; i++) {
1753         if (s->refs[s->refidx[i]].ref != NULL) {
1754             vp9d_dbg(VP9D_DBG_REF, "ref buf select %d", s->refs[s->refidx[i]].slot_index);
1755         }
1756     }
1757     // ref frame setup
1758     for (i = 0; i < 8; i++) {
1759         vp9d_dbg(VP9D_DBG_REF, "s->refreshrefmask = 0x%x", s->refreshrefmask);
1760         res = 0;
1761         if (s->refreshrefmask & (1 << i)) {
1762             if (s->refs[i].ref)
1763                 vp9_unref_frame(s, &s->refs[i]);
1764             vp9d_dbg(VP9D_DBG_REF, "update ref index in %d", i);
1765             res = vp9_ref_frame(ctx, &s->refs[i], &s->frames[CUR_FRAME]);
1766         }
1767 
1768         if (s->refs[i].ref)
1769             vp9d_dbg(VP9D_DBG_REF, "s->refs[%d] = %d", i, s->refs[i].slot_index);
1770         if (res < 0)
1771             return 0;
1772     }
1773     return 0;
1774 }
1775 
vp9d_paser_reset(Vp9CodecContext * ctx)1776 MPP_RET vp9d_paser_reset(Vp9CodecContext *ctx)
1777 {
1778     RK_S32 i;
1779     VP9Context *s = ctx->priv_data;
1780     SplitContext_t *ps = (SplitContext_t *)ctx->priv_data2;
1781     VP9ParseContext *pc = (VP9ParseContext *)ps->priv_data;
1782 
1783     s->got_keyframes = 0;
1784     s->cur_poc = 0;
1785     for (i = 0; i < 3; i++) {
1786         if (s->frames[i].ref) {
1787             vp9_unref_frame(s, &s->frames[i]);
1788         }
1789     }
1790     for (i = 0; i < 8; i++) {
1791         if (s->refs[i].ref) {
1792             vp9_unref_frame(s, &s->refs[i]);
1793         }
1794     }
1795     memset(pc, 0, sizeof(VP9ParseContext));
1796 
1797     s->eos = 0;
1798     if (ps) {
1799         ps->eos = 0;
1800     }
1801     return MPP_OK;
1802 }
inv_count_data(VP9Context * s)1803 static void inv_count_data(VP9Context *s)
1804 {
1805     RK_U32 partition_probs[4][4][4];
1806     RK_U32 count_uv[10][10];
1807     RK_U32 count_y_mode[4][10];
1808     RK_U32 *dst_uv = NULL;
1809     RK_S32 i, j;
1810 
1811     /*
1812                  syntax              hardware
1813              *+++++64x64+++++*   *++++8x8++++*
1814              *+++++32x32+++*     *++++16x16++++*
1815              *+++++16x16+++*     *++++32x32++++*
1816              *+++++8x8+++*       *++++64x64++++++*
1817      */
1818 
1819     memcpy(&partition_probs, s->counts.partition, sizeof(s->counts.partition));
1820     j = 0;
1821     for (i = 3; i >= 0; i--) {
1822         memcpy(&s->counts.partition[j], &partition_probs[i], 64);
1823         j++;
1824     }
1825     if (!(s->keyframe || s->intraonly)) {
1826         memcpy(count_y_mode, s->counts.y_mode, sizeof(s->counts.y_mode));
1827         for (i = 0; i < 4; i++) {
1828             RK_U32 value = 0;
1829             for (j = 0; j < 10; j++) {
1830                 value = count_y_mode[i][j];
1831                 if (j == 0)
1832                     s->counts.y_mode[i][2] = value;
1833                 else if (j == 1)
1834                     s->counts.y_mode[i][0] = value;
1835                 else if (j == 2)
1836                     s->counts.y_mode[i][1] = value;
1837                 else if (j == 7)
1838                     s->counts.y_mode[i][8] = value;
1839                 else if (j == 8)
1840                     s->counts.y_mode[i][7] = value;
1841                 else
1842                     s->counts.y_mode[i][j] = value;
1843 
1844             }
1845         }
1846 
1847 
1848         memcpy(count_uv, s->counts.uv_mode, sizeof(s->counts.uv_mode));
1849 
1850         /*change uv_mode to hardware need style*/
1851         /*
1852               syntax              hardware
1853          *+++++ v   ++++*     *++++ dc   ++++*
1854          *+++++ h   ++++*     *++++ v   ++++*
1855          *+++++ dc  ++++*     *++++ h  ++++*
1856          *+++++ d45 ++++*     *++++ d45 ++++*
1857          *+++++ d135++++*     *++++ d135++++*
1858          *+++++ d117++++*     *++++ d117++++*
1859          *+++++ d153++++*     *++++ d153++++*
1860          *+++++ d63 ++++*     *++++ d207++++*
1861          *+++++ d207 ++++*    *++++ d63 ++++*
1862          *+++++ tm  ++++*     *++++ tm  ++++*
1863         */
1864         for (i = 0; i < 10; i++) {
1865             RK_U32 *src_uv = (RK_U32 *)(count_uv[i]);
1866             RK_U32 value = 0;
1867             if (i == 0) {
1868                 dst_uv = s->counts.uv_mode[2]; //dc
1869             } else if ( i == 1) {
1870                 dst_uv = s->counts.uv_mode[0]; //h
1871             }  else if ( i == 2) {
1872                 dst_uv = s->counts.uv_mode[1]; //h
1873             }  else if ( i == 7) {
1874                 dst_uv = s->counts.uv_mode[8]; //d207
1875             } else if (i == 8) {
1876                 dst_uv = s->counts.uv_mode[7]; //d63
1877             } else {
1878                 dst_uv = s->counts.uv_mode[i];
1879             }
1880             for (j = 0; j < 10; j++) {
1881                 value = src_uv[j];
1882                 if (j == 0)
1883                     dst_uv[2] = value;
1884                 else if (j == 1)
1885                     dst_uv[0] = value;
1886                 else if (j == 2)
1887                     dst_uv[1] = value;
1888                 else if (j == 7)
1889                     dst_uv[8] = value;
1890                 else if (j == 8)
1891                     dst_uv[7] = value;
1892                 else
1893                     dst_uv[j] = value;
1894             }
1895 
1896         }
1897     }
1898 }
1899 
vp9_parser_update(Vp9CodecContext * ctx,void * count_info)1900 void vp9_parser_update(Vp9CodecContext *ctx, void *count_info)
1901 {
1902     VP9Context *s = ctx->priv_data;
1903 
1904 #ifdef dump
1905     char filename[20] = "data/pcout";
1906     char filename1[20] = "data/uppor";
1907     if (vp9_p_fp != NULL) {
1908         fclose(vp9_p_fp);
1909 
1910     }
1911     if (vp9_p_fp1 != NULL) {
1912         fclose(vp9_p_fp1);
1913 
1914     }
1915     sprintf(&filename[10], "%d.bin", count);
1916     sprintf(&filename1[10], "%d.bin", count);
1917     mpp_log("filename %s", filename);
1918     vp9_p_fp = fopen(filename, "wb");
1919     vp9_p_fp1 = fopen(filename1, "wb");
1920 #endif
1921     //update count from hardware
1922     if (count_info != NULL) {
1923 
1924         memcpy((void *)&s->counts, count_info, sizeof(s->counts));
1925 
1926         if (s->refreshctx && !s->parallelmode) {
1927 #ifdef dump
1928             count++;
1929 #endif
1930             inv_count_data(s);
1931             adapt_probs(s);
1932 
1933         }
1934     }
1935 
1936     return;
1937 }
1938