/*
 * Copyright 2015 Rockchip Electronics Co. LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define MODULE_TAG "mpp_buffer"

#include <string.h>

#include "mpp_env.h"
#include "mpp_hash.h"
#include "mpp_lock.h"
#include "mpp_debug.h"
#include "mpp_mem_pool.h"

#include "mpp_buffer_impl.h"

#define MAX_GROUP_BIT                   8
#define MAX_MISC_GROUP_BIT              3
#define BUFFER_OPS_MAX_COUNT            1024
#define MPP_BUFFER_SPECIAL_DMA_HEAP_NUM 8

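/* NOTE: callers take MppBufferService::get_lock() around this lookup
 * (see inc_buffer_ref / mpp_buffer_ref_dec) */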
#define SEARCH_GROUP_BY_ID(id)  ((MppBufferService::get_instance())->get_group_by_id(id))

typedef MPP_RET (*BufferOp)(MppAllocator allocator, MppBufferInfo *data);

// use this singleton class so that the legacy group is initialized before main()
class MppBufferService
{
private:

    // keep these private to disallow unwanted construction, copy and assignment
    MppBufferService();
    ~MppBufferService();
    MppBufferService(const MppBufferService &);
    MppBufferService &operator=(const MppBufferService &);

    // buffer group final release function
    void destroy_group(MppBufferGroupImpl *group);

    RK_U32 get_group_id();
    RK_U32 group_id;
    RK_U32 group_count;
    RK_U32 finalizing;
    RK_U32 finished;

    RK_U32 total_size;
    RK_U32 total_max;

    // misc groups for internal / external buffers of each type
    RK_U32 misc[MPP_BUFFER_MODE_BUTT][MPP_BUFFER_TYPE_BUTT];
    RK_U32 misc_count;
    /* preset allocator apis */
    MppAllocator mAllocator[MPP_BUFFER_TYPE_BUTT];
    MppAllocatorApi *mAllocatorApi[MPP_BUFFER_TYPE_BUTT];
    /* special dma heap allocator apis */
    MppAllocator mAllocatorDmaHeapWithFlag[MPP_BUFFER_SPECIAL_DMA_HEAP_NUM];

    struct list_head mListGroup;
    DECLARE_HASHTABLE(mHashGroup, MAX_GROUP_BIT);

    // list of orphaned groups which still hold in-use buffers
    struct list_head mListOrphan;

public:
    static MppBufferService *get_instance() {
        static MppBufferService instance;
        return &instance;
    }
    static Mutex *get_lock() {
        static Mutex lock;
        return &lock;
    }

    MppBufferGroupImpl *get_group(const char *tag, const char *caller,
                                  MppBufferMode mode, MppBufferType type,
                                  RK_U32 is_misc);
    RK_U32 get_misc(MppBufferMode mode, MppBufferType type);
    void put_group(const char *caller, MppBufferGroupImpl *group);
    MppBufferGroupImpl *get_group_by_id(RK_U32 id);
    void dump(const char *info);
    RK_U32 is_finalizing();
    void inc_total(RK_U32 size);
    void dec_total(RK_U32 size);
    RK_U32 get_total_now() { return total_size; }
    RK_U32 get_total_max() { return total_max; }
};

static const char *mode2str[MPP_BUFFER_MODE_BUTT] = {
    "internal",
    "external",
};

static const char *type2str[MPP_BUFFER_TYPE_BUTT] = {
    "normal",
    "ion",
    "dma-buf",
    "drm",
};

static const char *ops2str[BUF_OPS_BUTT] = {
    "grp create ",
    "grp release",
    "grp reset",
    "grp orphan",
    "grp destroy",

    "buf commit ",
    "buf create ",
    "buf mmap   ",
    "buf ref inc",
    "buf ref dec",
    "buf discard",
    "buf destroy",
};

static MppMemPool mpp_buffer_pool = mpp_mem_pool_init_f(MODULE_TAG, sizeof(MppBufferImpl));
static MppMemPool mpp_buf_grp_pool = mpp_mem_pool_init_f("mpp_buf_grp", sizeof(MppBufferGroupImpl));

RK_U32 mpp_buffer_debug = 0;

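/* history log ring: a single allocation holds the MppBufLogs header followed
 * by max_count MppBufLog slots, so logs->logs points into the same block */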
static MppBufLogs *buf_logs_init(RK_U32 max_count)
{
    MppBufLogs *logs = NULL;
    pthread_mutexattr_t attr;

    if (!max_count)
        return NULL;

    logs = mpp_malloc_size(MppBufLogs, sizeof(MppBufLogs) + max_count * sizeof(MppBufLog));
    if (!logs) {
        mpp_err_f("failed to create %d buf logs\n", max_count);
        return NULL;
    }

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&logs->lock, &attr);
    pthread_mutexattr_destroy(&attr);

    logs->max_count = max_count;
    logs->log_count = 0;
    logs->log_write = 0;
    logs->log_read = 0;
    logs->logs = (MppBufLog *)(logs + 1);

    return logs;
}

static void buf_logs_deinit(MppBufLogs *logs)
{
    pthread_mutex_destroy(&logs->lock);
    MPP_FREE(logs);
}

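/* append one entry to the ring; once full, the oldest entry is overwritten
 * and log_read advances so a later dump starts from the oldest record */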
static void buf_logs_write(MppBufLogs *logs, RK_U32 group_id, RK_S32 buffer_id,
                           MppBufOps ops, RK_S32 ref_count, const char *caller)
{
    MppBufLog *log = NULL;

    pthread_mutex_lock(&logs->lock);

    log = &logs->logs[logs->log_write];
    log->group_id = group_id;
    log->buffer_id = buffer_id;
    log->ops = ops;
    log->ref_count = ref_count;
    log->caller = caller;

    logs->log_write++;
    if (logs->log_write >= logs->max_count)
        logs->log_write = 0;

    if (logs->log_count < logs->max_count)
        logs->log_count++;
    else {
        logs->log_read++;
        if (logs->log_read >= logs->max_count)
            logs->log_read = 0;
    }

    pthread_mutex_unlock(&logs->lock);
}

static void buf_logs_dump(MppBufLogs *logs)
{
    while (logs->log_count) {
        MppBufLog *log = &logs->logs[logs->log_read];

        if (log->buffer_id >= 0)
            mpp_log("group %3d buffer %4d ops %s ref_count %d caller %s\n",
                    log->group_id, log->buffer_id,
                    ops2str[log->ops], log->ref_count, log->caller);
        else
            mpp_log("group %3d ops %s\n", log->group_id, ops2str[log->ops]);

        logs->log_read++;
        if (logs->log_read >= logs->max_count)
            logs->log_read = 0;
        logs->log_count--;
    }
    mpp_assert(logs->log_read == logs->log_write);
}

static void buf_add_log(MppBufferImpl *buffer, MppBufOps ops, const char *caller)
{
    if (buffer->log_runtime_en) {
        mpp_log("group %3d buffer %4d fd %3d ops %s ref_count %d caller %s\n",
                buffer->group_id, buffer->buffer_id, buffer->info.fd,
                ops2str[ops], buffer->ref_count, caller);
    }
    if (buffer->logs)
        buf_logs_write(buffer->logs, buffer->group_id, buffer->buffer_id,
                       ops, buffer->ref_count, caller);
}

static void buf_grp_add_log(MppBufferGroupImpl *group, MppBufOps ops, const char *caller)
{
    if (group->log_runtime_en) {
        mpp_log("group %3d mode %d type %d ops %s\n", group->group_id,
                group->mode, group->type, ops2str[ops]);
    }
    if (group->logs)
        buf_logs_write(group->logs, group->group_id, -1, ops, 0, caller);
}

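/*
 * put_buffer covers two cases:
 * - reuse: keep the memory and move the buffer back to the group unused list
 * - release: free (internal mode) or release (external mode) the memory,
 *   update the group counters and destroy an orphan group once its usage
 *   drops to zero
 */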
static MPP_RET put_buffer(MppBufferGroupImpl *group, MppBufferImpl *buffer,
                          RK_U32 reuse, const char *caller)
{
    mpp_assert(group);

    pthread_mutex_lock(&buffer->lock);

    if (!MppBufferService::get_instance()->is_finalizing())
        mpp_assert(buffer->ref_count == 0);

    list_del_init(&buffer->list_status);

    if (reuse) {
        if (buffer->used && group) {
            group->count_used--;
            list_add_tail(&buffer->list_status, &group->list_unused);
            group->count_unused++;
        } else {
            mpp_err_f("can not reuse unused buffer %d at group %p:%d\n",
                      buffer->buffer_id, group, buffer->group_id);
        }
        buffer->used = 0;

        pthread_mutex_unlock(&buffer->lock);
        return MPP_OK;
    }

    /* release buffer here */
    BufferOp func = (buffer->mode == MPP_BUFFER_INTERNAL) ?
                    (buffer->alloc_api->free) :
                    (buffer->alloc_api->release);

    func(buffer->allocator, &buffer->info);

    if (group) {
        RK_U32 destroy = 0;

        if (buffer->used)
            group->count_used--;
        else
            group->count_unused--;

        group->usage -= buffer->info.size;
        group->buffer_count--;

        if (group->mode == MPP_BUFFER_INTERNAL)
            MppBufferService::get_instance()->dec_total(buffer->info.size);

        buf_add_log(buffer, BUF_DESTROY, caller);

        if (group->is_orphan && !group->usage && !group->is_finalizing)
            destroy = 1;

        if (destroy)
            MppBufferService::get_instance()->put_group(caller, group);
    } else {
        mpp_assert(MppBufferService::get_instance()->is_finalizing());
    }
    pthread_mutex_unlock(&buffer->lock);

    mpp_mem_pool_put_f(caller, mpp_buffer_pool, buffer);

    return MPP_OK;
}

static MPP_RET inc_buffer_ref(MppBufferImpl *buffer, const char *caller)
{
    MPP_RET ret = MPP_OK;

    pthread_mutex_lock(&buffer->lock);
    buffer->ref_count++;
    buf_add_log(buffer, BUF_REF_INC, caller);
    if (!buffer->used) {
        MppBufferGroupImpl *group = NULL;

        {
            AutoMutex auto_lock(MppBufferService::get_lock());
            group = SEARCH_GROUP_BY_ID(buffer->group_id);
        }
        // NOTE: an unused buffer whose ref_count is increased must still belong to a group
        mpp_assert(group);
        buffer->used = 1;
        if (group) {
            pthread_mutex_lock(&group->buf_lock);
            list_del_init(&buffer->list_status);
            list_add_tail(&buffer->list_status, &group->list_used);
            group->count_used++;
            group->count_unused--;
            pthread_mutex_unlock(&group->buf_lock);
        } else {
            mpp_err_f("unused buffer without group\n");
            ret = MPP_NOK;
        }
    }
    pthread_mutex_unlock(&buffer->lock);
    return ret;
}

static void dump_buffer_info(MppBufferImpl *buffer)
{
    mpp_log("buffer %p fd %4d size %10d ref_count %3d discard %d caller %s\n",
            buffer, buffer->info.fd, buffer->info.size,
            buffer->ref_count, buffer->discard, buffer->caller);
}

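/*
 * create one buffer inside a group: MPP_BUFFER_INTERNAL groups allocate new
 * memory through alloc_api->alloc, while external groups import the
 * caller-provided MppBufferInfo through alloc_api->import. When the output
 * pointer is NULL the new buffer starts life on the unused list.
 */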
MPP_RET mpp_buffer_create(const char *tag, const char *caller,
                          MppBufferGroupImpl *group, MppBufferInfo *info,
                          MppBufferImpl **buffer)
{
    MPP_BUF_FUNCTION_ENTER();

    MPP_RET ret = MPP_OK;
    BufferOp func = NULL;
    MppBufferImpl *p = NULL;

    if (NULL == group) {
        mpp_err_f("can not create buffer without group\n");
        ret = MPP_NOK;
        goto RET;
    }

    if (group->limit_count && group->buffer_count >= group->limit_count) {
        if (group->log_runtime_en)
            mpp_log_f("group %d reach count limit %d\n", group->group_id, group->limit_count);
        ret = MPP_NOK;
        goto RET;
    }

    if (group->limit_size && info->size > group->limit_size) {
        mpp_err_f("required size %d reach group size limit %d\n", info->size, group->limit_size);
        ret = MPP_NOK;
        goto RET;
    }

    p = (MppBufferImpl *)mpp_mem_pool_get_f(caller, mpp_buffer_pool);
    if (NULL == p) {
        mpp_err_f("failed to allocate context\n");
        ret = MPP_ERR_MALLOC;
        goto RET;
    }

    func = (group->mode == MPP_BUFFER_INTERNAL) ?
           (group->alloc_api->alloc) : (group->alloc_api->import);
    ret = func(group->allocator, info);
    if (ret) {
        mpp_err_f("failed to create buffer with size %d\n", info->size);
        mpp_mem_pool_put_f(caller, mpp_buffer_pool, p);
        ret = MPP_ERR_MALLOC;
        goto RET;
    }

    if (NULL == tag)
        tag = group->tag;

    /* keep the tag NULL-terminated even when it gets truncated */
    strncpy(p->tag, tag, sizeof(p->tag) - 1);
    p->tag[sizeof(p->tag) - 1] = '\0';
    p->caller = caller;
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&p->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    p->allocator = group->allocator;
    p->alloc_api = group->alloc_api;
    p->log_runtime_en = group->log_runtime_en;
    p->log_history_en = group->log_history_en;
    p->group_id = group->group_id;
    p->mode = group->mode;
    p->type = group->type;
    p->logs = group->logs;
    p->info = *info;

    pthread_mutex_lock(&group->buf_lock);
    p->buffer_id = group->buffer_id++;
    INIT_LIST_HEAD(&p->list_status);

    if (buffer) {
        p->ref_count++;
        p->used = 1;
        list_add_tail(&p->list_status, &group->list_used);
        group->count_used++;
        *buffer = p;
    } else {
        list_add_tail(&p->list_status, &group->list_unused);
        group->count_unused++;
    }

    group->usage += info->size;
    group->buffer_count++;
    pthread_mutex_unlock(&group->buf_lock);

    buf_add_log(p, (group->mode == MPP_BUFFER_INTERNAL) ? (BUF_CREATE) : (BUF_COMMIT), caller);

    if (group->mode == MPP_BUFFER_INTERNAL)
        MppBufferService::get_instance()->inc_total(info->size);

    if (group->callback)
        group->callback(group->arg, group);
RET:
    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}

MPP_RET mpp_buffer_mmap(MppBufferImpl *buffer, const char *caller)
{
    MPP_BUF_FUNCTION_ENTER();

    MPP_RET ret = buffer->alloc_api->mmap(buffer->allocator, &buffer->info);
    if (ret)
        mpp_err_f("buffer %d group %d fd %d map failed caller %s\n",
                  buffer->buffer_id, buffer->group_id, buffer->info.fd, caller);

    buf_add_log(buffer, BUF_MMAP, caller);

    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}

MPP_RET mpp_buffer_ref_inc(MppBufferImpl *buffer, const char *caller)
{
    MPP_BUF_FUNCTION_ENTER();

    MPP_RET ret = inc_buffer_ref(buffer, caller);

    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}

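/*
 * drop one reference; the last reference either recycles the buffer to the
 * group unused list or destroys it (misc groups and discarded buffers are
 * never recycled)
 */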
MPP_RET mpp_buffer_ref_dec(MppBufferImpl *buffer, const char *caller)
{
    MPP_RET ret = MPP_OK;
    RK_U32 release = 0;

    MPP_BUF_FUNCTION_ENTER();

    pthread_mutex_lock(&buffer->lock);

    buf_add_log(buffer, BUF_REF_DEC, caller);

    if (buffer->ref_count <= 0) {
        mpp_err_f("found non-positive ref_count %d caller %s\n",
                  buffer->ref_count, buffer->caller);
        mpp_abort();
        ret = MPP_NOK;
        pthread_mutex_unlock(&buffer->lock);
        goto done;
    }

    buffer->ref_count--;
    if (buffer->ref_count == 0)
        release = 1;

    pthread_mutex_unlock(&buffer->lock);

    if (release) {
        MppBufferGroupImpl *group = NULL;

        {
            AutoMutex auto_lock(MppBufferService::get_lock());
            group = SEARCH_GROUP_BY_ID(buffer->group_id);
        }

        mpp_assert(group);
        if (group) {
            RK_U32 reuse = 0;

            pthread_mutex_lock(&group->buf_lock);

            reuse = (!group->is_misc && !buffer->discard);
            put_buffer(group, buffer, reuse, caller);

            if (group->callback)
                group->callback(group->arg, group);
            pthread_mutex_unlock(&group->buf_lock);
        }
    }

done:
    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}

void mpp_buffer_group_dump(MppBufferGroupImpl *group, const char *caller)
{
    mpp_log("\ndumping buffer group %p id %d from %s\n", group,
            group->group_id, caller);
    mpp_log("mode %s\n", mode2str[group->mode]);
    mpp_log("type %s\n", type2str[group->type]);
    mpp_log("limit size %d count %d\n", group->limit_size, group->limit_count);

    mpp_log("used buffer count %d\n", group->count_used);

    MppBufferImpl *pos, *n;
    list_for_each_entry_safe(pos, n, &group->list_used, MppBufferImpl, list_status) {
        dump_buffer_info(pos);
    }

    mpp_log("unused buffer count %d\n", group->count_unused);
    list_for_each_entry_safe(pos, n, &group->list_unused, MppBufferImpl, list_status) {
        dump_buffer_info(pos);
    }

    if (group->logs)
        buf_logs_dump(group->logs);
}

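/*
 * first-fit search of the unused list: for internal groups a buffer that is
 * too small is freed on the spot so larger memory can be allocated later,
 * while external (commit mode) buffers are kept and only counted
 */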
MppBufferImpl *mpp_buffer_get_unused(MppBufferGroupImpl *p, size_t size, const char *caller)
{
    MPP_BUF_FUNCTION_ENTER();

    MppBufferImpl *buffer = NULL;

    pthread_mutex_lock(&p->buf_lock);
    if (!list_empty(&p->list_unused)) {
        MppBufferImpl *pos, *n;
        RK_S32 found = 0;
        RK_S32 search_count = 0;

        list_for_each_entry_safe(pos, n, &p->list_unused, MppBufferImpl, list_status) {
            mpp_buf_dbg(MPP_BUF_DBG_CHECK_SIZE, "request size %d on buf idx %d size %d\n",
                        size, pos->buffer_id, pos->info.size);
            if (pos->info.size >= size) {
                buffer = pos;
                pthread_mutex_lock(&buffer->lock);
                buf_add_log(buffer, BUF_REF_INC, caller);
                buffer->ref_count++;
                buffer->used = 1;
                list_del_init(&buffer->list_status);
                list_add_tail(&buffer->list_status, &p->list_used);
                p->count_used++;
                p->count_unused--;
                pthread_mutex_unlock(&buffer->lock);
                found = 1;
                break;
            } else {
                if (MPP_BUFFER_INTERNAL == p->mode) {
                    put_buffer(p, pos, 0, caller);
                } else
                    search_count++;
            }
        }

        if (!found && search_count) {
            mpp_err_f("can not find a matching buffer with size larger than %d\n", size);
            mpp_buffer_group_dump(p, caller);
        }
    }
    pthread_mutex_unlock(&p->buf_lock);

    MPP_BUF_FUNCTION_LEAVE();
    return buffer;
}

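/*
 * pack fd and offset into one 32-bit value: bits [9:0] carry the buffer fd
 * (so the fd must stay below 1024) and bits [31:10] carry the byte offset
 * (which must stay below 4M); the packed form is presumably consumed by
 * hardware expecting this layout
 */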
RK_U32 mpp_buffer_to_addr(MppBuffer buffer, size_t offset)
{
    MppBufferImpl *impl = (MppBufferImpl *)buffer;

    if (NULL == impl) {
        mpp_err_f("NULL buffer convert to zero address\n");
        return 0;
    }

    if (impl->info.fd >= (1 << 10)) {
        mpp_err_f("buffer fd %d is too large\n", impl->info.fd);
        return 0;
    }

    if (impl->offset + offset >= SZ_4M) {
        mpp_err_f("offset %d + %d is larger than 4M, use extra info to send offset\n",
                  (RK_U32)impl->offset, (RK_U32)offset);
        return 0;
    }

    RK_U32 addr = impl->info.fd + ((impl->offset + offset) << 10);

    return addr;
}

MPP_RET mpp_buffer_group_init(MppBufferGroupImpl **group, const char *tag, const char *caller,
                              MppBufferMode mode, MppBufferType type)
{
    MPP_BUF_FUNCTION_ENTER();
    mpp_assert(caller);

    *group = MppBufferService::get_instance()->get_group(tag, caller, mode, type, 0);

    MPP_BUF_FUNCTION_LEAVE();
    return ((*group) ? (MPP_OK) : (MPP_NOK));
}

MPP_RET mpp_buffer_group_deinit(MppBufferGroupImpl *p)
{
    if (NULL == p) {
        mpp_err_f("found NULL pointer\n");
        return MPP_ERR_NULL_PTR;
    }

    MPP_BUF_FUNCTION_ENTER();

    MppBufferService::get_instance()->put_group(__FUNCTION__, p);

    MPP_BUF_FUNCTION_LEAVE();
    return MPP_OK;
}

MPP_RET mpp_buffer_group_reset(MppBufferGroupImpl *p)
{
    if (NULL == p) {
        mpp_err_f("found NULL pointer\n");
        return MPP_ERR_NULL_PTR;
    }

    MPP_BUF_FUNCTION_ENTER();

    pthread_mutex_lock(&p->buf_lock);

    buf_grp_add_log(p, GRP_RESET, NULL);

    if (!list_empty(&p->list_used)) {
        MppBufferImpl *pos, *n;

        list_for_each_entry_safe(pos, n, &p->list_used, MppBufferImpl, list_status) {
            buf_add_log(pos, BUF_DISCARD, NULL);
            pos->discard = 1;
        }
    }

    // remove unused list
    if (!list_empty(&p->list_unused)) {
        MppBufferImpl *pos, *n;
        list_for_each_entry_safe(pos, n, &p->list_unused, MppBufferImpl, list_status) {
            put_buffer(p, pos, 0, __FUNCTION__);
        }
    }

    pthread_mutex_unlock(&p->buf_lock);

    MPP_BUF_FUNCTION_LEAVE();
    return MPP_OK;
}

MPP_RET mpp_buffer_group_set_callback(MppBufferGroupImpl *p,
                                      MppBufCallback callback, void *arg)
{
    if (NULL == p) {
        mpp_err_f("found NULL pointer\n");
        return MPP_ERR_NULL_PTR;
    }

    MPP_BUF_FUNCTION_ENTER();

    p->arg = arg;
    p->callback = callback;

    MPP_BUF_FUNCTION_LEAVE();
    return MPP_OK;
}

void mpp_buffer_service_dump(const char *info)
{
    AutoMutex auto_lock(MppBufferService::get_lock());

    MppBufferService::get_instance()->dump(info);
}

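/* total_size is tracked with atomic add/sub; total_max is refreshed through
 * a compare-and-swap retry loop so the peak value is never lost under
 * concurrent updates */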
void MppBufferService::inc_total(RK_U32 size)
{
    RK_U32 total = MPP_ADD_FETCH(&total_size, size);
    bool ret;

    do {
        RK_U32 old_max = total_max;
        RK_U32 new_max = MPP_MAX(total, old_max);

        ret = MPP_BOOL_CAS(&total_max, old_max, new_max);
    } while (!ret);
}

void MppBufferService::dec_total(RK_U32 size)
{
    MPP_FETCH_SUB(&total_size, size);
}

RK_U32 mpp_buffer_total_now()
{
    return MppBufferService::get_instance()->get_total_now();
}

RK_U32 mpp_buffer_total_max()
{
    return MppBufferService::get_instance()->get_total_max();
}

MppBufferGroupImpl *mpp_buffer_get_misc_group(MppBufferMode mode, MppBufferType type)
{
    MppBufferGroupImpl *misc;
    RK_U32 id;

    type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
    if (type == MPP_BUFFER_TYPE_NORMAL)
        return NULL;

    mpp_assert(mode < MPP_BUFFER_MODE_BUTT);
    mpp_assert(type < MPP_BUFFER_TYPE_BUTT);

    AutoMutex auto_lock(MppBufferService::get_lock());

    id = MppBufferService::get_instance()->get_misc(mode, type);
    if (!id) {
        char tag[32];
        RK_S32 offset = 0;

        offset += snprintf(tag + offset, sizeof(tag) - offset, "misc");
        offset += snprintf(tag + offset, sizeof(tag) - offset, "_%s",
                           type == MPP_BUFFER_TYPE_ION ? "ion" :
                           type == MPP_BUFFER_TYPE_DRM ? "drm" : "na");
        offset += snprintf(tag + offset, sizeof(tag) - offset, "_%s",
                           mode == MPP_BUFFER_INTERNAL ? "int" : "ext");

        misc = MppBufferService::get_instance()->get_group(tag, __FUNCTION__, mode, type, 1);
    } else
        misc = MppBufferService::get_instance()->get_group_by_id(id);

    return misc;
}

MppBufferService::MppBufferService()
    : group_id(1),
      group_count(0),
      finalizing(0),
      finished(0),
      total_size(0),
      total_max(0),
      misc_count(0)
{
    RK_S32 i, j;

    INIT_LIST_HEAD(&mListGroup);
    INIT_LIST_HEAD(&mListOrphan);

    // NOTE: do not create misc groups at startup; create one only when needed
    for (i = 0; i < MPP_BUFFER_MODE_BUTT; i++)
        for (j = 0; j < MPP_BUFFER_TYPE_BUTT; j++)
            misc[i][j] = 0;

    for (i = 0; i < (RK_S32)HASH_SIZE(mHashGroup); i++)
        INIT_HLIST_HEAD(&mHashGroup[i]);
}

#include "mpp_time.h"

MppBufferService::~MppBufferService()
{
    RK_S32 i, j;

    finalizing = 1;

    // first remove the misc groups, which is the normal case
    if (misc_count) {
        mpp_log_f("cleaning misc group\n");
        for (i = 0; i < MPP_BUFFER_MODE_BUTT; i++)
            for (j = 0; j < MPP_BUFFER_TYPE_BUTT; j++) {
                RK_U32 id = misc[i][j];

                if (id) {
                    put_group(__FUNCTION__, get_group_by_id(id));
                    misc[i][j] = 0;
                }
            }
    }

    // then remove the remaining groups, which are the leaked ones
    if (!list_empty(&mListGroup)) {
        MppBufferGroupImpl *pos, *n;

        if (mpp_buffer_debug & MPP_BUF_DBG_DUMP_ON_EXIT)
            dump("leaked group found");

        mpp_log_f("cleaning leaked group\n");
        list_for_each_entry_safe(pos, n, &mListGroup, MppBufferGroupImpl, list_group) {
            put_group(__FUNCTION__, pos);
        }
    }

    // remove all orphan buffer groups
    if (!list_empty(&mListOrphan)) {
        MppBufferGroupImpl *pos, *n;

        mpp_log_f("cleaning leaked buffer\n");

        list_for_each_entry_safe(pos, n, &mListOrphan, MppBufferGroupImpl, list_group) {
            pos->clear_on_exit = 1;
            pos->is_finalizing = 1;
            put_group(__FUNCTION__, pos);
        }
    }
    finished = 1;

    for (i = 0; i < MPP_BUFFER_TYPE_BUTT; i++)
        mpp_allocator_put(&mAllocator[i]);

    for (i = 1; i < MPP_BUFFER_SPECIAL_DMA_HEAP_NUM; i++)
        mpp_allocator_put(&mAllocatorDmaHeapWithFlag[i]);
}

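/* group ids start from 1 so that id 0 stays invalid; after the 32-bit
 * counter wraps around, each candidate id is checked against the hash table
 * so an id that is still in use is never handed out twice */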
RK_U32 MppBufferService::get_group_id()
{
    RK_U32 id = 0;
    static RK_U32 overflowed = 0;

    if (!overflowed) {
        /* avoid 0 group id */
        if (group_id)
            id = group_id++;
        else {
            overflowed = 1;
            group_id = 1;
        }
    }

    if (overflowed) {
        id = group_id++;

        /* after overflow skip the ids which are still in use */
        while (get_group_by_id(id))
            id = group_id++;
    }

    group_count++;

    return id;
}

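/*
 * the low bits of MppBufferType select the allocator type while the high
 * bits carry allocation flags; for dma-heap each flag combination gets its
 * own cached allocator in mAllocatorDmaHeapWithFlag, indexed by:
 *   bit 0 - MPP_BUFFER_FLAGS_CONTIG
 *   bit 1 - MPP_BUFFER_FLAGS_CACHABLE
 *   bit 2 - MPP_BUFFER_FLAGS_DMA32
 */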
MppBufferGroupImpl *MppBufferService::get_group(const char *tag, const char *caller,
                                                MppBufferMode mode, MppBufferType type,
                                                RK_U32 is_misc)
{
    MppBufferType buffer_type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
    RK_U32 flags = (type & MPP_BUFFER_FLAGS_MASK);
    MppBufferGroupImpl *p = (MppBufferGroupImpl *)mpp_mem_pool_get_f(caller, mpp_buf_grp_pool);
    if (NULL == p) {
        mpp_err("MppBufferService failed to allocate group context\n");
        return NULL;
    }

    INIT_LIST_HEAD(&p->list_group);
    INIT_LIST_HEAD(&p->list_used);
    INIT_LIST_HEAD(&p->list_unused);
    INIT_HLIST_NODE(&p->hlist);

    mpp_env_get_u32("mpp_buffer_debug", &mpp_buffer_debug, 0);
    p->log_runtime_en = (mpp_buffer_debug & MPP_BUF_DBG_OPS_RUNTIME) ? (1) : (0);
    p->log_history_en = (mpp_buffer_debug & MPP_BUF_DBG_OPS_HISTORY) ? (1) : (0);

    p->caller = caller;
    p->mode = mode;
    p->type = buffer_type;
    p->limit = BUFFER_GROUP_SIZE_DEFAULT;
    p->clear_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_CLR_ON_EXIT) ? (1) : (0);
    p->dump_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_DUMP_ON_EXIT) ? (1) : (0);

    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&p->buf_lock, &attr);
    pthread_mutexattr_destroy(&attr);

    {
        AutoMutex auto_lock(get_lock());

        // get the general allocator for the buffer type first
        if (!mAllocator[buffer_type])
            mpp_allocator_get(&mAllocator[buffer_type], &mAllocatorApi[buffer_type], buffer_type);

        p->allocator = mAllocator[buffer_type];
        p->alloc_api = mAllocatorApi[buffer_type];

        // get an extra dma heap allocator when flags are present
        if (flags && MPP_BUFFER_TYPE_DMA_HEAP == get_real_allocator_type(p->allocator)) {
            RK_U32 extra_allocator_idx = 0;

            // calculate index of the extra allocator
            if (flags & MPP_BUFFER_FLAGS_CONTIG)
                extra_allocator_idx |= 1 << 0;

            if (flags & MPP_BUFFER_FLAGS_CACHABLE)
                extra_allocator_idx |= 1 << 1;

            if (flags & MPP_BUFFER_FLAGS_DMA32)
                extra_allocator_idx |= 1 << 2;

            if (!mAllocatorDmaHeapWithFlag[extra_allocator_idx])
                mpp_allocator_get(&mAllocatorDmaHeapWithFlag[extra_allocator_idx],
                                  &p->alloc_api, type);

            if (mAllocatorDmaHeapWithFlag[extra_allocator_idx])
                p->allocator = mAllocatorDmaHeapWithFlag[extra_allocator_idx];
        }
    }

    mpp_assert(p->allocator);
    mpp_assert(p->alloc_api);

    if (p->log_history_en)
        p->logs = buf_logs_init(BUFFER_OPS_MAX_COUNT);

    mpp_assert(mode < MPP_BUFFER_MODE_BUTT);
    mpp_assert(buffer_type < MPP_BUFFER_TYPE_BUTT);

    AutoMutex auto_lock(get_lock());
    RK_U32 id = get_group_id();

    if (tag) {
        snprintf(p->tag, sizeof(p->tag) - 1, "%s_%d", tag, id);
    } else {
        snprintf(p->tag, sizeof(p->tag) - 1, "unknown");
    }
    p->group_id = id;

    list_add_tail(&p->list_group, &mListGroup);
    hash_add(mHashGroup, &p->hlist, id);

    buf_grp_add_log(p, GRP_CREATE, caller);

    if (is_misc) {
        misc[mode][buffer_type] = id;
        p->is_misc = 1;
        misc_count++;
    }

    return p;
}

RK_U32 MppBufferService::get_misc(MppBufferMode mode, MppBufferType type)
{
    type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
    if (type == MPP_BUFFER_TYPE_NORMAL)
        return 0;

    mpp_assert(mode < MPP_BUFFER_MODE_BUTT);
    mpp_assert(type < MPP_BUFFER_TYPE_BUTT);

    return misc[mode][type];
}

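/*
 * releasing a group with buffers still in use does not free them; the group
 * is moved to the orphan list (unless clear_on_exit forces a release) and is
 * destroyed later when its last buffer comes back through put_buffer
 */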
void MppBufferService::put_group(const char *caller, MppBufferGroupImpl *p)
{
    if (finished)
        return;

    Mutex *lock = get_lock();

    if (!finalizing)
        lock->lock();

    buf_grp_add_log(p, GRP_RELEASE, caller);

    // remove unused list
    if (!list_empty(&p->list_unused)) {
        MppBufferImpl *pos, *n;

        list_for_each_entry_safe(pos, n, &p->list_unused, MppBufferImpl, list_status) {
            put_buffer(p, pos, 0, caller);
        }
    }

    if (list_empty(&p->list_used)) {
        destroy_group(p);
    } else {
        if (!finalizing || (finalizing && p->dump_on_exit)) {
            mpp_err("mpp_group %p tag %s caller %s mode %s type %s deinit with %d bytes not released\n",
                    p, p->tag, p->caller, mode2str[p->mode], type2str[p->type], p->usage);

            mpp_buffer_group_dump(p, caller);
        }

        /* if clear on exit we need to release the remaining buffers */
        if (p->clear_on_exit) {
            MppBufferImpl *pos, *n;

            if (p->dump_on_exit)
                mpp_err("force release all remaining buffers\n");

            list_for_each_entry_safe(pos, n, &p->list_used, MppBufferImpl, list_status) {
                if (p->dump_on_exit)
                    mpp_err("clearing buffer %p\n", pos);
                pos->ref_count = 0;
                pos->discard = 1;
                put_buffer(p, pos, 0, caller);
            }

            destroy_group(p);
        } else {
            // otherwise move the group to list_orphan and wait for buffer release
            buf_grp_add_log(p, GRP_ORPHAN, caller);
            list_del_init(&p->list_group);
            list_add_tail(&p->list_group, &mListOrphan);
            p->is_orphan = 1;
        }
    }

    if (!finalizing)
        lock->unlock();
}

void MppBufferService::destroy_group(MppBufferGroupImpl *group)
{
    MppBufferMode mode = group->mode;
    MppBufferType type = group->type;
    RK_U32 id = group->group_id;

    mpp_assert(group->count_used == 0);
    mpp_assert(group->count_unused == 0);
    if (group->count_unused || group->count_used) {
        mpp_err("mpp_buffer_group_deinit mismatch counter used %4d unused %4d found\n",
                group->count_used, group->count_unused);
        group->count_unused = 0;
        group->count_used = 0;
    }

    buf_grp_add_log(group, GRP_DESTROY, __FUNCTION__);

    list_del_init(&group->list_group);
    hash_del(&group->hlist);
    pthread_mutex_destroy(&group->buf_lock);
    if (group->logs) {
        buf_logs_deinit(group->logs);
        group->logs = NULL;
    }
    mpp_mem_pool_put(mpp_buf_grp_pool, group);
    group_count--;

    if (id == misc[mode][type]) {
        misc[mode][type] = 0;
        misc_count--;
    }
}

MppBufferGroupImpl *MppBufferService::get_group_by_id(RK_U32 id)
{
    MppBufferGroupImpl *impl = NULL;

    hash_for_each_possible(mHashGroup, impl, hlist, id) {
        if (impl->group_id == id)
            break;
    }

    return impl;
}

void MppBufferService::dump(const char *info)
{
    MppBufferGroupImpl *group;
    struct hlist_node *n;
    RK_U32 key;

    mpp_log("dumping all buffer groups for %s\n", info);

    if (hash_empty(mHashGroup)) {
        mpp_log("no buffer group can be dumped\n");
    } else {
        hash_for_each_safe(mHashGroup, key, n, group, hlist) {
            mpp_buffer_group_dump(group, __FUNCTION__);
        }
    }
}

RK_U32 MppBufferService::is_finalizing()
{
    return finalizing;
}