1 /* SPDX-License-Identifier: Apache-2.0 OR MIT */
2 /*
3 * Copyright (c) 2015 Rockchip Electronics Co., Ltd.
4 */
5
6 #define MODULE_TAG "mpp_buffer"
7
8 #include <string.h>
9
10 #include "mpp_env.h"
11 #include "mpp_hash.h"
12 #include "mpp_lock.h"
13 #include "mpp_time.h"
14 #include "mpp_debug.h"
15 #include "mpp_thread.h"
16 #include "mpp_mem_pool.h"
17 #include "mpp_singleton.h"
18
19 #include "mpp_buffer_impl.h"
20
/* service sizing constants */
#define MAX_GROUP_BIT 8                 /* group hash table has 2^8 buckets */
#define MAX_MISC_GROUP_BIT 3
#define BUFFER_OPS_MAX_COUNT 1024       /* capacity of the ops history ring */
#define MPP_ALLOCATOR_WITH_FLAG_NUM 8   /* allocator variants per type (flag combos) */

/* NOTE: user may call buffer / buf_grp deinit after buffer service deinited */
typedef enum MppBufSrvStatus_e {
    MPP_BUF_SRV_UNINITED = -1,  /* service never created yet */
    MPP_BUF_SRV_NORMAL = 0,     /* service alive and usable */
    MPP_BUF_SRV_FINALIZED = 1,  /* service torn down; lookups return NULL */
    MPP_BUF_SRV_BUTT,
} MppBufSrvStatus;

/* thin alias over get_group_by_id(); caller must hold srv->lock */
#define SEARCH_GROUP_BY_ID(srv, id) (get_group_by_id(srv, id))
35
/*
 * get_srv_buffer - lazily fetch the global MppBufferService singleton.
 *
 * GNU statement-expression macro evaluating to MppBufferService *.
 * Initializes the service on first use; returns NULL silently when the
 * service was already finalized (users may release buffers / groups after
 * the service deinit, see note on MppBufSrvStatus).
 */
#define get_srv_buffer() \
    ({ \
        MppBufferService *__tmp; \
        if (srv_buffer) \
            __tmp = srv_buffer; \
        else { \
            switch (srv_status) { \
            case MPP_BUF_SRV_UNINITED : { \
                mpp_buffer_service_init(); \
                __tmp = srv_buffer; \
            } break; \
            case MPP_BUF_SRV_FINALIZED : { \
                /* if called after buf srv deinited return NULL without error log */ \
                __tmp = NULL; \
            } break; \
            default : { \
                /* bug fix: format had %d and %s but only one argument was passed */ \
                mpp_err("mpp buffer srv not init status %d at %s\n", srv_status, __FUNCTION__); \
                __tmp = NULL; \
            } break; \
            } \
        } \
        __tmp; \
    })
59
60 static void mpp_buffer_service_init();
61
62 typedef MPP_RET (*BufferOp)(MppAllocator allocator, MppBufferInfo *data);
63
/* Global buffer service bookkeeping; a single instance lives in srv_buffer. */
typedef struct MppBufferService_t {
    rk_u32 group_id;        /* next group id to hand out (0 means invalid) */
    rk_u32 group_count;     /* number of group ids handed out so far */
    rk_u32 finalizing;      /* set while service teardown is in progress */

    rk_u32 total_size;      /* current total size of internal buffers */
    rk_u32 total_max;       /* historical maximum of total_size */

    MppMutex lock;
    // misc group for internal / external buffer with different type
    rk_u32 misc[MPP_BUFFER_MODE_BUTT][MPP_BUFFER_TYPE_BUTT][MPP_ALLOCATOR_WITH_FLAG_NUM];
    rk_u32 misc_count;
    /* preset allocator apis */
    MppAllocator allocator[MPP_BUFFER_TYPE_BUTT][MPP_ALLOCATOR_WITH_FLAG_NUM];
    MppAllocatorApi *allocator_api[MPP_BUFFER_TYPE_BUTT];

    struct list_head list_group;
    DECLARE_HASHTABLE(hash_group, MAX_GROUP_BIT);

    // list for used buffer which do not have group
    struct list_head list_orphan;
} MppBufferService;
86
/* printable names indexed by MppBufferMode / MppBufferType / MppBufOps */
static const char *mode2str[MPP_BUFFER_MODE_BUTT] = {
    "internal",
    "external",
};

static const char *type2str[MPP_BUFFER_TYPE_BUTT] = {
    "normal",
    "ion",
    "dma-buf",
    "drm",
};
static const char *ops2str[BUF_OPS_BUTT] = {
    "grp create ",
    "grp release",
    "grp reset",
    "grp orphan",
    "grp destroy",

    "buf commit ",
    "buf create ",
    "buf mmap   ",
    "buf ref inc",
    "buf ref dec",
    "buf discard",
    "buf destroy",
};
113
/* shared object pools and the lazily-created service singleton */
static MppMemPool pool_buf = NULL;           /* pool of MppBufferImpl */
static MppMemPool pool_buf_grp = NULL;       /* pool of MppBufferGroupImpl */
static MppMemPool pool_buf_map_node = NULL;  /* pool of MppDevBufMapNode */
static MppBufferService *srv_buffer = NULL;  /* singleton, see get_srv_buffer() */
static MppBufSrvStatus srv_status = MPP_BUF_SRV_UNINITED;
rk_u32 mpp_buffer_debug = 0;                 /* runtime debug flag bits */
120
121 static MppBufferGroupImpl *service_get_group(const char *tag, const char *caller,
122 MppBufferMode mode, MppBufferType type,
123 rk_u32 is_misc);
124
125 static void service_put_group(MppBufferService *srv, MppBufferGroupImpl *p, const char *caller);
126 static void service_dump(MppBufferService *srv, const char *info);
127
get_group_by_id(MppBufferService * srv,rk_u32 id)128 static MppBufferGroupImpl *get_group_by_id(MppBufferService *srv, rk_u32 id)
129 {
130 MppBufferGroupImpl *impl = NULL;
131
132 hash_for_each_possible(srv->hash_group, impl, hlist, id) {
133 if (impl->group_id == id)
134 break;
135 }
136
137 return impl;
138 }
139
buf_logs_init(rk_u32 max_count)140 static MppBufLogs *buf_logs_init(rk_u32 max_count)
141 {
142 MppBufLogs *logs = NULL;
143
144 if (!max_count)
145 return NULL;
146
147 logs = mpp_malloc_size(MppBufLogs, sizeof(MppBufLogs) + max_count * sizeof(MppBufLog));
148 if (!logs) {
149 mpp_err_f("failed to create %d buf logs\n", max_count);
150 return NULL;
151 }
152
153 {
154 pthread_mutexattr_t attr;
155
156 pthread_mutexattr_init(&attr);
157 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
158 pthread_mutex_init(&logs->lock, &attr);
159 pthread_mutexattr_destroy(&attr);
160 }
161
162 logs->max_count = max_count;
163 logs->log_count = 0;
164 logs->log_write = 0;
165 logs->log_read = 0;
166 logs->logs = (MppBufLog *)(logs + 1);
167
168 return logs;
169 }
170
/* Destroy the ring lock and free the combined header+entries allocation. */
static void buf_logs_deinit(MppBufLogs *logs)
{
    pthread_mutex_destroy(&logs->lock);
    MPP_FREE(logs);
}
176
/*
 * Append one entry to the history ring. When the ring is full the oldest
 * entry is overwritten (read cursor advances with the write cursor).
 */
static void buf_logs_write(MppBufLogs *logs, rk_u32 group_id, rk_s32 buffer_id,
                           MppBufOps ops, rk_s32 ref_count, const char *caller)
{
    MppBufLog *slot;

    pthread_mutex_lock(&logs->lock);

    slot = &logs->logs[logs->log_write];
    slot->group_id = group_id;
    slot->buffer_id = buffer_id;
    slot->ops = ops;
    slot->ref_count = ref_count;
    slot->caller = caller;

    /* advance write cursor with wrap-around */
    logs->log_write = (logs->log_write + 1 >= logs->max_count) ? 0 : logs->log_write + 1;

    if (logs->log_count < logs->max_count) {
        logs->log_count++;
    } else {
        /* ring full: drop the oldest entry by advancing the read cursor */
        logs->log_read = (logs->log_read + 1 >= logs->max_count) ? 0 : logs->log_read + 1;
    }

    pthread_mutex_unlock(&logs->lock);
}
205
/* Drain and print every pending history entry, oldest first. */
static void buf_logs_dump(MppBufLogs *logs)
{
    while (logs->log_count) {
        MppBufLog *entry = &logs->logs[logs->log_read];

        /* buffer_id < 0 marks a group-level operation */
        if (entry->buffer_id >= 0)
            mpp_log("group %3d buffer %4d ops %s ref_count %d caller %s\n",
                    entry->group_id, entry->buffer_id,
                    ops2str[entry->ops], entry->ref_count, entry->caller);
        else
            mpp_log("group %3d ops %s\n", entry->group_id, ops2str[entry->ops]);

        logs->log_read = (logs->log_read + 1 >= logs->max_count) ? 0 : logs->log_read + 1;
        logs->log_count--;
    }

    /* fully drained: read cursor must have caught up with write cursor */
    mpp_assert(logs->log_read == logs->log_write);
}
225
/* Record a buffer operation: echo to the runtime log and/or history ring. */
static void buf_add_log(MppBufferImpl *buffer, MppBufOps ops, const char* caller)
{
    if (buffer->log_runtime_en)
        mpp_log("group %3d buffer %4d fd %3d ops %s ref_count %d caller %s\n",
                buffer->group_id, buffer->buffer_id, buffer->info.fd,
                ops2str[ops], buffer->ref_count, caller);

    if (buffer->logs)
        buf_logs_write(buffer->logs, buffer->group_id, buffer->buffer_id,
                       ops, buffer->ref_count, caller);
}
237
/* Record a group operation: echo to the runtime log and/or history ring. */
static void buf_grp_add_log(MppBufferGroupImpl *group, MppBufOps ops, const char* caller)
{
    if (group->log_runtime_en)
        mpp_log("group %3d mode %d type %d ops %s\n", group->group_id,
                group->mode, group->type, ops2str[ops]);

    /* group entries use buffer_id -1 as a marker in the ring */
    if (group->logs)
        buf_logs_write(group->logs, group->group_id, -1, ops, 0, caller);
}
247
/* Print a one-line summary of a buffer: identity, size and usage state. */
static void dump_buffer_info(MppBufferImpl *buffer)
{
    mpp_log("buffer %p fd %4d size %10d ref_count %3d discard %d caller %s\n",
            buffer, buffer->info.fd, buffer->info.size,
            buffer->ref_count, buffer->discard, buffer->caller);
}
254
/*
 * Dump a group's configuration, every buffer on both of its lists and,
 * when history logging is enabled, its pending history entries.
 */
void mpp_buffer_group_dump(MppBufferGroupImpl *group, const char *caller)
{
    MppBufferImpl *buf, *tmp;

    mpp_log("\ndumping buffer group %p id %d from %s\n", group,
            group->group_id, caller);
    mpp_log("mode %s\n", mode2str[group->mode]);
    mpp_log("type %s\n", type2str[group->type]);
    mpp_log("limit size %d count %d\n", group->limit_size, group->limit_count);

    mpp_log("used buffer count %d\n", group->count_used);
    list_for_each_entry_safe(buf, tmp, &group->list_used, MppBufferImpl, list_status) {
        dump_buffer_info(buf);
    }

    mpp_log("unused buffer count %d\n", group->count_unused);
    list_for_each_entry_safe(buf, tmp, &group->list_unused, MppBufferImpl, list_status) {
        dump_buffer_info(buf);
    }

    if (group->logs)
        buf_logs_dump(group->logs);
}
279
/* Reset an MppBufferInfo to its "empty" sentinel values. */
static void clear_buffer_info(MppBufferInfo *info)
{
    info->type = MPP_BUFFER_TYPE_BUTT;
    info->size = 0;
    info->index = -1;
    info->fd = -1;
    info->ptr = NULL;
    info->hnd = NULL;
}
289
/*
 * Release one buffer back to its group, or destroy it.
 *
 * When 'reuse' is non-zero the buffer is only moved from the group's used
 * list back to the unused list. Otherwise it is detached from every device
 * it was mapped on, its memory is freed (internal mode) or released
 * (external mode) through the allocator api, and the MppBufferImpl object
 * is returned to the pool. Destroying the last buffer of an empty orphan
 * group also destroys the group.
 *
 * 'srv' may be NULL when called after the service was finalized.
 */
static void service_put_buffer(MppBufferService *srv, MppBufferGroupImpl *group,
                               MppBufferImpl *buffer, rk_u32 reuse, const char *caller)
{
    struct list_head list_maps;
    MppDevBufMapNode *pos, *n;
    MppBufferInfo info;

    mpp_assert(group);

    pthread_mutex_lock(&buffer->lock);

    /*
     * Bug fix: the old condition "!srv && !srv->finalizing" dereferenced
     * srv exactly when it was NULL and skipped the ref_count guard whenever
     * the service existed. Enforce the guard only while the service is
     * alive and not finalizing.
     */
    if (srv && !srv->finalizing) {
        mpp_assert(buffer->ref_count == 0);
        if (buffer->ref_count > 0) {
            pthread_mutex_unlock(&buffer->lock);
            return;
        }
    }

    list_del_init(&buffer->list_status);

    if (reuse) {
        /* keep the allocation: move it back to the group's unused list */
        if (buffer->used && group) {
            group->count_used--;
            list_add_tail(&buffer->list_status, &group->list_unused);
            group->count_unused++;
        } else {
            mpp_err_f("can not reuse unused buffer %d at group %p:%d\n",
                      buffer->buffer_id, group, buffer->group_id);
        }
        buffer->used = 0;

        pthread_mutex_unlock(&buffer->lock);
        return;
    }

    /* collect all device mappings; they are detached after buffer->lock drops */
    INIT_LIST_HEAD(&list_maps);
    list_for_each_entry_safe(pos, n, &buffer->list_maps, MppDevBufMapNode, list_buf) {
        list_move_tail(&pos->list_buf, &list_maps);
        pos->iova = (rk_u32)(-1);
    }
    mpp_assert(list_empty(&buffer->list_maps));
    info = buffer->info;
    if (group) {
        rk_u32 destroy = 0;
        rk_u32 size = buffer->info.size;

        if (buffer->used)
            group->count_used--;
        else
            group->count_unused--;

        group->usage -= size;
        group->buffer_count--;

        /* reduce total buffer size record */
        if (group->mode == MPP_BUFFER_INTERNAL && srv)
            MPP_FETCH_SUB(&srv->total_size, size);

        buf_add_log(buffer, BUF_DESTROY, caller);

        /* last buffer of an orphan group: the group goes away with it */
        if (group->is_orphan && !group->usage && !group->is_finalizing)
            destroy = 1;

        if (destroy)
            service_put_group(srv, group, caller);
    } else {
        /* group-less buffer is only legal once the service left normal state */
        mpp_assert(srv_status);
    }
    clear_buffer_info(&buffer->info);

    pthread_mutex_unlock(&buffer->lock);

    /* now detach from each device, outside of buffer->lock */
    list_for_each_entry_safe(pos, n, &list_maps, MppDevBufMapNode, list_buf) {
        MppDev dev = pos->dev;

        mpp_assert(dev);
        mpp_dev_ioctl(dev, MPP_DEV_LOCK_MAP, NULL);
        /* remove buffer from group */
        mpp_dev_ioctl(dev, MPP_DEV_DETACH_FD, pos);
        mpp_dev_ioctl(dev, MPP_DEV_UNLOCK_MAP, NULL);
        mpp_mem_pool_put(pool_buf_map_node, pos, caller);
    }

    /* release the underlying memory: free for internal, release for external */
    {
        BufferOp func = (buffer->mode == MPP_BUFFER_INTERNAL) ?
                        (buffer->alloc_api->free) :
                        (buffer->alloc_api->release);

        func(buffer->allocator, &info);
    }

    mpp_mem_pool_put(pool_buf, buffer, caller);
}
386
/*
 * Increase a buffer's reference count.
 *
 * Taking the first reference on an unused buffer also moves it from its
 * group's unused list to the used list. Returns MPP_NOK when such a
 * buffer's owning group can no longer be found.
 */
static MPP_RET inc_buffer_ref(MppBufferImpl *buffer, const char *caller)
{
    MPP_RET ret = MPP_OK;

    pthread_mutex_lock(&buffer->lock);
    buffer->ref_count++;
    buf_add_log(buffer, BUF_REF_INC, caller);
    if (!buffer->used) {
        MppBufferGroupImpl *group = NULL;
        MppBufferService *srv = get_srv_buffer();

        /* look up the owning group under the service lock */
        if (srv) {
            mpp_mutex_lock(&srv->lock);
            group = SEARCH_GROUP_BY_ID(srv, buffer->group_id);
            mpp_mutex_unlock(&srv->lock);
        }
        // NOTE: when increasing ref_count the unused buffer must be under certain group
        mpp_assert(group);
        buffer->used = 1;
        if (group) {
            /* move the buffer to the group's used list */
            pthread_mutex_lock(&group->buf_lock);
            list_del_init(&buffer->list_status);
            list_add_tail(&buffer->list_status, &group->list_used);
            group->count_used++;
            group->count_unused--;
            pthread_mutex_unlock(&group->buf_lock);
        } else {
            mpp_err_f("unused buffer without group\n");
            ret = MPP_NOK;
        }
    }
    pthread_mutex_unlock(&buffer->lock);
    return ret;
}
421
/*
 * Create (internal mode) or import (external mode) a buffer inside a group.
 *
 * On success the new MppBufferImpl is linked into the group: when the
 * caller passes a non-NULL 'buffer' out-pointer the buffer starts life
 * referenced (ref_count 1, on the used list), otherwise it goes to the
 * unused list. Group usage and the service-wide total size statistics are
 * updated and the group callback is fired.
 *
 * Returns MPP_OK on success, MPP_NOK on limit violation, MPP_ERR_MALLOC
 * when the pool or the allocator fails.
 */
MPP_RET mpp_buffer_create(const char *tag, const char *caller,
                          MppBufferGroupImpl *group, MppBufferInfo *info,
                          MppBufferImpl **buffer)
{
    MPP_BUF_FUNCTION_ENTER();

    MPP_RET ret = MPP_OK;
    BufferOp func = NULL;
    MppBufferImpl *p = NULL;

    if (!group) {
        mpp_err_f("can not create buffer without group\n");
        ret = MPP_NOK;
        goto RET;
    }

    /* enforce the group's optional count and size limits */
    if (group->limit_count && group->buffer_count >= group->limit_count) {
        if (group->log_runtime_en)
            mpp_log_f("group %d reach count limit %d\n", group->group_id, group->limit_count);
        ret = MPP_NOK;
        goto RET;
    }

    if (group->limit_size && info->size > group->limit_size) {
        mpp_err_f("required size %d reach group size limit %d\n", info->size, group->limit_size);
        ret = MPP_NOK;
        goto RET;
    }

    p = (MppBufferImpl *)mpp_mem_pool_get(pool_buf, caller);
    if (!p) {
        mpp_err_f("failed to allocate context\n");
        ret = MPP_ERR_MALLOC;
        goto RET;
    }

    /* internal buffers are allocated, external ones are imported */
    func = (group->mode == MPP_BUFFER_INTERNAL) ?
           (group->alloc_api->alloc) : (group->alloc_api->import);
    ret = func(group->allocator, info);
    if (ret) {
        mpp_err_f("failed to create buffer with size %d\n", info->size);
        mpp_mem_pool_put(pool_buf, p, caller);
        ret = MPP_ERR_MALLOC;
        goto RET;
    }

    if (!tag)
        tag = group->tag;

    snprintf(p->tag, sizeof(p->tag), "%s", tag);
    p->caller = caller;
    /* per-buffer lock uses the recursive mutex type (same as buf_logs_init) */
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&p->lock, &attr);
    pthread_mutexattr_destroy(&attr);
    /* inherit allocator and logging configuration from the group */
    p->allocator = group->allocator;
    p->alloc_api = group->alloc_api;
    p->log_runtime_en = group->log_runtime_en;
    p->log_history_en = group->log_history_en;
    p->group_id = group->group_id;
    p->mode = group->mode;
    p->type = group->type;
    p->uncached = (group->flags & MPP_ALLOC_FLAG_CACHABLE) ? 0 : 1;
    p->logs = group->logs;
    p->info = *info;

    pthread_mutex_lock(&group->buf_lock);
    p->buffer_id = group->buffer_id++;
    INIT_LIST_HEAD(&p->list_status);
    INIT_LIST_HEAD(&p->list_maps);

    if (buffer) {
        /* caller keeps a reference: buffer starts on the used list */
        p->ref_count++;
        p->used = 1;
        list_add_tail(&p->list_status, &group->list_used);
        group->count_used++;
        *buffer = p;
    } else {
        list_add_tail(&p->list_status, &group->list_unused);
        group->count_unused++;
    }

    group->usage += info->size;
    group->buffer_count++;
    pthread_mutex_unlock(&group->buf_lock);

    buf_add_log(p, (group->mode == MPP_BUFFER_INTERNAL) ? (BUF_CREATE) : (BUF_COMMIT), caller);

    if (group->mode == MPP_BUFFER_INTERNAL) {
        MppBufferService *srv = get_srv_buffer();

        if (srv) {
            rk_u32 total = MPP_ADD_FETCH(&srv->total_size, info->size);
            bool cas_ret;

            /* lock-free update of the historical maximum via a CAS retry loop */
            do {
                rk_u32 old_max = srv->total_max;
                rk_u32 new_max = MPP_MAX(total, old_max);

                cas_ret = MPP_BOOL_CAS(&srv->total_max, old_max, new_max);
            } while (!cas_ret);
        }
    }

    if (group->callback)
        group->callback(group->arg, group);
RET:
    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}
533
mpp_buffer_mmap(MppBufferImpl * buffer,const char * caller)534 MPP_RET mpp_buffer_mmap(MppBufferImpl *buffer, const char* caller)
535 {
536 MPP_BUF_FUNCTION_ENTER();
537
538 MPP_RET ret = buffer->alloc_api->mmap(buffer->allocator, &buffer->info);
539 if (ret)
540 mpp_err_f("buffer %d group %d fd %d map failed caller %s\n",
541 buffer->buffer_id, buffer->group_id, buffer->info.fd, caller);
542
543 buf_add_log(buffer, BUF_MMAP, caller);
544
545 MPP_BUF_FUNCTION_LEAVE();
546 return ret;
547 }
548
mpp_buffer_ref_inc(MppBufferImpl * buffer,const char * caller)549 MPP_RET mpp_buffer_ref_inc(MppBufferImpl *buffer, const char* caller)
550 {
551 MPP_BUF_FUNCTION_ENTER();
552
553 MPP_RET ret = inc_buffer_ref(buffer, caller);
554
555 MPP_BUF_FUNCTION_LEAVE();
556 return ret;
557 }
558
559
/*
 * Decrease a buffer's reference count; the last reference releases it.
 *
 * On reaching zero the buffer is either recycled back to its group's
 * unused list (non-misc group and not discarded) or destroyed through
 * service_put_buffer(). Aborts on a non-positive ref_count.
 */
MPP_RET mpp_buffer_ref_dec(MppBufferImpl *buffer, const char* caller)
{
    MPP_RET ret = MPP_OK;
    rk_u32 release = 0;

    MPP_BUF_FUNCTION_ENTER();

    pthread_mutex_lock(&buffer->lock);

    if (buffer->ref_count <= 0) {
        /* over-release: log and abort to catch the caller in the act */
        buf_add_log(buffer, BUF_REF_DEC, caller);
        mpp_err_f("buffer from %s found non-positive ref_count %d caller %s\n",
                  buffer->caller, buffer->ref_count, caller);
        mpp_abort();
        ret = MPP_NOK;
        pthread_mutex_unlock(&buffer->lock);
        goto done;
    }

    buffer->ref_count--;
    if (buffer->ref_count == 0)
        release = 1;
    buf_add_log(buffer, BUF_REF_DEC, caller);

    pthread_mutex_unlock(&buffer->lock);

    if (release) {
        MppBufferGroupImpl *group = NULL;
        MppBufferService *srv = get_srv_buffer();

        /* look up the owning group under the service lock */
        if (srv) {
            mpp_mutex_lock(&srv->lock);
            group = SEARCH_GROUP_BY_ID(srv, buffer->group_id);
            mpp_mutex_unlock(&srv->lock);
        }

        mpp_assert(group);
        if (group) {
            rk_u32 reuse = 0;

            pthread_mutex_lock(&group->buf_lock);

            /* recycle unless the group is misc or the buffer was discarded */
            reuse = (!group->is_misc && !buffer->discard);
            service_put_buffer(srv, group, buffer, reuse, caller);

            if (group->callback)
                group->callback(group->arg, group);

            pthread_mutex_unlock(&group->buf_lock);
        }
    }

done:
    MPP_BUF_FUNCTION_LEAVE();
    return ret;
}
616
mpp_buffer_discard(MppBufferImpl * buffer,const char * caller)617 MPP_RET mpp_buffer_discard(MppBufferImpl *buffer, const char* caller)
618 {
619 MppBufferService *srv = get_srv_buffer();
620 MppBufferGroupImpl *group = NULL;
621
622 MPP_BUF_FUNCTION_ENTER();
623
624 if (srv) {
625 mpp_mutex_lock(&srv->lock);
626 group = SEARCH_GROUP_BY_ID(srv, buffer->group_id);
627 mpp_mutex_unlock(&srv->lock);
628 }
629
630 mpp_assert(group);
631 if (group) {
632 pthread_mutex_lock(&group->buf_lock);
633 buffer->discard = 1;
634 buf_add_log(buffer, BUF_DISCARD, caller);
635 pthread_mutex_unlock(&group->buf_lock);
636 }
637
638 MPP_BUF_FUNCTION_LEAVE();
639
640 return MPP_OK;
641 }
642
/*
 * Take the first buffer with size >= 'size' from the group's unused list.
 *
 * A matching buffer is referenced, marked used and moved to the used
 * list. Too-small internal-mode buffers encountered during the scan are
 * destroyed on the spot; external ones are only counted. Returns NULL
 * when nothing fits.
 */
MppBufferImpl *mpp_buffer_get_unused(MppBufferGroupImpl *p, size_t size, const char* caller)
{
    MppBufferImpl *buffer = NULL;

    MPP_BUF_FUNCTION_ENTER();

    pthread_mutex_lock(&p->buf_lock);

    if (!list_empty(&p->list_unused)) {
        MppBufferImpl *pos, *n;
        rk_s32 found = 0;
        rk_s32 search_count = 0;

        list_for_each_entry_safe(pos, n, &p->list_unused, MppBufferImpl, list_status) {
            mpp_buf_dbg(MPP_BUF_DBG_CHECK_SIZE, "request size %d on buf idx %d size %d\n",
                        size, pos->buffer_id, pos->info.size);
            if (pos->info.size >= size) {
                /* match: take a reference and move it to the used list */
                buffer = pos;
                pthread_mutex_lock(&buffer->lock);
                buffer->ref_count++;
                buffer->used = 1;
                buf_add_log(buffer, BUF_REF_INC, caller);
                list_del_init(&buffer->list_status);
                list_add_tail(&buffer->list_status, &p->list_used);
                p->count_used++;
                p->count_unused--;
                pthread_mutex_unlock(&buffer->lock);
                found = 1;
                break;
            } else {
                /* too small: internal buffers are released during the scan */
                if (MPP_BUFFER_INTERNAL == p->mode) {
                    service_put_buffer(get_srv_buffer(), p, pos, 0, caller);
                } else
                    search_count++;
            }
        }

        if (!found && search_count) {
            mpp_err_f("can not found match buffer with size larger than %d\n", size);
            mpp_buffer_group_dump(p, caller);
        }
    }

    pthread_mutex_unlock(&p->buf_lock);

    MPP_BUF_FUNCTION_LEAVE();
    return buffer;
}
691
mpp_buffer_to_addr(MppBuffer buffer,size_t offset)692 rk_u32 mpp_buffer_to_addr(MppBuffer buffer, size_t offset)
693 {
694 MppBufferImpl *impl = (MppBufferImpl *)buffer;
695 rk_u32 addr = 0;
696
697 if (!impl) {
698 mpp_err_f("NULL buffer convert to zero address\n");
699 return 0;
700 }
701
702 if (impl->info.fd >= (1 << 10)) {
703 mpp_err_f("buffer fd %d is too large\n");
704 return 0;
705 }
706
707 if (impl->offset + offset >= SZ_4M) {
708 mpp_err_f("offset %d + %d is larger than 4M use extra info to send offset\n");
709 return 0;
710 }
711
712 addr = impl->info.fd + ((impl->offset + offset) << 10);
713
714 return addr;
715 }
716
/*
 * Find or create the map node binding 'buffer' to device 'dev'.
 *
 * Serializes against the device map (MPP_DEV_LOCK_MAP) first and then the
 * buffer lock. When no node exists yet one is taken from the map-node
 * pool and registered with the device via MPP_DEV_ATTACH_FD. Returns the
 * node, or NULL on allocation / attach failure.
 */
static MppDevBufMapNode *mpp_buffer_attach_dev_lock(const char *caller, MppBuffer buffer, MppDev dev)
{
    MppBufferImpl *impl = (MppBufferImpl *)buffer;
    MppDevBufMapNode *pos, *n;
    MppDevBufMapNode *node = NULL;
    MPP_RET ret = MPP_OK;

    mpp_dev_ioctl(dev, MPP_DEV_LOCK_MAP, NULL);

    pthread_mutex_lock(&impl->lock);

    /* already attached to this device? reuse the existing node */
    list_for_each_entry_safe(pos, n, &impl->list_maps, MppDevBufMapNode, list_buf) {
        if (pos->dev == dev) {
            node = pos;
            goto DONE;
        }
    }

    node = (MppDevBufMapNode *)mpp_mem_pool_get(pool_buf_map_node, caller);
    if (!node) {
        mpp_err("mpp_buffer_attach_dev failed to allocate map node\n");
        ret = MPP_NOK;
        goto DONE;
    }

    INIT_LIST_HEAD(&node->list_buf);
    INIT_LIST_HEAD(&node->list_dev);
    node->lock_buf = &impl->lock;
    node->buffer = impl;
    node->dev = dev;
    node->pool = pool_buf_map_node;
    node->buf_fd = impl->info.fd;

    /* register the fd with the device; on failure return the node to pool */
    ret = mpp_dev_ioctl(dev, MPP_DEV_ATTACH_FD, node);
    if (ret) {
        mpp_mem_pool_put(pool_buf_map_node, node, caller);
        node = NULL;
        goto DONE;
    }
    list_add_tail(&node->list_buf, &impl->list_maps);

DONE:
    pthread_mutex_unlock(&impl->lock);
    mpp_dev_ioctl(dev, MPP_DEV_UNLOCK_MAP, NULL);

    return node;
}
764
mpp_buffer_attach_dev_f(const char * caller,MppBuffer buffer,MppDev dev)765 MPP_RET mpp_buffer_attach_dev_f(const char *caller, MppBuffer buffer, MppDev dev)
766 {
767 MppDevBufMapNode *node;
768
769 node = mpp_buffer_attach_dev_lock(caller, buffer, dev);
770
771 return node ? MPP_OK : MPP_NOK;
772 }
773
mpp_buffer_detach_dev_f(const char * caller,MppBuffer buffer,MppDev dev)774 MPP_RET mpp_buffer_detach_dev_f(const char *caller, MppBuffer buffer, MppDev dev)
775 {
776 MppBufferImpl *impl = (MppBufferImpl *)buffer;
777 MppDevBufMapNode *pos, *n;
778 MPP_RET ret = MPP_OK;
779
780 mpp_dev_ioctl(dev, MPP_DEV_LOCK_MAP, NULL);
781 pthread_mutex_lock(&impl->lock);
782 list_for_each_entry_safe(pos, n, &impl->list_maps, MppDevBufMapNode, list_buf) {
783 if (pos->dev == dev) {
784 list_del_init(&pos->list_buf);
785 ret = mpp_dev_ioctl(dev, MPP_DEV_DETACH_FD, pos);
786 mpp_mem_pool_put(pool_buf_map_node, pos, caller);
787 break;
788 }
789 }
790 pthread_mutex_unlock(&impl->lock);
791 mpp_dev_ioctl(dev, MPP_DEV_UNLOCK_MAP, NULL);
792
793 return ret;
794 }
795
mpp_buffer_get_iova_f(const char * caller,MppBuffer buffer,MppDev dev)796 rk_u32 mpp_buffer_get_iova_f(const char *caller, MppBuffer buffer, MppDev dev)
797 {
798 MppDevBufMapNode *node;
799
800 node = mpp_buffer_attach_dev_lock(caller, buffer, dev);
801
802 return node ? node->iova : (rk_u32)(-1);
803 }
804
mpp_buffer_group_init(MppBufferGroupImpl ** group,const char * tag,const char * caller,MppBufferMode mode,MppBufferType type)805 MPP_RET mpp_buffer_group_init(MppBufferGroupImpl **group, const char *tag, const char *caller,
806 MppBufferMode mode, MppBufferType type)
807 {
808 MPP_BUF_FUNCTION_ENTER();
809 mpp_assert(caller);
810
811 *group = service_get_group(tag, caller, mode, type, 0);
812
813 MPP_BUF_FUNCTION_LEAVE();
814 return ((*group) ? (MPP_OK) : (MPP_NOK));
815 }
816
mpp_buffer_group_deinit(MppBufferGroupImpl * p)817 MPP_RET mpp_buffer_group_deinit(MppBufferGroupImpl *p)
818 {
819 if (!p) {
820 mpp_err_f("found NULL pointer\n");
821 return MPP_ERR_NULL_PTR;
822 }
823
824 MPP_BUF_FUNCTION_ENTER();
825
826 service_put_group(get_srv_buffer(), p, __FUNCTION__);
827
828 MPP_BUF_FUNCTION_LEAVE();
829 return MPP_OK;
830 }
831
/*
 * Reset a buffer group: flag every in-use buffer for discard and destroy
 * every unused one.
 *
 * Used buffers cannot be freed while still referenced, so they are only
 * marked 'discard' here and get destroyed when their last reference is
 * dropped (see mpp_buffer_ref_dec).
 */
MPP_RET mpp_buffer_group_reset(MppBufferGroupImpl *p)
{
    if (!p) {
        mpp_err_f("found NULL pointer\n");
        return MPP_ERR_NULL_PTR;
    }

    MPP_BUF_FUNCTION_ENTER();

    pthread_mutex_lock(&p->buf_lock);

    buf_grp_add_log(p, GRP_RESET, NULL);

    if (!list_empty(&p->list_used)) {
        MppBufferImpl *pos, *n;

        list_for_each_entry_safe(pos, n, &p->list_used, MppBufferImpl, list_status) {
            buf_add_log(pos, BUF_DISCARD, NULL);
            pos->discard = 1;
        }
    }

    // remove unused list
    if (!list_empty(&p->list_unused)) {
        MppBufferService *srv = get_srv_buffer();
        MppBufferImpl *pos, *n;

        list_for_each_entry_safe(pos, n, &p->list_unused, MppBufferImpl, list_status) {
            service_put_buffer(srv, p, pos, 0, __FUNCTION__);
        }
    }

    pthread_mutex_unlock(&p->buf_lock);

    MPP_BUF_FUNCTION_LEAVE();
    return MPP_OK;
}
869
mpp_buffer_group_set_callback(MppBufferGroupImpl * p,MppBufCallback callback,void * arg)870 MPP_RET mpp_buffer_group_set_callback(MppBufferGroupImpl *p,
871 MppBufCallback callback, void *arg)
872 {
873 if (!p)
874 return MPP_OK;
875
876 MPP_BUF_FUNCTION_ENTER();
877
878 p->arg = arg;
879 p->callback = callback;
880
881 MPP_BUF_FUNCTION_LEAVE();
882 return MPP_OK;
883 }
884
mpp_buffer_total_now()885 rk_u32 mpp_buffer_total_now()
886 {
887 MppBufferService *srv = get_srv_buffer();
888 rk_u32 size = 0;
889
890 if (srv)
891 size = srv->total_size;
892
893 return size;
894 }
895
mpp_buffer_total_max()896 rk_u32 mpp_buffer_total_max()
897 {
898 MppBufferService *srv = get_srv_buffer();
899 rk_u32 size = 0;
900
901 if (srv)
902 size = srv->total_max;
903
904 return size;
905 }
906
type_to_flag(MppBufferType type)907 static rk_u32 type_to_flag(MppBufferType type)
908 {
909 rk_u32 flag = MPP_ALLOC_FLAG_NONE;
910
911 if (type & MPP_BUFFER_FLAGS_DMA32)
912 flag += MPP_ALLOC_FLAG_DMA32;
913
914 if (type & MPP_BUFFER_FLAGS_CACHABLE)
915 flag += MPP_ALLOC_FLAG_CACHABLE;
916
917 if (type & MPP_BUFFER_FLAGS_CONTIG)
918 flag += MPP_ALLOC_FLAG_CMA;
919
920 return flag;
921 }
922
/*
 * Map (mode, type) to the id of the matching misc group.
 * Returns 0 (invalid id) for normal memory, which never uses a misc group.
 * Caller must hold srv->lock.
 */
static rk_u32 service_get_misc(MppBufferService *srv, MppBufferMode mode, MppBufferType type)
{
    rk_u32 flag = type_to_flag(type);

    /* strip the flag bits to get the base buffer type */
    type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
    if (type == MPP_BUFFER_TYPE_NORMAL)
        return 0;

    mpp_assert(mode < MPP_BUFFER_MODE_BUTT);
    mpp_assert(type < MPP_BUFFER_TYPE_BUTT);
    mpp_assert(flag < MPP_ALLOC_FLAG_TYPE_NB);

    return srv->misc[mode][type][flag];
}
937
mpp_buffer_get_misc_group(MppBufferMode mode,MppBufferType type)938 MppBufferGroupImpl *mpp_buffer_get_misc_group(MppBufferMode mode, MppBufferType type)
939 {
940 MppBufferService *srv = get_srv_buffer();
941 MppBufferGroupImpl *misc;
942 MppBufferType buf_type;
943 MppMutex *lock = &srv->lock;
944 rk_u32 id;
945
946 buf_type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
947 if (buf_type == MPP_BUFFER_TYPE_NORMAL)
948 return NULL;
949
950 mpp_assert(mode < MPP_BUFFER_MODE_BUTT);
951 mpp_assert(buf_type < MPP_BUFFER_TYPE_BUTT);
952
953 mpp_mutex_lock(lock);
954
955 id = service_get_misc(srv, mode, type);
956 if (!id) {
957 char tag[32];
958 rk_s32 offset = 0;
959
960 offset += snprintf(tag + offset, sizeof(tag) - offset, "misc");
961 offset += snprintf(tag + offset, sizeof(tag) - offset, "_%s",
962 buf_type == MPP_BUFFER_TYPE_ION ? "ion" :
963 buf_type == MPP_BUFFER_TYPE_DRM ? "drm" : "na");
964 offset += snprintf(tag + offset, sizeof(tag) - offset, "_%s",
965 mode == MPP_BUFFER_INTERNAL ? "int" : "ext");
966
967 misc = service_get_group(tag, __FUNCTION__, mode, type, 1);
968 } else
969 misc = get_group_by_id(srv, id);
970 mpp_mutex_unlock(lock);
971
972 return misc;
973 }
974
/*
 * One-time creation of the global buffer service singleton.
 *
 * Allocates the MppBufferService, creates the shared memory pools for
 * buffers / groups / map nodes and initializes the group containers.
 * No-op when the service already exists. On allocation failure the
 * service silently stays unavailable (srv_buffer remains NULL).
 */
static void mpp_buffer_service_init()
{
    MppBufferService *srv = srv_buffer;
    rk_s32 i, j, k;

    if (srv)
        return;

    mpp_env_get_u32("mpp_buffer_debug", &mpp_buffer_debug, 0);

    srv = mpp_calloc(MppBufferService, 1);
    if (!srv) {
        mpp_err_f("alloc buffer service failed\n");
        return;
    }

    srv_buffer = srv;

    srv_status = MPP_BUF_SRV_NORMAL;
    pool_buf = mpp_mem_pool_init_f(MODULE_TAG, sizeof(MppBufferImpl));
    pool_buf_grp = mpp_mem_pool_init_f("mpp_buf_grp", sizeof(MppBufferGroupImpl));
    pool_buf_map_node = mpp_mem_pool_init_f("mpp_buf_map_node", sizeof(MppDevBufMapNode));

    /* group id 0 is reserved as "invalid" (see service_get_group_id) */
    srv->group_id = 1;

    INIT_LIST_HEAD(&srv->list_group);
    INIT_LIST_HEAD(&srv->list_orphan);

    // NOTE: Do not create misc group at beginning. Only create on when needed.
    for (i = 0; i < MPP_BUFFER_MODE_BUTT; i++)
        for (j = 0; j < MPP_BUFFER_TYPE_BUTT; j++)
            for (k = 0; k < MPP_ALLOCATOR_WITH_FLAG_NUM; k++)
                srv->misc[i][j][k] = 0;

    for (i = 0; i < (rk_s32)HASH_SIZE(srv->hash_group); i++)
        INIT_HLIST_HEAD(&srv->hash_group[i]);

    mpp_mutex_init(&srv->lock);
}
1014
/*
 * Tear down the global buffer service.
 *
 * Releases, in order: the misc groups (the normal case), any leaked
 * regular groups, orphaned groups that still own buffers, and finally
 * the preset allocators and the service object itself. Afterwards the
 * status is MPP_BUF_SRV_FINALIZED so late callers get NULL silently.
 */
static void mpp_buffer_service_deinit()
{
    MppBufferService *srv = srv_buffer;
    rk_s32 i, j, k;

    if (!srv)
        return;

    /* relax the ref-count guard in service_put_buffer during teardown */
    srv->finalizing = 1;

    // first remove legacy group which is the normal case
    if (srv->misc_count) {
        mpp_log_f("cleaning misc group\n");
        for (i = 0; i < MPP_BUFFER_MODE_BUTT; i++)
            for (j = 0; j < MPP_BUFFER_TYPE_BUTT; j++)
                for (k = 0; k < MPP_ALLOCATOR_WITH_FLAG_NUM; k++) {
                    rk_u32 id = srv->misc[i][j][k];

                    if (id) {
                        service_put_group(srv, get_group_by_id(srv, id), __FUNCTION__);
                        srv->misc[i][j][k] = 0;
                    }
                }
    }

    // then remove the remaining group which is the leak one
    if (!list_empty(&srv->list_group)) {
        MppBufferGroupImpl *pos, *n;

        if (mpp_buffer_debug & MPP_BUF_DBG_DUMP_ON_EXIT)
            service_dump(srv, "leaked group found");

        mpp_log_f("cleaning leaked group\n");
        list_for_each_entry_safe(pos, n, &srv->list_group, MppBufferGroupImpl, list_group) {
            service_put_group(srv, pos, __FUNCTION__);
        }
    }

    // remove all orphan buffer group
    if (!list_empty(&srv->list_orphan)) {
        MppBufferGroupImpl *pos, *n;

        mpp_log_f("cleaning leaked buffer\n");

        list_for_each_entry_safe(pos, n, &srv->list_orphan, MppBufferGroupImpl, list_group) {
            pos->clear_on_exit = 1;
            pos->is_finalizing = 1;
            service_put_group(srv, pos, __FUNCTION__);
        }
    }

    /* drop every preset allocator instance */
    for (i = 0; i < MPP_BUFFER_TYPE_BUTT; i++) {
        for (j = 0; j < MPP_ALLOCATOR_WITH_FLAG_NUM; j++) {
            if (srv->allocator[i][j])
                mpp_allocator_put(&(srv->allocator[i][j]));
        }
    }
    mpp_mutex_destroy(&srv->lock);

    /* MPP_FREE also resets srv_buffer to NULL */
    MPP_FREE(srv_buffer);
    srv_status = MPP_BUF_SRV_FINALIZED;
}
1077
/*
 * Allocate a fresh non-zero group id and bump the service group counter.
 *
 * Ids increase monotonically. Once the 32-bit counter wraps to 0 the
 * allocator enters overflow mode permanently and skips ids still owned
 * by live groups.
 * NOTE(review): 'overflowed' is function-local static state — assumed to
 * be called with srv->lock held; confirm against callers.
 */
rk_u32 service_get_group_id(MppBufferService *srv)
{
    static rk_u32 overflowed = 0;
    rk_u32 id = 0;

    if (!overflowed) {
        /* avoid 0 group id */
        if (srv->group_id)
            id = srv->group_id++;
        else {
            overflowed = 1;
            srv->group_id = 1;
        }
    }

    if (overflowed) {
        id = srv->group_id++;

        /* when it is overflow avoid the used id */
        while (get_group_by_id(srv, id))
            id = srv->group_id++;
    }

    srv->group_count++;

    return id;
}
1105
service_get_group(const char * tag,const char * caller,MppBufferMode mode,MppBufferType type,rk_u32 is_misc)1106 static MppBufferGroupImpl *service_get_group(const char *tag, const char *caller,
1107 MppBufferMode mode, MppBufferType type,
1108 rk_u32 is_misc)
1109 {
1110 MppBufferType buffer_type = (MppBufferType)(type & MPP_BUFFER_TYPE_MASK);
1111 MppBufferGroupImpl *p = NULL;
1112 MppBufferService *srv = get_srv_buffer();
1113 MppMutex *lock;
1114 rk_u32 flag;
1115 rk_u32 id;
1116
1117 /* env update */
1118 mpp_env_get_u32("mpp_buffer_debug", &mpp_buffer_debug, mpp_buffer_debug);
1119
1120 if (mode >= MPP_BUFFER_MODE_BUTT || buffer_type >= MPP_BUFFER_TYPE_BUTT) {
1121 mpp_err("MppBufferService get_group found invalid mode %d type %x\n", mode, type);
1122 return NULL;
1123 }
1124
1125 if (!srv) {
1126 mpp_err("MppBufferService get_group failed to get service\n");
1127 return NULL;
1128 }
1129
1130 lock = &srv->lock;
1131 p = (MppBufferGroupImpl *)mpp_mem_pool_get(pool_buf_grp, caller);
1132 if (!p) {
1133 mpp_err("MppBufferService failed to allocate group context\n");
1134 return NULL;
1135 }
1136
1137 flag = type_to_flag(type);
1138
1139 p->flags = (MppAllocFlagType)flag;
1140
1141 {
1142 MppAllocator allocator = NULL;
1143 MppAllocatorApi *alloc_api = NULL;
1144
1145 mpp_mutex_lock(lock);
1146
1147 allocator = srv->allocator[buffer_type][flag];
1148 alloc_api = srv->allocator_api[buffer_type];
1149
1150 // allocate general buffer first
1151 if (!allocator) {
1152 mpp_allocator_get(&allocator, &alloc_api, type, p->flags);
1153 srv->allocator[buffer_type][flag] = allocator;
1154 srv->allocator_api[buffer_type] = alloc_api;
1155 }
1156
1157 p->allocator = allocator;
1158 p->alloc_api = alloc_api;
1159 p->flags = mpp_allocator_get_flags(allocator);
1160
1161 mpp_mutex_unlock(lock);
1162 }
1163
1164 if (!p->allocator || !p->alloc_api) {
1165 mpp_mem_pool_put(pool_buf_grp, p, caller);
1166 mpp_err("MppBufferService get_group failed to get allocater with mode %d type %x\n", mode, type);
1167 return NULL;
1168 }
1169
1170 INIT_LIST_HEAD(&p->list_group);
1171 INIT_LIST_HEAD(&p->list_used);
1172 INIT_LIST_HEAD(&p->list_unused);
1173 INIT_HLIST_NODE(&p->hlist);
1174
1175 p->log_runtime_en = (mpp_buffer_debug & MPP_BUF_DBG_OPS_RUNTIME) ? (1) : (0);
1176 p->log_history_en = (mpp_buffer_debug & MPP_BUF_DBG_OPS_HISTORY) ? (1) : (0);
1177
1178 p->caller = caller;
1179 p->mode = mode;
1180 p->type = buffer_type;
1181 p->limit = BUFFER_GROUP_SIZE_DEFAULT;
1182 p->clear_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_CLR_ON_EXIT) ? (1) : (0);
1183 p->dump_on_exit = (mpp_buffer_debug & MPP_BUF_DBG_DUMP_ON_EXIT) ? (1) : (0);
1184
1185 {
1186 pthread_mutexattr_t attr;
1187
1188 pthread_mutexattr_init(&attr);
1189 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
1190 pthread_mutex_init(&p->buf_lock, &attr);
1191 pthread_mutexattr_destroy(&attr);
1192 }
1193
1194 if (p->log_history_en)
1195 p->logs = buf_logs_init(BUFFER_OPS_MAX_COUNT);
1196
1197 mpp_mutex_lock(lock);
1198
1199 id = service_get_group_id(srv);
1200 if (tag) {
1201 snprintf(p->tag, sizeof(p->tag) - 1, "%s_%d", tag, id);
1202 } else {
1203 snprintf(p->tag, sizeof(p->tag) - 1, "unknown");
1204 }
1205 p->group_id = id;
1206
1207 list_add_tail(&p->list_group, &srv->list_group);
1208 hash_add(srv->hash_group, &p->hlist, id);
1209
1210 buf_grp_add_log(p, GRP_CREATE, caller);
1211
1212 if (is_misc) {
1213 srv->misc[mode][buffer_type][flag] = id;
1214 p->is_misc = 1;
1215 srv->misc_count++;
1216 }
1217
1218 mpp_mutex_unlock(lock);
1219
1220 return p;
1221 }
1222
/*
 * Tear down a group context: detach it from the service bookkeeping,
 * release its lock and log storage, and return it to the group pool.
 * The group must have no used or unused buffers left; in release builds
 * a mismatch is reported and the counters are forcibly cleared.
 */
static void destroy_group(MppBufferService *srv, MppBufferGroupImpl *group)
{
    mpp_assert(group->count_used == 0);
    mpp_assert(group->count_unused == 0);

    if (group->count_used || group->count_unused) {
        mpp_err("mpp_buffer_group %s deinit mismatch counter used %4d unused %4d found\n",
                group->caller, group->count_used, group->count_unused);
        group->count_used = 0;
        group->count_unused = 0;
    }

    buf_grp_add_log(group, GRP_DESTROY, __FUNCTION__);

    list_del_init(&group->list_group);
    hash_del(&group->hlist);
    pthread_mutex_destroy(&group->buf_lock);

    if (group->logs) {
        buf_logs_deinit(group->logs);
        group->logs = NULL;
    }

    /* service may already be gone when called during finalize */
    if (srv) {
        MppBufferMode mode = group->mode;
        MppBufferType type = group->type;
        rk_u32 flag = type_to_flag(type);
        rk_u32 id = group->group_id;

        srv->group_count--;

        /* drop the misc slot if this group owned it */
        if (srv->misc[mode][type][flag] == id) {
            srv->misc[mode][type][flag] = 0;
            srv->misc_count--;
        }
    }

    if (pool_buf_grp)
        mpp_mem_pool_put_f(pool_buf_grp, group);
}
1263
/*
 * Release a buffer group. Unused buffers are freed immediately. If used
 * buffers remain the group is either force-cleared (clear_on_exit) or
 * parked on the orphan list until its buffers are returned. During
 * service finalization the lock is already held, so it is skipped here.
 */
static void service_put_group(MppBufferService *srv, MppBufferGroupImpl *p, const char *caller)
{
    MppMutex *srv_lock;
    rk_u32 take_lock;

    if (!srv)
        return;

    srv_lock = &srv->lock;
    take_lock = !srv->finalizing;

    if (take_lock)
        mpp_mutex_lock(srv_lock);

    buf_grp_add_log(p, GRP_RELEASE, caller);

    /* free everything on the unused list first */
    if (!list_empty(&p->list_unused)) {
        MppBufferImpl *buf, *tmp;

        list_for_each_entry_safe(buf, tmp, &p->list_unused, MppBufferImpl, list_status) {
            service_put_buffer(srv, p, buf, 0, caller);
        }
    }

    if (list_empty(&p->list_used)) {
        destroy_group(srv, p);
    } else {
        /* report leaks unless we are finalizing silently */
        if (!srv->finalizing || p->dump_on_exit) {
            mpp_err("mpp_group %p tag %s caller %s mode %s type %s deinit with %d bytes not released\n",
                    p, p->tag, p->caller, mode2str[p->mode], type2str[p->type], p->usage);

            mpp_buffer_group_dump(p, caller);
        }

        if (p->clear_on_exit) {
            /* forcibly drop every remaining reference and free the group */
            MppBufferImpl *buf, *tmp;

            if (p->dump_on_exit)
                mpp_err("force release all remaining buffer\n");

            list_for_each_entry_safe(buf, tmp, &p->list_used, MppBufferImpl, list_status) {
                if (p->dump_on_exit)
                    mpp_err("clearing buffer %p\n", buf);
                buf->ref_count = 0;
                buf->discard = 1;
                service_put_buffer(srv, p, buf, 0, caller);
            }

            destroy_group(srv, p);
        } else {
            /* park the group on the orphan list until its buffers return */
            buf_grp_add_log(p, GRP_ORPHAN, caller);
            list_del_init(&p->list_group);
            list_add_tail(&p->list_group, &srv->list_orphan);
            p->is_orphan = 1;
        }
    }

    if (take_lock)
        mpp_mutex_unlock(srv_lock);
}
1325
1326
/* Dump every registered buffer group under the service lock. */
static void service_dump(MppBufferService *srv, const char *info)
{
    MppBufferGroupImpl *grp;
    struct hlist_node *tmp;
    rk_u32 bkt;

    mpp_mutex_lock(&srv->lock);

    mpp_log("dumping all buffer groups for %s\n", info);

    if (hash_empty(srv->hash_group)) {
        mpp_log("no buffer group can be dumped\n");
    } else {
        hash_for_each_safe(srv->hash_group, bkt, tmp, grp, hlist) {
            mpp_buffer_group_dump(grp, __FUNCTION__);
        }
    }

    mpp_mutex_unlock(&srv->lock);
}
1347
/* Register the buffer service init/deinit hooks with the singleton framework. */
MPP_SINGLETON(MPP_SGLN_BUFFER, mpp_buffer, mpp_buffer_service_init, mpp_buffer_service_deinit)
1349