1 /* SPDX-License-Identifier: Apache-2.0 OR MIT */
2 /*
3 * Copyright (c) 2015 Rockchip Electronics Co., Ltd.
4 */
5
6 #define MODULE_TAG "mpp_list"
7
8 #include <stdlib.h>
9 #include <string.h>
10 #include <stdarg.h>
11 #include <errno.h>
12
13 #include "mpp_mem.h"
14 #include "mpp_list.h"
15 #include "mpp_debug.h"
16 #include "mpp_common.h"
17
18 #define LIST_DEBUG(fmt, ...) mpp_log(fmt, ## __VA_ARGS__)
19 #define LIST_ERROR(fmt, ...) mpp_err(fmt, ## __VA_ARGS__)
20
/* Reset a node so it links to itself in both directions (empty ring). */
static inline void list_node_init(MppListNode *node)
{
    node->next = node;
    node->prev = node;
}
25
/* Self-link the node and record its lookup key and payload size. */
static inline void list_node_init_with_key_and_size(MppListNode *node, rk_u32 key, rk_s32 size)
{
    node->size = size;
    node->key = key;
    list_node_init(node);
}
32
create_list(void * data,rk_s32 size,rk_u32 key)33 static MppListNode* create_list(void *data, rk_s32 size, rk_u32 key)
34 {
35 MppListNode *node = mpp_malloc_size(MppListNode, sizeof(MppListNode) + size);
36
37 if (node) {
38 void *dst = (void*)(node + 1);
39 list_node_init_with_key_and_size(node, key, size);
40 memcpy(dst, data, size);
41 } else {
42 LIST_ERROR("failed to allocate list node");
43 }
44 return node;
45 }
46
/* Splice '_new' into the ring between 'prev' and 'next'. */
static inline void _mpp_list_add(MppListNode * _new, MppListNode * prev, MppListNode * next)
{
    _new->prev = prev;
    _new->next = next;
    prev->next = _new;
    next->prev = _new;
}
54
/* Insert '_new' right after the sentinel 'head' (LIFO order). */
static inline void mpp_list_add(MppListNode *_new, MppListNode *head)
{
    MppListNode *first = head->next;

    _mpp_list_add(_new, head, first);
}
59
/* Insert '_new' right before the sentinel 'head' (FIFO order). */
static inline void mpp_list_add_tail(MppListNode *_new, MppListNode *head)
{
    MppListNode *last = head->prev;

    _mpp_list_add(_new, last, head);
}
64
/*
 * Copy 'size' bytes from 'data' into a new node at the head of the list.
 * Returns 0 on success, -EINVAL on an invalid list, -ENOMEM on OOM.
 */
int mpp_list_add_at_head(MppList *list, void *data, int size)
{
    rk_s32 ret = -EINVAL;

    /* NULL 'list' check added for consistency with mpp_list_fifo_wr */
    if (list && list->head) {
        MppListNode *node = create_list(data, size, 0);

        if (node) {
            mpp_list_add(node, list->head);
            list->count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}
81
/*
 * Copy 'size' bytes from 'data' into a new node at the tail of the list.
 * Returns 0 on success, -EINVAL on an invalid list, -ENOMEM on OOM.
 */
int mpp_list_add_at_tail(MppList *list, void *data, int size)
{
    rk_s32 ret = -EINVAL;

    /* NULL 'list' check added for consistency with mpp_list_fifo_wr */
    if (list && list->head) {
        MppListNode *node = create_list(data, size, 0);

        if (node) {
            mpp_list_add_tail(node, list->head);
            list->count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}
99
/*
 * Copy the node's payload out to 'data' (when non-NULL) and free the node.
 * A size mismatch is logged and the smaller of the two sizes is copied.
 */
static void release_list(MppListNode *node, void *data, rk_s32 size)
{
    void *payload = (void*)(node + 1);
    rk_s32 copy_size = size;

    if (node->size != size) {
        LIST_ERROR("node size check failed when release_list");
        if (node->size < copy_size)
            copy_size = node->size;
    }

    if (data)
        memcpy(data, payload, copy_size);

    mpp_free(node);
}
115
/* Bridge 'prev' and 'next' together, unlinking whatever sat between them. */
static inline void _mpp_list_del(MppListNode *prev, MppListNode *next)
{
    prev->next = next;
    next->prev = prev;
}
121
/* Unlink 'node' from its ring, then re-initialize it to self-linked. */
static inline void mpp_list_del_init(MppListNode *node)
{
    MppListNode *before = node->prev;
    MppListNode *after  = node->next;

    _mpp_list_del(before, after);
    list_node_init(node);
}
127
/* Detach 'node' from the list and release it, copying its payload to 'data'.
 * Caller is responsible for any locking (hence the _no_lock suffix). */
static inline void _list_del_node_no_lock(MppListNode *node, void *data, rk_s32 size)
{
    mpp_list_del_init(node);
    release_list(node, data, size);
}
133
/*
 * Remove the node at the head of the list, copying its payload into 'data'
 * (may be NULL to discard). Returns 0 on success, -EINVAL on invalid/empty list.
 */
int mpp_list_del_at_head(MppList *list, void *data, int size)
{
    rk_s32 ret = -EINVAL;

    /* NULL 'list' check added for consistency with mpp_list_fifo_rd */
    if (list && list->head && list->count) {
        _list_del_node_no_lock(list->head->next, data, size);
        list->count--;
        ret = 0;
    }
    return ret;
}
145
/*
 * Remove the node at the tail of the list, copying its payload into 'data'
 * (may be NULL to discard). Returns 0 on success, -EINVAL on invalid/empty list.
 */
int mpp_list_del_at_tail(MppList *list, void *data, int size)
{
    rk_s32 ret = -EINVAL;

    /* NULL 'list' check added for consistency with mpp_list_fifo_rd */
    if (list && list->head && list->count) {
        _list_del_node_no_lock(list->head->prev, data, size);
        list->count--;
        ret = 0;
    }
    return ret;
}
create_list_with_size(void * data,rk_s32 size,rk_u32 key)157 static MppListNode* create_list_with_size(void *data, rk_s32 size, rk_u32 key)
158 {
159 MppListNode *node = mpp_malloc_size(MppListNode, sizeof(MppListNode) + sizeof(size) + size);
160
161 if (node) {
162 rk_s32 *dst = (rk_s32 *)(node + 1);
163 list_node_init_with_key_and_size(node, key, size);
164 *dst++ = size;
165 memcpy(dst, data, size);
166 } else {
167 LIST_ERROR("failed to allocate list node");
168 }
169 return node;
170 }
171
/*
 * FIFO write: append 'size' bytes from 'data' (length stored with the node).
 * Returns 0 on success, -EINVAL on an invalid list, -ENOMEM on OOM.
 */
rk_s32 mpp_list_fifo_wr(MppList *list, void *data, rk_s32 size)
{
    MppListNode *node;

    if (!list || !list->head)
        return -EINVAL;

    node = create_list_with_size(data, size, 0);
    if (!node)
        return -ENOMEM;

    mpp_list_add_tail(node, list->head);
    list->count++;
    return 0;
}
189
/*
 * Counterpart of create_list_with_size(): read back the stored payload
 * length into '*size', copy the payload to 'data' (when non-NULL), and
 * free the node.
 */
static void release_list_with_size(MppListNode* node, void *data, rk_s32 *size)
{
    rk_s32 *payload = (rk_s32*)(node + 1);
    rk_s32 stored_size = payload[0];

    *size = stored_size;

    if (data)
        memcpy(data, payload + 1, stored_size);

    mpp_free(node);
}
202
/*
 * FIFO read: pop the oldest node, copy its payload to 'data' (may be NULL)
 * and its length to '*size'. Returns 0 on success, -EINVAL otherwise.
 */
rk_s32 mpp_list_fifo_rd(MppList *list, void *data, rk_s32 *size)
{
    MppListNode *oldest;

    if (!list || !list->head || !list->count)
        return -EINVAL;

    oldest = list->head->next;
    mpp_list_del_init(oldest);
    release_list_with_size(oldest, data, size);
    list->count--;
    return 0;
}
217
/* Non-zero when the list holds no payload nodes. */
int mpp_list_is_empty(MppList *list)
{
    return (list->count) ? 0 : 1;
}
222
/* Number of payload nodes currently linked on the list. */
int mpp_list_size(MppList *list)
{
    return list->count;
}
227
/*
 * Append a node tagged with a freshly generated key; the key is returned
 * through '*key' so the caller can later delete the node with it.
 * Returns 0 on success, -EINVAL on an invalid list, -ENOMEM on OOM.
 */
rk_s32 mpp_list_add_by_key(MppList *list, void *data, rk_s32 size, rk_u32 *key)
{
    /* was initialized to 0: an invalid list silently reported success */
    rk_s32 ret = -EINVAL;

    if (list && list->head) {
        MppListNode *node;
        rk_u32 list_key = mpp_list_get_key(list);

        *key = list_key;
        node = create_list(data, size, list_key);
        if (node) {
            mpp_list_add_tail(node, list->head);
            list->count++;
            ret = 0;
        } else {
            ret = -ENOMEM;
        }
    }
    return ret;
}
248
/*
 * Delete the first node whose key matches 'key', copying its payload into
 * 'data' (may be NULL). Returns 0 on success, -EINVAL when the list is
 * invalid/empty or the key is not found.
 */
rk_s32 mpp_list_del_by_key(MppList *list, void *data, rk_s32 size, rk_u32 key)
{
    rk_s32 ret = -EINVAL;

    if (list && list->head && list->count) {
        MppListNode *tmp = list->head->next;

        /*
         * Walk every real node: the old condition 'tmp->next != head'
         * stopped one node early and never examined the tail node; it
         * also never set ret back to 0 on a successful deletion.
         */
        while (tmp != list->head) {
            if (tmp->key == key) {
                _list_del_node_no_lock(tmp, data, size);
                list->count--;
                ret = 0;
                break;
            }
            tmp = tmp->next;
        }
    }
    return ret;
}
268
269
mpp_list_show_by_key(MppList * list,void * data,rk_u32 key)270 rk_s32 mpp_list_show_by_key(MppList *list, void *data, rk_u32 key)
271 {
272 rk_s32 ret = -EINVAL;
273
274 (void)list;
275 (void)data;
276 (void)key;
277 return ret;
278 }
279
mpp_list_flush(MppList * list)280 void mpp_list_flush(MppList* list)
281 {
282 if (list->head) {
283 while (list->count) {
284 MppListNode* node = list->head->next;
285
286 mpp_list_del_init(node);
287
288 if (list->destroy) {
289 list->destroy((void*)(node + 1));
290 }
291
292 mpp_free(node);
293 list->count--;
294 }
295 }
296
297 mpp_list_signal(list);
298 }
299
mpp_list_wait(MppList * list)300 MPP_RET mpp_list_wait(MppList* list)
301 {
302 int ret;
303
304 ret = mpp_mutex_cond_wait(&list->cond_lock);
305
306 if (ret == 0) {
307 return MPP_OK;
308 } else if (ret == ETIMEDOUT) {
309 return MPP_NOK;
310 } else {
311 return MPP_NOK;
312 }
313 }
314
mpp_list_wait_timed(MppList * list,rk_s64 timeout)315 MPP_RET mpp_list_wait_timed(MppList *list, rk_s64 timeout)
316 {
317 int ret;
318
319 ret = (MPP_RET)mpp_mutex_cond_timedwait(&list->cond_lock, timeout);
320
321 if (ret == 0) {
322 return MPP_OK;
323 } else if (ret == ETIMEDOUT) {
324 return MPP_NOK;
325 } else {
326 return MPP_NOK;
327 }
328 }
329
mpp_list_wait_lt(MppList * list,rk_s64 timeout,rk_s32 val)330 MPP_RET mpp_list_wait_lt(MppList *list, rk_s64 timeout, rk_s32 val)
331 {
332 if (list->count < val)
333 return MPP_OK;
334
335 if (timeout < 0) {
336 return mpp_list_wait(list);
337 } else {
338 return mpp_list_wait_timed(list, timeout);
339 }
340 }
341
mpp_list_wait_le(MppList * list,rk_s64 timeout,rk_s32 val)342 MPP_RET mpp_list_wait_le(MppList *list, rk_s64 timeout, rk_s32 val)
343 {
344 if (list->count <= val)
345 return MPP_OK;
346
347 if (timeout < 0) {
348 return mpp_list_wait(list);
349 } else {
350 return mpp_list_wait_timed(list, timeout);
351 }
352 }
353
mpp_list_wait_gt(MppList * list,rk_s64 timeout,rk_s32 val)354 MPP_RET mpp_list_wait_gt(MppList *list, rk_s64 timeout, rk_s32 val)
355 {
356 if (list->count > val)
357 return MPP_OK;
358
359 if (timeout < 0) {
360 return mpp_list_wait(list);
361 } else {
362 return mpp_list_wait_timed(list, timeout);
363 }
364 }
365
mpp_list_wait_ge(MppList * list,rk_s64 timeout,rk_s32 val)366 MPP_RET mpp_list_wait_ge(MppList *list, rk_s64 timeout, rk_s32 val)
367 {
368 if (list->count >= val)
369 return MPP_OK;
370
371 if (timeout < 0) {
372 return mpp_list_wait(list);
373 } else {
374 return mpp_list_wait_timed(list, timeout);
375 }
376 }
377
/* Wake a waiter blocked on the list's condition variable. */
void mpp_list_signal(MppList *list)
{
    mpp_mutex_cond_signal(&list->cond_lock);
}
382
mpp_list_get_key(MppList * list)383 rk_u32 mpp_list_get_key(MppList *list)
384 {
385 return list->keys++;
386 }
387
mpp_list_create(node_destructor func)388 MppList *mpp_list_create(node_destructor func)
389 {
390 MppList *list = mpp_malloc(MppList, 1);
391
392 if (list == NULL) {
393 LIST_ERROR("Failed to allocate memory for mpp_list.\n");
394 return NULL;
395 }
396
397 list->destroy = func;
398 list->count = 0;
399
400 list->head = mpp_malloc(MppListNode, 1);
401 if (list->head == NULL) {
402 LIST_ERROR("Failed to allocate memory for list header.\n");
403 mpp_free(list);
404 return NULL;
405 }
406
407 list_node_init_with_key_and_size(list->head, 0, 0);
408
409 mpp_mutex_cond_init(&list->cond_lock);
410
411 return list;
412 }
413
/*
 * Tear down a list created by mpp_list_create(): flush all payload nodes
 * (running the destructor), defensively free any nodes still linked on the
 * ring, then release the condition variable, head sentinel, and the list.
 * Safe to call with NULL.
 */
void mpp_list_destroy(MppList *list)
{
    MppListNode *node;

    if (!list)
        return;

    mpp_list_flush(list);

    /* defensive sweep: free whatever is still linked even if 'count' was stale */
    node = list->head->next;
    while (node != list->head) {
        MppListNode *next = node->next;

        mpp_free(node);
        node = next;
    }

    mpp_mutex_cond_destroy(&list->cond_lock);

    mpp_free(list->head);
    list->head = NULL;

    mpp_free(list);
    /* removed dead store 'list = NULL': assigning a local parameter has no effect */
}
439
440 /* list sort porting from kernel list_sort.c */
441
442 /*
443 * Returns a list organized in an intermediate format suited
444 * to chaining of merge() calls: null-terminated, no reserved or
445 * sentinel head node, "prev" links not maintained.
446 */
merge(void * priv,ListCmpFunc cmp,struct list_head * a,struct list_head * b)447 static struct list_head *merge(void *priv, ListCmpFunc cmp,
448 struct list_head *a, struct list_head *b)
449 {
450 struct list_head *head, **tail = &head;
451
452 for (;;) {
453 /* if equal, take 'a' -- important for sort stability */
454 if (cmp(priv, a, b) <= 0) {
455 *tail = a;
456 tail = &a->next;
457 a = a->next;
458 if (!a) {
459 *tail = b;
460 break;
461 }
462 } else {
463 *tail = b;
464 tail = &b->next;
465 b = b->next;
466 if (!b) {
467 *tail = a;
468 break;
469 }
470 }
471 }
472 return head;
473 }
474
475 /*
476 * Combine final list merge with restoration of standard doubly-linked
477 * list structure. This approach duplicates code from merge(), but
478 * runs faster than the tidier alternatives of either a separate final
479 * prev-link restoration pass, or maintaining the prev links
480 * throughout.
481 */
merge_final(void * priv,ListCmpFunc cmp,struct list_head * head,struct list_head * a,struct list_head * b)482 static void merge_final(void *priv, ListCmpFunc cmp, struct list_head *head,
483 struct list_head *a, struct list_head *b)
484 {
485 struct list_head *tail = head;
486 rk_u8 count = 0;
487
488 for (;;) {
489 /* if equal, take 'a' -- important for sort stability */
490 if (cmp(priv, a, b) <= 0) {
491 tail->next = a;
492 a->prev = tail;
493 tail = a;
494 a = a->next;
495 if (!a)
496 break;
497 } else {
498 tail->next = b;
499 b->prev = tail;
500 tail = b;
501 b = b->next;
502 if (!b) {
503 b = a;
504 break;
505 }
506 }
507 }
508
509 /* Finish linking remainder of list b on to tail */
510 tail->next = b;
511 do {
512 /*
513 * If the merge is highly unbalanced (e.g. the input is
514 * already sorted), this loop may run many iterations.
515 * Continue callbacks to the client even though no
516 * element comparison is needed, so the client's cmp()
517 * routine can invoke cond_resched() periodically.
518 */
519 if (!++count)
520 cmp(priv, b, b);
521 b->prev = tail;
522 tail = b;
523 b = b->next;
524 } while (b);
525
526 /* And the final links to make a circular doubly-linked list */
527 tail->next = head;
528 head->prev = tail;
529 }
530
/*
 * Bottom-up stable merge sort of the circular doubly-linked list at 'head',
 * using comparator 'cmp' (with opaque 'priv' passed through).
 * Fix: restored the definition line corrupted by extraction;
 * algorithm unchanged (kernel list_sort.c port).
 */
void list_sort(void *priv, struct list_head *head, ListCmpFunc cmp)
{
    struct list_head *list = head->next, *pending = NULL;
    size_t count = 0; /* Count of pending */

    if (list == head->prev) /* Zero or one elements */
        return;

    /* Convert to a null-terminated singly-linked list. */
    head->prev->next = NULL;

    /*
     * Data structure invariants:
     * - All lists are singly linked and null-terminated; prev
     *   pointers are not maintained.
     * - pending is a prev-linked "list of lists" of sorted
     *   sublists awaiting further merging.
     * - Each of the sorted sublists is power-of-two in size.
     * - Sublists are sorted by size and age, smallest & newest at front.
     * - There are zero to two sublists of each size.
     * - A pair of pending sublists are merged as soon as the number
     *   of following pending elements equals their size (i.e.
     *   each time count reaches an odd multiple of that size).
     *   That ensures each later final merge will be at worst 2:1.
     * - Each round consists of:
     *   - Merging the two sublists selected by the highest bit
     *     which flips when count is incremented, and
     *   - Adding an element from the input as a size-1 sublist.
     */
    do {
        size_t bits;
        struct list_head **tail = &pending;

        /* Find the least-significant clear bit in count */
        for (bits = count; bits & 1; bits >>= 1)
            tail = &(*tail)->prev;
        /* Do the indicated merge */
        if (bits) {
            struct list_head *a = *tail, *b = a->prev;

            a = merge(priv, cmp, b, a);
            /* Install the merged result in place of the inputs */
            a->prev = b->prev;
            *tail = a;
        }

        /* Move one element from input list to pending */
        list->prev = pending;
        pending = list;
        list = list->next;
        pending->next = NULL;
        count++;
    } while (list);

    /* End of input; merge together all the pending lists. */
    list = pending;
    pending = pending->prev;
    for (;;) {
        struct list_head *next = pending->prev;

        if (!next)
            break;
        list = merge(priv, cmp, pending, list);
        pending = next;
    }
    /* The final merge, rebuilding prev links */
    merge_final(priv, cmp, head, pending, list);
}
599