// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

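/*
 * Link @new into the time-ordered 'events' list. The search starts from
 * the most recently queued event (oe->last) rather than from the list
 * head, since consecutive events tend to carry nearby timestamps, which
 * keeps the walk short in the common case.
 */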
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * The last queued event may sit anywhere in the list; we expect
	 * the new event's timestamp to be close to it, so walk forward or
	 * backward from there.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

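/*
 * When copy_on_queue is set, queued events are duplicated so the caller
 * can reuse its buffer right away. Each copy is charged against
 * cur_alloc_size, and duplication is refused (NULL) once max_alloc_size
 * is reached, which makes the caller flush before retrying.
 */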
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

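/*
 * Counterpart to dup_event(): release the duplicated copy and its
 * cur_alloc_size accounting when copy_on_queue is set; a no-op
 * otherwise, since the event memory then belongs to the caller.
 */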
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (oe->copy_on_queue)
		__free_dup_event(oe, event);
}

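/* Number of struct ordered_event slots carved out of each 64K buffer. */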
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;
	size_t size;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	/*
	 * We maintain the following scheme of buffers for ordered
	 * event allocation:
	 *
	 *   to_free list -> buffer1 (64K)
	 *                   buffer2 (64K)
	 *                   ...
	 *
	 * Each buffer keeps an array of ordered event objects:
	 *    buffer -> event[0]
	 *              event[1]
	 *              ...
	 *
	 * Each allocated ordered event is linked to one of the
	 * following lists:
	 *   - time ordered list 'events'
	 *   - list of currently removed events 'cache'
	 *
	 * Allocation of the ordered event uses the following order
	 * to get the memory:
	 *   - use recently removed object from 'cache' list
	 *   - use available object in current allocation buffer
	 *   - allocate new buffer if the current buffer is full
	 *
	 * Removal of an ordered event object moves it from 'events'
	 * to the 'cache' list.
	 */
	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del_init(&new->list);
	} else if (oe->buffer) {
		new = &oe->buffer->event[oe->buffer_idx];
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		oe->buffer_idx = 1;
		new = &oe->buffer->event[0];
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* Don't leak the copy made by dup_event() above. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

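/*
 * Allocate an ordered event for @event, stamp it with @timestamp and
 * queue it. Returns NULL when the allocation limit has been hit.
 */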
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

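/*
 * Return @event to the 'cache' list for reuse and release its
 * duplicated perf event, if any.
 */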
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

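/*
 * Public entry point: queue @event for time-ordered delivery. A zero or
 * all-ones timestamp is rejected with -ETIME; a timestamp older than
 * the last flush is accepted but counted as unordered. If the
 * allocation limit is hit, half of the queue is flushed to make room
 * before giving up with -ENOMEM.
 */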
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  u64 timestamp, u64 file_offset)
{
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

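/*
 * Deliver and remove every queued event with a timestamp up to
 * oe->next_flush, advancing oe->last_flush as we go.
 */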
static int do_flush(struct ordered_events *oe, bool show_progress)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

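/*
 * Compute the flush limit for the requested strategy, then flush:
 *   FINAL/TOP - flush everything (FINAL shows a progress bar),
 *   HALF      - flush up to the midpoint between the oldest and the
 *               most recently queued timestamp,
 *   TIME      - flush up to the given @timestamp,
 *   ROUND     - flush up to the limit left by the previous round,
 *               then advance next_flush to max_timestamp.
 */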
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
				   u64 timestamp)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
		"TOP  ",
		"TIME ",
	};
	int err;
	bool show_progress = false;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		show_progress = true;
		__fallthrough;
	case OE_FLUSH__TOP:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__TIME:
		oe->next_flush = timestamp;
		show_progress = false;
		break;

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = do_flush(oe, show_progress);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

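/* Convenience wrappers around __ordered_events__flush(). */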
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

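/* Timestamp of the oldest queued event, or 0 if the queue is empty. */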
u64 ordered_events__first_time(struct ordered_events *oe)
{
	struct ordered_event *event;

	if (list_empty(&oe->events))
		return 0;

	event = list_first_entry(&oe->events, struct ordered_event, list);
	return event->timestamp;
}

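/*
 * Initialize @oe with the @deliver callback invoked by do_flush() for
 * each flushed event, and an opaque @data cookie for the caller.
 *
 * A minimal usage sketch (hypothetical callback and variables, for
 * illustration only; delivered events are recycled by the core, so the
 * callback must not free them):
 *
 *	static int deliver(struct ordered_events *oe,
 *			   struct ordered_event *event)
 *	{
 *		// process event->event here
 *		return 0;
 *	}
 *
 *	ordered_events__init(&oe, deliver, NULL);
 *	ordered_events__queue(&oe, event, sample_time, file_offset);
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */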
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
			  void *data)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
	oe->data	   = data;
}

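/*
 * Free one allocation buffer, first releasing the duplicated events of
 * its @max used slots when copy_on_queue is set.
 */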
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
			    unsigned int max, struct ordered_events *oe)
{
	if (oe->copy_on_queue) {
		unsigned int i;

		for (i = 0; i < max; i++)
			__free_dup_event(oe, buffer->event[i].event);
	}

	free(buffer);
}

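/* Release every allocation buffer and any events duplicated on queueing. */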
void ordered_events__free(struct ordered_events *oe)
{
	struct ordered_events_buffer *buffer, *tmp;

	if (list_empty(&oe->to_free))
		return;

	/*
	 * Current buffer might not have all the events allocated
	 * yet, we need to free only allocated ones ...
	 */
	if (oe->buffer) {
		list_del_init(&oe->buffer->list);
		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
	}

	/* ... and continue with the rest */
	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
		list_del_init(&buffer->list);
		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
	}
}

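/*
 * Reset @oe to a pristine state while preserving the caller's deliver
 * callback and data cookie.
 */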
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;
	void *old_data = oe->data;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	/*
	 * Use the values saved above: the memset() has just zeroed
	 * oe->deliver and oe->data.
	 */
	ordered_events__init(oe, old_deliver, old_data);
}