/** @file mlan_util.h
 *
 *  @brief This file contains wrappers for linked-list,
 *  spinlock and timer defines.
 *
 *
 *  Copyright 2008-2021 NXP
 *
 *  This software file (the File) is distributed by NXP
 *  under the terms of the GNU General Public License Version 2, June 1991
 *  (the License). You may use, redistribute and/or modify the File in
 *  accordance with the terms and conditions of the License, a copy of which
 *  is available by writing to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 *  worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 *  THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 *  ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 *  this warranty disclaimer.
 *
 */

/******************************************************
Change log:
	10/28/2008: initial version
******************************************************/
28
29 #ifndef _MLAN_UTIL_H_
30 #define _MLAN_UTIL_H_
31
/** Node of a circular doubly linked list */
typedef struct _mlan_linked_list {
	/** Pointer to previous node */
	struct _mlan_linked_list *pprev;
	/** Pointer to next node */
	struct _mlan_linked_list *pnext;
} mlan_linked_list, *pmlan_linked_list;
39
/** Head of a circular doubly linked list; layout-compatible with
 *  mlan_linked_list so it can be cast to pmlan_linked_list */
typedef struct _mlan_list_head {
	/** Pointer to previous (tail) node */
	struct _mlan_linked_list *pprev;
	/** Pointer to next (first) node */
	struct _mlan_linked_list *pnext;
	/** Pointer to the lock protecting this list (MNULL when lockless) */
	t_void *plock;
} mlan_list_head, *pmlan_list_head;
49
/** MLAN MNULL pointer (driver-local null pointer constant) */
#define MNULL ((void *)0)
52
53 /**
54 * @brief This function initializes a list without locking
55 *
56 * @param phead List head
57 *
58 * @return N/A
59 */
util_init_list(pmlan_linked_list phead)60 static INLINE t_void util_init_list(pmlan_linked_list phead)
61 {
62 /* Both next and prev point to self */
63 phead->pprev = phead->pnext = (pmlan_linked_list)phead;
64 }
65
66 /**
67 * @brief This function initializes a list
68 *
69 * @param phead List head
70 * @param lock_required A flag for spinlock requirement
71 * @param moal_init_lock A pointer to init lock handler
72 *
73 * @return N/A
74 */
util_init_list_head(t_void * pmoal_handle,pmlan_list_head phead,t_u8 lock_required,mlan_status (* moal_init_lock)(t_void * handle,t_void ** pplock))75 static INLINE t_void util_init_list_head(
76 t_void *pmoal_handle, pmlan_list_head phead, t_u8 lock_required,
77 mlan_status (*moal_init_lock)(t_void *handle, t_void **pplock))
78 {
79 /* Both next and prev point to self */
80 util_init_list((pmlan_linked_list)phead);
81 if (lock_required)
82 moal_init_lock(pmoal_handle, &phead->plock);
83 else
84 phead->plock = MNULL;
85 }
86
87 /**
88 * @brief This function frees a list
89 *
90 * @param phead List head
91 * @param moal_free_lock A pointer to free lock handler
92 *
93 * @return N/A
94 */
util_free_list_head(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_free_lock)(t_void * handle,t_void * plock))95 static INLINE t_void util_free_list_head(
96 t_void *pmoal_handle, pmlan_list_head phead,
97 mlan_status (*moal_free_lock)(t_void *handle, t_void *plock))
98 {
99 phead->pprev = phead->pnext = MNULL;
100 if (phead->plock)
101 moal_free_lock(pmoal_handle, phead->plock);
102 }
103
104 /**
105 * @brief This function peeks into a list
106 *
107 * @param phead List head
108 * @param moal_spin_lock A pointer to spin lock handler
109 * @param moal_spin_unlock A pointer to spin unlock handler
110 *
111 * @return List node
112 */
113 static INLINE pmlan_linked_list
util_peek_list(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))114 util_peek_list(t_void *pmoal_handle, pmlan_list_head phead,
115 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
116 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
117 {
118 pmlan_linked_list pnode = MNULL;
119
120 if (moal_spin_lock)
121 moal_spin_lock(pmoal_handle, phead->plock);
122 if (phead->pnext != (pmlan_linked_list)phead)
123 pnode = phead->pnext;
124 if (moal_spin_unlock)
125 moal_spin_unlock(pmoal_handle, phead->plock);
126 return pnode;
127 }
128
129 /**
130 * @brief This function queues a node at the list tail
131 *
132 * @param phead List head
133 * @param pnode List node to queue
134 * @param moal_spin_lock A pointer to spin lock handler
135 * @param moal_spin_unlock A pointer to spin unlock handler
136 *
137 * @return N/A
138 */
util_enqueue_list_tail(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))139 static INLINE t_void util_enqueue_list_tail(
140 t_void *pmoal_handle, pmlan_list_head phead, pmlan_linked_list pnode,
141 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
142 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
143 {
144 pmlan_linked_list pold_last;
145
146 if (moal_spin_lock)
147 moal_spin_lock(pmoal_handle, phead->plock);
148 pold_last = phead->pprev;
149 pnode->pprev = pold_last;
150 pnode->pnext = (pmlan_linked_list)phead;
151
152 phead->pprev = pold_last->pnext = pnode;
153 if (moal_spin_unlock)
154 moal_spin_unlock(pmoal_handle, phead->plock);
155 }
156
157 /**
158 * @brief This function adds a node at the list head
159 *
160 * @param phead List head
161 * @param pnode List node to add
162 * @param moal_spin_lock A pointer to spin lock handler
163 * @param moal_spin_unlock A pointer to spin unlock handler
164 *
165 * @return N/A
166 */
util_enqueue_list_head(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))167 static INLINE t_void util_enqueue_list_head(
168 t_void *pmoal_handle, pmlan_list_head phead, pmlan_linked_list pnode,
169 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
170 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
171 {
172 pmlan_linked_list pold_first;
173
174 if (moal_spin_lock)
175 moal_spin_lock(pmoal_handle, phead->plock);
176 pold_first = phead->pnext;
177 pnode->pprev = (pmlan_linked_list)phead;
178 pnode->pnext = pold_first;
179
180 phead->pnext = pold_first->pprev = pnode;
181 if (moal_spin_unlock)
182 moal_spin_unlock(pmoal_handle, phead->plock);
183 }
184
185 /**
186 * @brief This function removes a node from the list
187 *
188 * @param phead List head
189 * @param pnode List node to remove
190 * @param moal_spin_lock A pointer to spin lock handler
191 * @param moal_spin_unlock A pointer to spin unlock handler
192 *
193 * @return N/A
194 */
util_unlink_list(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))195 static INLINE t_void util_unlink_list(
196 t_void *pmoal_handle, pmlan_list_head phead, pmlan_linked_list pnode,
197 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
198 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
199 {
200 pmlan_linked_list pmy_prev;
201 pmlan_linked_list pmy_next;
202
203 if (moal_spin_lock)
204 moal_spin_lock(pmoal_handle, phead->plock);
205 pmy_prev = pnode->pprev;
206 pmy_next = pnode->pnext;
207 pmy_next->pprev = pmy_prev;
208 pmy_prev->pnext = pmy_next;
209
210 pnode->pnext = pnode->pprev = MNULL;
211 if (moal_spin_unlock)
212 moal_spin_unlock(pmoal_handle, phead->plock);
213 }
214
215 /**
216 * @brief This function dequeues a node from the list
217 *
218 * @param phead List head
219 * @param moal_spin_lock A pointer to spin lock handler
220 * @param moal_spin_unlock A pointer to spin unlock handler
221 *
222 * @return List node
223 */
util_dequeue_list(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))224 static INLINE pmlan_linked_list util_dequeue_list(
225 t_void *pmoal_handle, pmlan_list_head phead,
226 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
227 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
228 {
229 pmlan_linked_list pnode;
230
231 if (moal_spin_lock)
232 moal_spin_lock(pmoal_handle, phead->plock);
233 pnode = phead->pnext;
234 if (pnode && (pnode != (pmlan_linked_list)phead))
235 util_unlink_list(pmoal_handle, phead, pnode, MNULL, MNULL);
236 else
237 pnode = MNULL;
238 if (moal_spin_unlock)
239 moal_spin_unlock(pmoal_handle, phead->plock);
240 return pnode;
241 }
242
/** Access controlled scalar variable */
typedef struct _mlan_scalar {
	/** Value */
	t_s32 value;
	/** Pointer to lock (owned or shared, see flags) */
	t_void *plock;
	/** Control flags (e.g. MLAN_SCALAR_FLAG_UNIQUE_LOCK) */
	t_u32 flags;
} mlan_scalar, *pmlan_scalar;
252
/** Flag set when the scalar created (and thus owns) its own lock */
#define MLAN_SCALAR_FLAG_UNIQUE_LOCK MBIT(16)
255
/** Comparison operators for util_scalar_conditional_write() */
typedef enum _MLAN_SCALAR_CONDITIONAL {
	/** current value == compare value */
	MLAN_SCALAR_COND_EQUAL,
	/** current value != compare value */
	MLAN_SCALAR_COND_NOT_EQUAL,
	/** current value > compare value */
	MLAN_SCALAR_COND_GREATER_THAN,
	/** current value >= compare value */
	MLAN_SCALAR_COND_GREATER_OR_EQUAL,
	/** current value < compare value */
	MLAN_SCALAR_COND_LESS_THAN,
	/** current value <= compare value */
	MLAN_SCALAR_COND_LESS_OR_EQUAL
} MLAN_SCALAR_CONDITIONAL;
265
266 /**
267 * @brief This function initializes a scalar
268 *
269 * @param pscalar Pointer to scalar
270 * @param val Initial scalar value
271 * @param plock_to_use A new lock is created if NULL, else lock to use
272 * @param moal_init_lock A pointer to init lock handler
273 *
274 * @return N/A
275 */
276 static INLINE t_void
util_scalar_init(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 val,t_void * plock_to_use,mlan_status (* moal_init_lock)(t_void * handle,t_void ** pplock))277 util_scalar_init(t_void *pmoal_handle, pmlan_scalar pscalar, t_s32 val,
278 t_void *plock_to_use,
279 mlan_status (*moal_init_lock)(t_void *handle, t_void **pplock))
280 {
281 pscalar->value = val;
282 pscalar->flags = 0;
283 if (plock_to_use) {
284 pscalar->flags &= ~MLAN_SCALAR_FLAG_UNIQUE_LOCK;
285 pscalar->plock = plock_to_use;
286 } else {
287 pscalar->flags |= MLAN_SCALAR_FLAG_UNIQUE_LOCK;
288 moal_init_lock(pmoal_handle, &pscalar->plock);
289 }
290 }
291
292 /**
293 * @brief This function frees a scalar
294 *
295 * @param pscalar Pointer to scalar
296 * @param moal_free_lock A pointer to free lock handler
297 *
298 * @return N/A
299 */
300 static INLINE t_void
util_scalar_free(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_free_lock)(t_void * handle,t_void * plock))301 util_scalar_free(t_void *pmoal_handle, pmlan_scalar pscalar,
302 mlan_status (*moal_free_lock)(t_void *handle, t_void *plock))
303 {
304 if (pscalar->flags & MLAN_SCALAR_FLAG_UNIQUE_LOCK)
305 moal_free_lock(pmoal_handle, pscalar->plock);
306 }
307
308 /**
309 * @brief This function reads value from scalar
310 *
311 * @param pscalar Pointer to scalar
312 * @param moal_spin_lock A pointer to spin lock handler
313 * @param moal_spin_unlock A pointer to spin unlock handler
314 *
315 * @return Stored value
316 */
317 static INLINE t_s32
util_scalar_read(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))318 util_scalar_read(t_void *pmoal_handle, pmlan_scalar pscalar,
319 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
320 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
321 {
322 t_s32 val;
323
324 if (moal_spin_lock)
325 moal_spin_lock(pmoal_handle, pscalar->plock);
326 val = pscalar->value;
327 if (moal_spin_unlock)
328 moal_spin_unlock(pmoal_handle, pscalar->plock);
329
330 return val;
331 }
332
333 /**
334 * @brief This function writes value to scalar
335 *
336 * @param pscalar Pointer to scalar
337 * @param val Value to write
338 * @param moal_spin_lock A pointer to spin lock handler
339 * @param moal_spin_unlock A pointer to spin unlock handler
340 *
341 * @return N/A
342 */
util_scalar_write(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 val,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))343 static INLINE t_void util_scalar_write(
344 t_void *pmoal_handle, pmlan_scalar pscalar, t_s32 val,
345 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
346 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
347 {
348 if (moal_spin_lock)
349 moal_spin_lock(pmoal_handle, pscalar->plock);
350 pscalar->value = val;
351 if (moal_spin_unlock)
352 moal_spin_unlock(pmoal_handle, pscalar->plock);
353 }
354
355 /**
356 * @brief This function increments the value in scalar
357 *
358 * @param pscalar Pointer to scalar
359 * @param moal_spin_lock A pointer to spin lock handler
360 * @param moal_spin_unlock A pointer to spin unlock handler
361 *
362 * @return N/A
363 */
util_scalar_increment(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))364 static INLINE t_void util_scalar_increment(
365 t_void *pmoal_handle, pmlan_scalar pscalar,
366 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
367 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
368 {
369 if (moal_spin_lock)
370 moal_spin_lock(pmoal_handle, pscalar->plock);
371 pscalar->value++;
372 if (moal_spin_unlock)
373 moal_spin_unlock(pmoal_handle, pscalar->plock);
374 }
375
376 /**
377 * @brief This function decrements the value in scalar
378 *
379 * @param pscalar Pointer to scalar
380 * @param moal_spin_lock A pointer to spin lock handler
381 * @param moal_spin_unlock A pointer to spin unlock handler
382 *
383 * @return N/A
384 */
util_scalar_decrement(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))385 static INLINE t_void util_scalar_decrement(
386 t_void *pmoal_handle, pmlan_scalar pscalar,
387 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
388 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
389 {
390 if (moal_spin_lock)
391 moal_spin_lock(pmoal_handle, pscalar->plock);
392 pscalar->value--;
393 if (moal_spin_unlock)
394 moal_spin_unlock(pmoal_handle, pscalar->plock);
395 }
396
397 /**
398 * @brief This function adds an offset to the value in scalar,
399 * and returns the new value
400 *
401 * @param pscalar Pointer to scalar
402 * @param offset Offset value (can be negative)
403 * @param moal_spin_lock A pointer to spin lock handler
404 * @param moal_spin_unlock A pointer to spin unlock handler
405 *
406 * @return Value after offset
407 */
util_scalar_offset(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 offset,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))408 static INLINE t_s32 util_scalar_offset(
409 t_void *pmoal_handle, pmlan_scalar pscalar, t_s32 offset,
410 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
411 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
412 {
413 t_s32 newval;
414
415 if (moal_spin_lock)
416 moal_spin_lock(pmoal_handle, pscalar->plock);
417 newval = (pscalar->value += offset);
418 if (moal_spin_unlock)
419 moal_spin_unlock(pmoal_handle, pscalar->plock);
420
421 return newval;
422 }
423
424 /**
425 * @brief This function writes the value to the scalar
426 * if existing value compared with other value is true.
427 *
428 * @param pscalar Pointer to scalar
429 * @param condition Condition to check
430 * @param val_compare Value to compare against current value
431 * ((A X B), where B = val_compare)
432 * @param val_to_set Value to set if comparison is true
433 * @param moal_spin_lock A pointer to spin lock handler
434 * @param moal_spin_unlock A pointer to spin unlock handler
435 *
436 * @return Comparison result (MTRUE or MFALSE)
437 */
util_scalar_conditional_write(t_void * pmoal_handle,pmlan_scalar pscalar,MLAN_SCALAR_CONDITIONAL condition,t_s32 val_compare,t_s32 val_to_set,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))438 static INLINE t_u8 util_scalar_conditional_write(
439 t_void *pmoal_handle, pmlan_scalar pscalar,
440 MLAN_SCALAR_CONDITIONAL condition, t_s32 val_compare, t_s32 val_to_set,
441 mlan_status (*moal_spin_lock)(t_void *handle, t_void *plock),
442 mlan_status (*moal_spin_unlock)(t_void *handle, t_void *plock))
443 {
444 t_u8 update;
445 if (moal_spin_lock)
446 moal_spin_lock(pmoal_handle, pscalar->plock);
447
448 switch (condition) {
449 case MLAN_SCALAR_COND_EQUAL:
450 update = (pscalar->value == val_compare);
451 break;
452 case MLAN_SCALAR_COND_NOT_EQUAL:
453 update = (pscalar->value != val_compare);
454 break;
455 case MLAN_SCALAR_COND_GREATER_THAN:
456 update = (pscalar->value > val_compare);
457 break;
458 case MLAN_SCALAR_COND_GREATER_OR_EQUAL:
459 update = (pscalar->value >= val_compare);
460 break;
461 case MLAN_SCALAR_COND_LESS_THAN:
462 update = (pscalar->value < val_compare);
463 break;
464 case MLAN_SCALAR_COND_LESS_OR_EQUAL:
465 update = (pscalar->value <= val_compare);
466 break;
467 default:
468 update = MFALSE;
469 break;
470 }
471 if (update)
472 pscalar->value = val_to_set;
473
474 if (moal_spin_unlock)
475 moal_spin_unlock(pmoal_handle, pscalar->plock);
476 return (update) ? MTRUE : MFALSE;
477 }
478
479 /**
480 * @brief This function counts the bits of unsigned int number
481 *
482 * @param num number
483 * @return number of bits
484 */
bitcount(t_u32 num)485 static INLINE t_u32 bitcount(t_u32 num)
486 {
487 t_u32 count = 0;
488 static t_u32 nibblebits[] = {0, 1, 1, 2, 1, 2, 2, 3,
489 1, 2, 2, 3, 2, 3, 3, 4};
490 for (; num != 0; num >>= 4)
491 count += nibblebits[num & 0x0f];
492 return count;
493 }
494
495 #endif /* !_MLAN_UTIL_H_ */
496