1 /** @file mlan_util.h
2 *
3 * @brief This file contains wrappers for linked-list,
4 * spinlock and timer defines.
5 *
6 * Copyright (C) 2008-2017, Marvell International Ltd.
7 *
8 * This software file (the "File") is distributed by Marvell International
9 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
10 * (the "License"). You may use, redistribute and/or modify this File in
11 * accordance with the terms and conditions of the License, a copy of which
12 * is available by writing to the Free Software Foundation, Inc.,
13 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
14 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
15 *
16 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
18 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
19 * this warranty disclaimer.
20 */
21
22 /******************************************************
23 Change log:
24 10/28/2008: initial version
25 ******************************************************/
26
27 #ifndef _MLAN_UTIL_H_
28 #define _MLAN_UTIL_H_
29
/** Circular doubly linked list node.
 *  Embedded as the first member of queued objects so that an object pointer
 *  may be cast to pmlan_linked_list. */
typedef struct _mlan_linked_list {
	/** Pointer to previous node */
	struct _mlan_linked_list *pprev;
	/** Pointer to next node */
	struct _mlan_linked_list *pnext;
} mlan_linked_list, *pmlan_linked_list;
37
/** List head.
 *  NOTE: pprev/pnext are laid out exactly as in mlan_linked_list so a
 *  pmlan_list_head can be safely cast to pmlan_linked_list (the list
 *  helpers below rely on this). Keep these two members first. */
typedef struct _mlan_list_head {
	/** Pointer to previous node */
	struct _mlan_linked_list *pprev;
	/** Pointer to next node */
	struct _mlan_linked_list *pnext;
	/** Pointer to lock (0 when the list is unlocked) */
	t_void *plock;
} mlan_list_head, *pmlan_list_head;
47
48 /**
49 * @brief This function initializes a list without locking
50 *
51 * @param phead List head
52 *
53 * @return N/A
54 */
55 static INLINE t_void
util_init_list(pmlan_linked_list phead)56 util_init_list(pmlan_linked_list phead)
57 {
58 /* Both next and prev point to self */
59 phead->pprev = phead->pnext = (pmlan_linked_list)phead;
60 }
61
62 /**
63 * @brief This function initializes a list
64 *
65 * @param phead List head
66 * @param lock_required A flag for spinlock requirement
67 * @param moal_init_lock A pointer to init lock handler
68 *
69 * @return N/A
70 */
71 static INLINE t_void
util_init_list_head(t_void * pmoal_handle,pmlan_list_head phead,t_u8 lock_required,mlan_status (* moal_init_lock)(t_void * handle,t_void ** pplock))72 util_init_list_head(t_void *pmoal_handle,
73 pmlan_list_head phead,
74 t_u8 lock_required,
75 mlan_status (*moal_init_lock) (t_void *handle,
76 t_void **pplock))
77 {
78 /* Both next and prev point to self */
79 util_init_list((pmlan_linked_list)phead);
80 if (lock_required)
81 moal_init_lock(pmoal_handle, &phead->plock);
82 else
83 phead->plock = 0;
84 }
85
86 /**
87 * @brief This function frees a list
88 *
89 * @param phead List head
90 * @param moal_free_lock A pointer to free lock handler
91 *
92 * @return N/A
93 */
94 static INLINE t_void
util_free_list_head(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_free_lock)(t_void * handle,t_void * plock))95 util_free_list_head(t_void *pmoal_handle,
96 pmlan_list_head phead,
97 mlan_status (*moal_free_lock) (t_void *handle,
98 t_void *plock))
99 {
100 phead->pprev = phead->pnext = 0;
101 if (phead->plock)
102 moal_free_lock(pmoal_handle, phead->plock);
103 }
104
105 /**
106 * @brief This function peeks into a list
107 *
108 * @param phead List head
109 * @param moal_spin_lock A pointer to spin lock handler
110 * @param moal_spin_unlock A pointer to spin unlock handler
111 *
112 * @return List node
113 */
114 static INLINE pmlan_linked_list
util_peek_list(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))115 util_peek_list(t_void *pmoal_handle,
116 pmlan_list_head phead,
117 mlan_status (*moal_spin_lock) (t_void *handle, t_void *plock),
118 mlan_status (*moal_spin_unlock) (t_void *handle, t_void *plock))
119 {
120 pmlan_linked_list pnode = 0;
121
122 if (moal_spin_lock)
123 moal_spin_lock(pmoal_handle, phead->plock);
124 if (phead->pnext != (pmlan_linked_list)phead)
125 pnode = phead->pnext;
126 if (moal_spin_unlock)
127 moal_spin_unlock(pmoal_handle, phead->plock);
128 return pnode;
129 }
130
131 /**
132 * @brief This function queues a node at the list tail
133 *
134 * @param phead List head
135 * @param pnode List node to queue
136 * @param moal_spin_lock A pointer to spin lock handler
137 * @param moal_spin_unlock A pointer to spin unlock handler
138 *
139 * @return N/A
140 */
141 static INLINE t_void
util_enqueue_list_tail(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))142 util_enqueue_list_tail(t_void *pmoal_handle,
143 pmlan_list_head phead,
144 pmlan_linked_list pnode,
145 mlan_status (*moal_spin_lock) (t_void *handle,
146 t_void *plock),
147 mlan_status (*moal_spin_unlock) (t_void *handle,
148 t_void *plock))
149 {
150 pmlan_linked_list pold_last;
151
152 if (moal_spin_lock)
153 moal_spin_lock(pmoal_handle, phead->plock);
154 pold_last = phead->pprev;
155 pnode->pprev = pold_last;
156 pnode->pnext = (pmlan_linked_list)phead;
157
158 phead->pprev = pold_last->pnext = pnode;
159 if (moal_spin_unlock)
160 moal_spin_unlock(pmoal_handle, phead->plock);
161 }
162
163 /**
164 * @brief This function adds a node at the list head
165 *
166 * @param phead List head
167 * @param pnode List node to add
168 * @param moal_spin_lock A pointer to spin lock handler
169 * @param moal_spin_unlock A pointer to spin unlock handler
170 *
171 * @return N/A
172 */
173 static INLINE t_void
util_enqueue_list_head(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))174 util_enqueue_list_head(t_void *pmoal_handle,
175 pmlan_list_head phead,
176 pmlan_linked_list pnode,
177 mlan_status (*moal_spin_lock) (t_void *handle,
178 t_void *plock),
179 mlan_status (*moal_spin_unlock) (t_void *handle,
180 t_void *plock))
181 {
182 pmlan_linked_list pold_first;
183
184 if (moal_spin_lock)
185 moal_spin_lock(pmoal_handle, phead->plock);
186 pold_first = phead->pnext;
187 pnode->pprev = (pmlan_linked_list)phead;
188 pnode->pnext = pold_first;
189
190 phead->pnext = pold_first->pprev = pnode;
191 if (moal_spin_unlock)
192 moal_spin_unlock(pmoal_handle, phead->plock);
193 }
194
195 /**
196 * @brief This function removes a node from the list
197 *
198 * @param phead List head
199 * @param pnode List node to remove
200 * @param moal_spin_lock A pointer to spin lock handler
201 * @param moal_spin_unlock A pointer to spin unlock handler
202 *
203 * @return N/A
204 */
205 static INLINE t_void
util_unlink_list(t_void * pmoal_handle,pmlan_list_head phead,pmlan_linked_list pnode,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))206 util_unlink_list(t_void *pmoal_handle,
207 pmlan_list_head phead,
208 pmlan_linked_list pnode,
209 mlan_status (*moal_spin_lock) (t_void *handle, t_void *plock),
210 mlan_status (*moal_spin_unlock) (t_void *handle,
211 t_void *plock))
212 {
213 pmlan_linked_list pmy_prev;
214 pmlan_linked_list pmy_next;
215
216 if (moal_spin_lock)
217 moal_spin_lock(pmoal_handle, phead->plock);
218 pmy_prev = pnode->pprev;
219 pmy_next = pnode->pnext;
220 pmy_next->pprev = pmy_prev;
221 pmy_prev->pnext = pmy_next;
222
223 pnode->pnext = pnode->pprev = 0;
224 if (moal_spin_unlock)
225 moal_spin_unlock(pmoal_handle, phead->plock);
226 }
227
228 /**
229 * @brief This function dequeues a node from the list
230 *
231 * @param phead List head
232 * @param moal_spin_lock A pointer to spin lock handler
233 * @param moal_spin_unlock A pointer to spin unlock handler
234 *
235 * @return List node
236 */
237 static INLINE pmlan_linked_list
util_dequeue_list(t_void * pmoal_handle,pmlan_list_head phead,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))238 util_dequeue_list(t_void *pmoal_handle,
239 pmlan_list_head phead,
240 mlan_status (*moal_spin_lock) (t_void *handle, t_void *plock),
241 mlan_status (*moal_spin_unlock) (t_void *handle,
242 t_void *plock))
243 {
244 pmlan_linked_list pnode;
245
246 if (moal_spin_lock)
247 moal_spin_lock(pmoal_handle, phead->plock);
248 pnode = phead->pnext;
249 if (pnode && (pnode != (pmlan_linked_list)phead))
250 util_unlink_list(pmoal_handle, phead, pnode, 0, 0);
251 else
252 pnode = 0;
253 if (moal_spin_unlock)
254 moal_spin_unlock(pmoal_handle, phead->plock);
255 return pnode;
256 }
257
/** Access controlled scalar variable.
 *  A t_s32 whose reads/writes are serialized by an optional spinlock. */
typedef struct _mlan_scalar {
	/** Value */
	t_s32 value;
	/** Pointer to lock (owned iff MLAN_SCALAR_FLAG_UNIQUE_LOCK is set) */
	t_void *plock;
	/** Control flags */
	t_u32 flags;
} mlan_scalar, *pmlan_scalar;
267
/** Flag set when the scalar owns its lock (created in util_scalar_init,
 *  so util_scalar_free must release it) */
#define MLAN_SCALAR_FLAG_UNIQUE_LOCK	MBIT(16)

/** scalar conditional value list (comparison selectors for
 *  util_scalar_conditional_write) */
typedef enum _MLAN_SCALAR_CONDITIONAL {
	MLAN_SCALAR_COND_EQUAL,
	MLAN_SCALAR_COND_NOT_EQUAL,
	MLAN_SCALAR_COND_GREATER_THAN,
	MLAN_SCALAR_COND_GREATER_OR_EQUAL,
	MLAN_SCALAR_COND_LESS_THAN,
	MLAN_SCALAR_COND_LESS_OR_EQUAL
} MLAN_SCALAR_CONDITIONAL;
280
281 /**
282 * @brief This function initializes a scalar
283 *
284 * @param pscalar Pointer to scalar
285 * @param val Initial scalar value
286 * @param plock_to_use A new lock is created if NULL, else lock to use
287 * @param moal_init_lock A pointer to init lock handler
288 *
289 * @return N/A
290 */
291 static INLINE t_void
util_scalar_init(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 val,t_void * plock_to_use,mlan_status (* moal_init_lock)(t_void * handle,t_void ** pplock))292 util_scalar_init(t_void *pmoal_handle,
293 pmlan_scalar pscalar,
294 t_s32 val,
295 t_void *plock_to_use,
296 mlan_status (*moal_init_lock) (t_void *handle,
297 t_void **pplock))
298 {
299 pscalar->value = val;
300 pscalar->flags = 0;
301 if (plock_to_use) {
302 pscalar->flags &= ~MLAN_SCALAR_FLAG_UNIQUE_LOCK;
303 pscalar->plock = plock_to_use;
304 } else {
305 pscalar->flags |= MLAN_SCALAR_FLAG_UNIQUE_LOCK;
306 moal_init_lock(pmoal_handle, &pscalar->plock);
307 }
308 }
309
310 /**
311 * @brief This function frees a scalar
312 *
313 * @param pscalar Pointer to scalar
314 * @param moal_free_lock A pointer to free lock handler
315 *
316 * @return N/A
317 */
318 static INLINE t_void
util_scalar_free(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_free_lock)(t_void * handle,t_void * plock))319 util_scalar_free(t_void *pmoal_handle,
320 pmlan_scalar pscalar,
321 mlan_status (*moal_free_lock) (t_void *handle, t_void *plock))
322 {
323 if (pscalar->flags & MLAN_SCALAR_FLAG_UNIQUE_LOCK)
324 moal_free_lock(pmoal_handle, pscalar->plock);
325 }
326
327 /**
328 * @brief This function reads value from scalar
329 *
330 * @param pscalar Pointer to scalar
331 * @param moal_spin_lock A pointer to spin lock handler
332 * @param moal_spin_unlock A pointer to spin unlock handler
333 *
334 * @return Stored value
335 */
336 static INLINE t_s32
util_scalar_read(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))337 util_scalar_read(t_void *pmoal_handle,
338 pmlan_scalar pscalar,
339 mlan_status (*moal_spin_lock) (t_void *handle, t_void *plock),
340 mlan_status (*moal_spin_unlock) (t_void *handle,
341 t_void *plock))
342 {
343 t_s32 val;
344
345 if (moal_spin_lock)
346 moal_spin_lock(pmoal_handle, pscalar->plock);
347 val = pscalar->value;
348 if (moal_spin_unlock)
349 moal_spin_unlock(pmoal_handle, pscalar->plock);
350
351 return val;
352 }
353
354 /**
355 * @brief This function writes value to scalar
356 *
357 * @param pscalar Pointer to scalar
358 * @param val Value to write
359 * @param moal_spin_lock A pointer to spin lock handler
360 * @param moal_spin_unlock A pointer to spin unlock handler
361 *
362 * @return N/A
363 */
364 static INLINE t_void
util_scalar_write(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 val,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))365 util_scalar_write(t_void *pmoal_handle,
366 pmlan_scalar pscalar,
367 t_s32 val,
368 mlan_status (*moal_spin_lock) (t_void *handle, t_void *plock),
369 mlan_status (*moal_spin_unlock) (t_void *handle,
370 t_void *plock))
371 {
372 if (moal_spin_lock)
373 moal_spin_lock(pmoal_handle, pscalar->plock);
374 pscalar->value = val;
375 if (moal_spin_unlock)
376 moal_spin_unlock(pmoal_handle, pscalar->plock);
377 }
378
379 /**
380 * @brief This function increments the value in scalar
381 *
382 * @param pscalar Pointer to scalar
383 * @param moal_spin_lock A pointer to spin lock handler
384 * @param moal_spin_unlock A pointer to spin unlock handler
385 *
386 * @return N/A
387 */
388 static INLINE t_void
util_scalar_increment(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))389 util_scalar_increment(t_void *pmoal_handle,
390 pmlan_scalar pscalar,
391 mlan_status (*moal_spin_lock) (t_void *handle,
392 t_void *plock),
393 mlan_status (*moal_spin_unlock) (t_void *handle,
394 t_void *plock))
395 {
396 if (moal_spin_lock)
397 moal_spin_lock(pmoal_handle, pscalar->plock);
398 pscalar->value++;
399 if (moal_spin_unlock)
400 moal_spin_unlock(pmoal_handle, pscalar->plock);
401 }
402
403 /**
404 * @brief This function decrements the value in scalar
405 *
406 * @param pscalar Pointer to scalar
407 * @param moal_spin_lock A pointer to spin lock handler
408 * @param moal_spin_unlock A pointer to spin unlock handler
409 *
410 * @return N/A
411 */
412 static INLINE t_void
util_scalar_decrement(t_void * pmoal_handle,pmlan_scalar pscalar,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))413 util_scalar_decrement(t_void *pmoal_handle,
414 pmlan_scalar pscalar,
415 mlan_status (*moal_spin_lock) (t_void *handle,
416 t_void *plock),
417 mlan_status (*moal_spin_unlock) (t_void *handle,
418 t_void *plock))
419 {
420 if (moal_spin_lock)
421 moal_spin_lock(pmoal_handle, pscalar->plock);
422 pscalar->value--;
423 if (moal_spin_unlock)
424 moal_spin_unlock(pmoal_handle, pscalar->plock);
425 }
426
427 /**
428 * @brief This function adds an offset to the value in scalar,
429 * and returns the new value
430 *
431 * @param pscalar Pointer to scalar
432 * @param offset Offset value (can be negative)
433 * @param moal_spin_lock A pointer to spin lock handler
434 * @param moal_spin_unlock A pointer to spin unlock handler
435 *
436 * @return Value after offset
437 */
438 static INLINE t_s32
util_scalar_offset(t_void * pmoal_handle,pmlan_scalar pscalar,t_s32 offset,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))439 util_scalar_offset(t_void *pmoal_handle,
440 pmlan_scalar pscalar,
441 t_s32 offset,
442 mlan_status (*moal_spin_lock) (t_void *handle,
443 t_void *plock),
444 mlan_status (*moal_spin_unlock) (t_void *handle,
445 t_void *plock))
446 {
447 t_s32 newval;
448
449 if (moal_spin_lock)
450 moal_spin_lock(pmoal_handle, pscalar->plock);
451 newval = (pscalar->value += offset);
452 if (moal_spin_unlock)
453 moal_spin_unlock(pmoal_handle, pscalar->plock);
454
455 return newval;
456 }
457
458 /**
459 * @brief This function writes the value to the scalar
460 * if existing value compared with other value is true.
461 *
462 * @param pscalar Pointer to scalar
463 * @param condition Condition to check
464 * @param val_compare Value to compare against current value
465 * ((A X B), where B = val_compare)
466 * @param val_to_set Value to set if comparison is true
467 * @param moal_spin_lock A pointer to spin lock handler
468 * @param moal_spin_unlock A pointer to spin unlock handler
469 *
470 * @return Comparison result (MTRUE or MFALSE)
471 */
472 static INLINE t_u8
util_scalar_conditional_write(t_void * pmoal_handle,pmlan_scalar pscalar,MLAN_SCALAR_CONDITIONAL condition,t_s32 val_compare,t_s32 val_to_set,mlan_status (* moal_spin_lock)(t_void * handle,t_void * plock),mlan_status (* moal_spin_unlock)(t_void * handle,t_void * plock))473 util_scalar_conditional_write(t_void *pmoal_handle,
474 pmlan_scalar pscalar,
475 MLAN_SCALAR_CONDITIONAL condition,
476 t_s32 val_compare,
477 t_s32 val_to_set,
478 mlan_status (*moal_spin_lock) (t_void *handle,
479 t_void *plock),
480 mlan_status (*moal_spin_unlock) (t_void *handle,
481 t_void *plock))
482 {
483 t_u8 update;
484 if (moal_spin_lock)
485 moal_spin_lock(pmoal_handle, pscalar->plock);
486
487 switch (condition) {
488 case MLAN_SCALAR_COND_EQUAL:
489 update = (pscalar->value == val_compare);
490 break;
491 case MLAN_SCALAR_COND_NOT_EQUAL:
492 update = (pscalar->value != val_compare);
493 break;
494 case MLAN_SCALAR_COND_GREATER_THAN:
495 update = (pscalar->value > val_compare);
496 break;
497 case MLAN_SCALAR_COND_GREATER_OR_EQUAL:
498 update = (pscalar->value >= val_compare);
499 break;
500 case MLAN_SCALAR_COND_LESS_THAN:
501 update = (pscalar->value < val_compare);
502 break;
503 case MLAN_SCALAR_COND_LESS_OR_EQUAL:
504 update = (pscalar->value <= val_compare);
505 break;
506 default:
507 update = MFALSE;
508 break;
509 }
510 if (update)
511 pscalar->value = val_to_set;
512
513 if (moal_spin_unlock)
514 moal_spin_unlock(pmoal_handle, pscalar->plock);
515 return (update) ? MTRUE : MFALSE;
516 }
517
518 /**
519 * @brief This function counts the bits of unsigned int number
520 *
521 * @param num number
522 * @return number of bits
523 */
524 static t_u32 INLINE
bitcount(t_u32 num)525 bitcount(t_u32 num)
526 {
527 t_u32 count = 0;
528 static t_u32 nibblebits[] = {
529 0, 1, 1, 2, 1, 2, 2, 3,
530 1, 2, 2, 3, 2, 3, 3, 4
531 };
532 for (; num != 0; num >>= 4)
533 count += nibblebits[num & 0x0f];
534 return count;
535 }
536
537 #endif /* !_MLAN_UTIL_H_ */
538