/******************************************************************************
 *
 * Copyright(c) 2019 - 2020 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#ifndef _PLTFM_OPS_LINUX_H_
#define _PLTFM_OPS_LINUX_H_
#include "drv_types.h"

#ifdef CONFIG_PLATFORM_AML_S905
extern struct device *g_pcie_reserved_mem_dev;
#endif

static inline char *_os_strpbrk(const char *s, const char *ct)
{
        return strpbrk(s, ct);
}

static inline char *_os_strsep(char **s, const char *ct)
{
        return strsep(s, ct);
}

#if 1
#define _os_sscanf(buf, fmt, ...) sscanf(buf, fmt, ##__VA_ARGS__)
#else
static inline int _os_sscanf(const char *buf, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsscanf(buf, fmt, args);
        va_end(args);
        return i;
}
#endif
static inline int _os_strcmp(const char *s1, const char *s2)
{
        return strcmp(s1, s2);
}
static inline int _os_strncmp(const char *s1, const char *s2, size_t n)
{
        return strncmp(s1, s2, n);
}
static inline char *_os_strcpy(char *dest, const char *src)
{
        return strcpy(dest, src);
}
static inline char *_os_strncpy(char *dest, const char *src, size_t n)
{
        return strncpy(dest, src, n);
}
#if 1
#define _os_strchr(s, c) strchr(s, c)
#else
static inline char *_os_strchr(const char *s, int c)
{
        return strchr(s, c);
}
#endif

#if 1
#define _os_snprintf(s, sz, fmt, ...) snprintf(s, sz, fmt, ##__VA_ARGS__)
#define _os_vsnprintf(str, size, fmt, args) vsnprintf(str, size, fmt, args)
#else
static int _os_snprintf(char *str, size_t size, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(str, size, fmt, args);
        va_end(args);
        if (size > 0)
                str[size - 1] = '\0';
        return ret;
}
#endif

static inline u32 _os_strlen(u8 *buf)
{
        return strlen((const char *)buf);
}
static inline void _os_delay_ms(void *d, u32 ms)
{
        rtw_mdelay_os(ms);
}
static inline void _os_delay_us(void *d, u32 us)
{
        rtw_udelay_os(us);
}
static inline void _os_sleep_ms(void *d, u32 ms)
{
        rtw_msleep_os(ms);
}
static inline void _os_sleep_us(void *d, u32 us)
{
        rtw_usleep_os(us);
}
static inline u32 _os_get_cur_time_us(void)
{
        return rtw_systime_to_us(rtw_get_current_time());
}
static inline u32 _os_get_cur_time_ms(void)
{
        return rtw_systime_to_ms(rtw_get_current_time());
}

static inline u64 _os_modular64(u64 x, u64 y)
{
        /*return do_div(x, y);*/
        return rtw_modular64(x, y);
}
static inline u64 _os_division64(u64 x, u64 y)
{
        /*return do_div(x, y);*/
        return rtw_division64(x, y);
}
static inline u32 _os_div_round_up(u32 x, u32 y)
{
        return RTW_DIV_ROUND_UP(x, y);
}

#ifdef CONFIG_PCI_HCI
static inline void _os_cache_inv(void *d, _dma *bus_addr_l,
                                 _dma *bus_addr_h, u32 buf_sz, u8 direction)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;

        pci_cache_inv(pdev, bus_addr_l, buf_sz, direction);
}

static inline void _os_cache_wback(void *d, _dma *bus_addr_l,
                                   _dma *bus_addr_h, u32 buf_sz, u8 direction)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;

        pci_cache_wback(pdev, bus_addr_l, buf_sz, direction);
}

static inline void *_os_dma_pool_create(void *d, char *name, u32 wd_page_sz)
{
        struct dvobj_priv *dvobj = (struct dvobj_priv *)d;

        return pci_create_dma_pool(dvobj->pci_data.ppcidev, name, wd_page_sz);
}

static inline void _os_dma_pool_destory(void *d, void *pool)
{
        struct dvobj_priv *dvobj = (struct dvobj_priv *)d;

        pci_destory_dma_pool(dvobj->pci_data.ppcidev, (struct dma_pool *)pool);
}

/* txbd, rxbd, wd */
static inline void *_os_shmem_alloc(void *d, void *pool, _dma *bus_addr_l,
                                    _dma *bus_addr_h, u32 buf_sz,
                                    u8 cache, u8 direction, void **os_rsvd)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;

        if (cache == DMA_ADDR)
                return pci_alloc_noncache_mem(pdev, bus_addr_l, buf_sz);
        else if (cache == POOL_ADDR)
                return pci_zalloc_pool_mem(pdev, (struct dma_pool *)pool, bus_addr_l);
        else
                return pci_alloc_cache_mem(pdev, bus_addr_l, buf_sz, direction);

        return NULL;
}

static inline void _os_shmem_free(void *d, void *pool, u8 *vir_addr, _dma *bus_addr_l,
                                  _dma *bus_addr_h, u32 buf_sz,
                                  u8 cache, u8 direction, void *os_rsvd)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;

        if (cache == DMA_ADDR)
                return pci_free_noncache_mem(pdev, vir_addr, bus_addr_l, buf_sz);
        else if (cache == POOL_ADDR)
                return pci_free_pool_mem(pdev, (struct dma_pool *)pool, vir_addr, bus_addr_l);
        else
                return pci_free_cache_mem(pdev, vir_addr, bus_addr_l, buf_sz, direction);
}
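
/*
 * Usage sketch (illustrative only, not part of the driver API): allocating
 * and releasing a non-cacheable descriptor ring through the wrappers above.
 * The names ring_va, ring_dma_l, ring_dma_h and RING_SZ are hypothetical;
 * the direction argument is ignored for DMA_ADDR (non-cacheable) buffers.
 *
 *      _dma ring_dma_l = 0, ring_dma_h = 0;
 *      u8 *ring_va;
 *
 *      ring_va = _os_shmem_alloc(d, NULL, &ring_dma_l, &ring_dma_h,
 *                                RING_SZ, DMA_ADDR, 0, NULL);
 *      ...
 *      _os_shmem_free(d, NULL, ring_va, &ring_dma_l, &ring_dma_h,
 *                     RING_SZ, DMA_ADDR, 0, NULL);
 */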
#endif /*CONFIG_PCI_HCI*/

static inline void *_os_pkt_buf_unmap_rx(void *d, _dma bus_addr_l, _dma bus_addr_h, u32 buf_sz)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/

#ifdef CONFIG_PCI_HCI
#ifdef CONFIG_PLATFORM_AML_S905
        if (g_pcie_reserved_mem_dev)
                pdev->dev.dma_mask = NULL;
#endif
        pci_unmap_single(pdev, bus_addr_l, buf_sz, PCI_DMA_FROMDEVICE);
#endif

#ifdef RTW_CORE_RECORD
        phl_add_record(d, REC_RX_UNMAP, bus_addr_l, buf_sz);
#endif
        return NULL;
}

static inline void *_os_pkt_buf_map_rx(void *d, _dma *bus_addr_l, _dma *bus_addr_h,
                                       u32 buf_sz, void *os_priv)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;
        struct sk_buff *skb = os_priv;

#ifdef CONFIG_PLATFORM_AML_S905
        if (g_pcie_reserved_mem_dev)
                pdev->dev.dma_mask = NULL;
#endif
        *bus_addr_l = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
        /* *bus_addr_h = NULL;*/
#endif /*CONFIG_PCI_HCI*/

        return NULL;
}

#ifdef CONFIG_PCI_HCI
static inline struct sk_buff *_os_alloc_noncashe_skb(struct pci_dev *pdev, u32 buf_sz)
{
        struct sk_buff *skb = NULL;
        unsigned char *data = NULL;

        skb = rtw_zmalloc(sizeof(struct sk_buff));
        if (!skb)
                goto out;

        data = pci_alloc_noncache_mem(pdev, (dma_addr_t *)&skb->cb, buf_sz);

        if (!data)
                goto nodata;

        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + buf_sz;
        skb->len = buf_sz;
out:
        return skb;
nodata:
        _rtw_mfree(skb, sizeof(struct sk_buff));
        skb = NULL;
        goto out;
}

static inline void _os_free_noncashe_skb(struct pci_dev *pdev,
                                         struct sk_buff *skb, u32 buf_sz)
{
        /* skb buffer */
        pci_free_noncache_mem(pdev, skb->data, (dma_addr_t *)skb->cb, buf_sz);

        /* skb */
        rtw_mfree(skb, sizeof(struct sk_buff));
}
#endif /*CONFIG_PCI_HCI*/

/* rxbuf */
#define PHL_RX_HEADROOM 0
static inline void *_os_pkt_buf_alloc_rx(void *d, _dma *bus_addr_l,
                                         _dma *bus_addr_h, u32 buf_sz, u8 cache, void **os_priv)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/
        struct sk_buff *skb = NULL;
        u32 rxbuf_size = buf_sz + PHL_RX_HEADROOM;

        if (cache)
                skb = rtw_skb_alloc(rxbuf_size);
#ifdef CONFIG_PCI_HCI
        else
                skb = _os_alloc_noncashe_skb(pdev, rxbuf_size);
#endif
        if (!skb)
                return NULL;

        //skb_pull(skb, PHL_RX_HEADROOM);
#ifdef CONFIG_PCI_HCI
#ifdef CONFIG_PLATFORM_AML_S905
        if (g_pcie_reserved_mem_dev)
                pdev->dev.dma_mask = NULL;
#endif
        if (cache)
                *bus_addr_l = pci_map_single(pdev, skb->data,
                                             rxbuf_size, PCI_DMA_FROMDEVICE);
        else
                *bus_addr_l = *(dma_addr_t *)skb->cb;
        /* *bus_addr_h = NULL;*/
#endif /*CONFIG_PCI_HCI*/
        *os_priv = skb;

        return skb->data;
}

static inline void _os_pkt_buf_free_rx(void *d, u8 *vir_addr, _dma bus_addr_l,
                                       _dma bus_addr_h, u32 buf_sz, u8 cache, void *os_priv)
{
        struct dvobj_priv *pobj = (struct dvobj_priv *)d;
#ifdef CONFIG_PCI_HCI
        PPCI_DATA pci_data = dvobj_to_pci(pobj);
        struct pci_dev *pdev = pci_data->ppcidev;
#endif /*CONFIG_PCI_HCI*/
        struct sk_buff *skb = (struct sk_buff *)os_priv;

#ifdef CONFIG_PCI_HCI
#ifdef CONFIG_PLATFORM_AML_S905
        if (g_pcie_reserved_mem_dev)
                pdev->dev.dma_mask = NULL;
#endif
        if (cache)
                pci_unmap_single(pdev, bus_addr_l, buf_sz, PCI_DMA_FROMDEVICE);

        if (!cache)
                _os_free_noncashe_skb(pdev, skb, buf_sz);
        else
#endif /*CONFIG_PCI_HCI*/
                rtw_skb_free(skb);
}
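
/*
 * Usage sketch (illustrative only): typical life cycle of a cached RX buffer
 * on the PCI path. The names buf_va, busaddr_l, busaddr_h, skb_priv and
 * RX_BUF_SZ are hypothetical.
 *
 *      _dma busaddr_l = 0, busaddr_h = 0;
 *      void *skb_priv;
 *      u8 *buf_va;
 *
 *      buf_va = _os_pkt_buf_alloc_rx(d, &busaddr_l, &busaddr_h,
 *                                    RX_BUF_SZ, 1, &skb_priv);
 *      ... hand busaddr_l to the RX ring and wait for DMA completion ...
 *      _os_pkt_buf_unmap_rx(d, busaddr_l, busaddr_h, RX_BUF_SZ);
 *      ... or, when the buffer is dropped instead of indicated ...
 *      _os_pkt_buf_free_rx(d, buf_va, busaddr_l, busaddr_h, RX_BUF_SZ, 1, skb_priv);
 */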

/* phl pre-alloc network layer buffer */
static inline void *_os_alloc_netbuf(void *d, u32 buf_sz, void **os_priv)
{
        return _os_pkt_buf_alloc_rx(d, NULL, NULL, buf_sz, true, os_priv);
}

/* Free netbuf for error case. (ex. drop rx-reorder packet) */
static inline void _os_free_netbuf(void *d, u8 *vir_addr, u32 buf_sz, void *os_priv)
{
        _os_pkt_buf_free_rx(d, vir_addr, 0, 0, buf_sz, true, os_priv);
}

/*virtually contiguous memory*/
static inline void *_os_mem_alloc(void *d, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
        struct dvobj_priv *obj = (struct dvobj_priv *)d;

        ATOMIC_ADD_RETURN(&obj->phl_mem, buf_sz);
#endif

#ifdef CONFIG_PHL_USE_KMEM_ALLOC
        return rtw_zmalloc(buf_sz);
#else
        if (in_atomic()) {
                RTW_ERR("Call rtw_zvmalloc in atomic @%s:%u\n",
                        __FUNCTION__, __LINE__);
                dump_stack();
        }
        return rtw_zvmalloc(buf_sz);
#endif
}

/*virtually contiguous memory*/
static inline void _os_mem_free(void *d, void *buf, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
        struct dvobj_priv *obj = (struct dvobj_priv *)d;

        ATOMIC_SUB(&obj->phl_mem, buf_sz);
#endif

#ifdef CONFIG_PHL_USE_KMEM_ALLOC
        rtw_mfree(buf, buf_sz);
#else
        if (in_atomic()) {
                RTW_ERR("Call rtw_vmfree in atomic @%s:%u\n",
                        __FUNCTION__, __LINE__);
                dump_stack();
        }
        rtw_vmfree(buf, buf_sz);
#endif
}

/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void *_os_kmem_alloc(void *d, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
        struct dvobj_priv *obj = (struct dvobj_priv *)d;
        ATOMIC_ADD_RETURN(&obj->phl_mem, buf_sz);
#endif
        return rtw_zmalloc(buf_sz);
}

/*physically contiguous memory if the buffer will be accessed by a DMA device*/
static inline void _os_kmem_free(void *d, void *buf, u32 buf_sz)
{
#ifdef DBG_PHL_MEM_ALLOC
        struct dvobj_priv *obj = (struct dvobj_priv *)d;
        ATOMIC_SUB(&obj->phl_mem, buf_sz);
#endif

        rtw_mfree(buf, buf_sz);
}
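
/*
 * Usage sketch (illustrative only): _os_mem_alloc()/_os_mem_free() fall back
 * to rtw_zvmalloc()/rtw_vmfree() (virtually contiguous, may sleep) unless
 * CONFIG_PHL_USE_KMEM_ALLOC is set, while _os_kmem_alloc()/_os_kmem_free()
 * always use rtw_zmalloc()/rtw_mfree() (physically contiguous, DMA-able).
 * The names tbl, desc and n below are hypothetical.
 *
 *      void *tbl  = _os_mem_alloc(d, n * 128);    large, CPU-only table
 *      void *desc = _os_kmem_alloc(d, 64);        small block a device may DMA
 *      ...
 *      _os_kmem_free(d, desc, 64);
 *      _os_mem_free(d, tbl, n * 128);
 */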
static inline void _os_mem_set(void *d, void *buf, s8 value, u32 size)
{
        _rtw_memset(buf, value, size);
}
static inline void _os_mem_cpy(void *d, void *dest, void *src, u32 size)
{
        _rtw_memcpy(dest, src, size);
}
/* Return value
 * <0 : the first non-matching byte has a lower value in dest than in src
 *      (both evaluated as unsigned char)
 *  0 : the contents of both memory blocks are equal
 * >0 : the first non-matching byte has a greater value in dest than in src
 *      (both evaluated as unsigned char)
 */
static inline int _os_mem_cmp(void *d, const void *dest, const void *src, size_t size)
{
        return memcmp(dest, src, size);
}
static inline void _os_init_timer(void *d, _os_timer *timer,
                                  void (*call_back_func)(void *context), void *context,
                                  const char *sz_id)
{
        _init_timer(timer, call_back_func, context);
}

static inline void _os_set_timer(void *d, _os_timer *timer, u32 ms_delay)
{
        _set_timer(timer, ms_delay);
}

static inline void _os_cancel_timer(void *d, _os_timer *timer)
{
        _cancel_timer_ex(timer);
}

static inline void _os_cancel_timer_async(void *d, _os_timer *timer)
{
        _cancel_timer_async(timer);
}

static inline void _os_release_timer(void *d, _os_timer *timer)
{
}
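
/*
 * Usage sketch (illustrative only): arming and tearing down a timer. The
 * callback my_timer_cb and the context pointer ctx are hypothetical.
 *
 *      static void my_timer_cb(void *context) { ... }
 *
 *      _os_timer t;
 *
 *      _os_init_timer(d, &t, my_timer_cb, ctx, "my_timer");
 *      _os_set_timer(d, &t, 100);      fires after roughly 100 ms
 *      ...
 *      _os_cancel_timer(d, &t);
 *      _os_release_timer(d, &t);       currently a no-op in this port
 */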
static inline void _os_mutex_init(void *d, _os_mutex *mutex)
{
        _rtw_mutex_init(mutex);
}

static inline void _os_mutex_deinit(void *d, _os_mutex *mutex)
{
        _rtw_mutex_free(mutex);
}

static inline void _os_mutex_lock(void *d, _os_mutex *mutex)
{
        _rtw_mutex_lock_interruptible(mutex);
}

static inline void _os_mutex_unlock(void *d, _os_mutex *mutex)
{
        _rtw_mutex_unlock(mutex);
}

static inline void _os_sema_init(void *d, _os_sema *sema, int int_cnt)
{
        _rtw_init_sema(sema, int_cnt);
}

static inline void _os_sema_free(void *d, _os_sema *sema)
{
        _rtw_free_sema(sema);
}

static inline void _os_sema_up(void *d, _os_sema *sema)
{
        _rtw_up_sema(sema);
}

static inline u8 _os_sema_down(void *d, _os_sema *sema)
{
        _rtw_down_sema(sema);
        return 0; //success
}

/* event */
static __inline void _os_event_init(void *h, _os_event *event)
{
        init_completion(event);
}

static __inline void _os_event_free(void *h, _os_event *event)
{
}

static __inline void _os_event_reset(void *h, _os_event *event)
{
        /* TODO */
}

static __inline void _os_event_set(void *h, _os_event *event)
{
        complete(event);
}

/*
 * m_sec
 *      == 0 : wait for completion
 *       > 0 : wait for timeout or completion
 * return value
 *      0 : timeout
 *      otherwise : success
 */
static __inline int _os_event_wait(void *h, _os_event *event, u32 m_sec)
{
        unsigned long expire;

        if (m_sec) {
                expire = msecs_to_jiffies(m_sec);

                if (expire > MAX_SCHEDULE_TIMEOUT)
                        expire = MAX_SCHEDULE_TIMEOUT;
        } else {
                expire = MAX_SCHEDULE_TIMEOUT;
        }

        expire = wait_for_completion_timeout(event, expire);

        if (expire == 0)
                return 0; /* timeout */

        return jiffies_to_msecs(expire); /* success */
}
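
/*
 * Usage sketch (illustrative only): signalling a completion from one context
 * and waiting for it, with a bounded timeout, in another. The event name
 * "done" is hypothetical.
 *
 *      _os_event done;
 *
 *      _os_event_init(h, &done);
 *      ... from the completing context: _os_event_set(h, &done); ...
 *      if (_os_event_wait(h, &done, 100) == 0)
 *              handle the 100 ms timeout here
 *      _os_event_free(h, &done);
 */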

/* spinlock */

static inline void _os_spinlock_init(void *d, _os_lock *plock)
{
        _rtw_spinlock_init(plock);
}
static inline void _os_spinlock_free(void *d, _os_lock *plock)
{
        _rtw_spinlock_free(plock);
}

static inline void _os_spinlock(void *d, _os_lock *plock,
                                enum lock_type type, _os_spinlockfg *flags)
{
        if (type == _irq) {
                if (flags == NULL)
                        RTW_ERR("_os_spinlock_irq: flags=NULL @%s:%u\n",
                                __FUNCTION__, __LINE__);
                _rtw_spinlock_irq(plock, flags);
        } else if (type == _bh) {
                _rtw_spinlock_bh(plock);
        } else if (type == _ps) {
                _rtw_spinlock(plock);
        }
}
static inline void _os_spinunlock(void *d, _os_lock *plock,
                                  enum lock_type type, _os_spinlockfg *flags)
{
        if (type == _irq) {
                if (flags == NULL)
                        RTW_ERR("_os_spinunlock_irq: flags=NULL @%s:%u\n",
                                __FUNCTION__, __LINE__);
                _rtw_spinunlock_irq(plock, flags);
        } else if (type == _bh) {
                _rtw_spinunlock_bh(plock);
        } else if (type == _ps) {
                _rtw_spinunlock(plock);
        }
}
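
/*
 * Usage sketch (illustrative only): the lock_type argument selects the
 * underlying primitive: _irq uses irqsave-style locking and requires a valid
 * flags pointer, _bh disables bottom halves, _ps takes the plain spinlock.
 * The lock name "lock" is hypothetical.
 *
 *      _os_lock lock;
 *      _os_spinlockfg flags;
 *
 *      _os_spinlock_init(d, &lock);
 *      _os_spinlock(d, &lock, _irq, &flags);
 *      ... critical section shared with interrupt context ...
 *      _os_spinunlock(d, &lock, _irq, &flags);
 *      _os_spinlock_free(d, &lock);
 */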
static inline int _os_test_and_clear_bit(int nr, unsigned long *addr)
{
        return rtw_test_and_clear_bit(nr, addr);
}
static inline int _os_test_and_set_bit(int nr, unsigned long *addr)
{
        return rtw_test_and_set_bit(nr, addr);
}
/* Atomic integer operations */
static inline void _os_atomic_set(void *d, _os_atomic *v, int i)
{
        ATOMIC_SET(v, i);
}

static inline int _os_atomic_read(void *d, _os_atomic *v)
{
        return ATOMIC_READ(v);
}

static inline void _os_atomic_add(void *d, _os_atomic *v, int i)
{
        ATOMIC_ADD(v, i);
}
static inline void _os_atomic_sub(void *d, _os_atomic *v, int i)
{
        ATOMIC_SUB(v, i);
}

static inline void _os_atomic_inc(void *d, _os_atomic *v)
{
        ATOMIC_INC(v);
}

static inline void _os_atomic_dec(void *d, _os_atomic *v)
{
        ATOMIC_DEC(v);
}

static inline int _os_atomic_add_return(void *d, _os_atomic *v, int i)
{
        return ATOMIC_ADD_RETURN(v, i);
}

static inline int _os_atomic_sub_return(void *d, _os_atomic *v, int i)
{
        return ATOMIC_SUB_RETURN(v, i);
}

static inline int _os_atomic_inc_return(void *d, _os_atomic *v)
{
        return ATOMIC_INC_RETURN(v);
}

static inline int _os_atomic_dec_return(void *d, _os_atomic *v)
{
        return ATOMIC_DEC_RETURN(v);
}
/*
static inline bool _os_atomic_inc_unless(void *d, _os_atomic *v, int u)
{
        return ATOMIC_INC_UNLESS(v, 1, u);
}
*/

static inline u8 _os_tasklet_init(void *drv_priv, _os_tasklet *task,
                                  void (*call_back_func)(void *context), void *context)
{
        rtw_tasklet_init(task,
                         (void (*)(unsigned long))call_back_func,
                         (unsigned long)task);
        return 0;
}
static inline u8 _os_tasklet_deinit(void *drv_priv, _os_tasklet *task)
{
        rtw_tasklet_kill(task);
        return 0;
}
static inline u8 _os_tasklet_schedule(void *drv_priv, _os_tasklet *task)
{
#if 1
        rtw_tasklet_hi_schedule(task);
#else
        rtw_tasklet_schedule(task);
#endif
        return 0;
}

static __inline u8 _os_thread_init(void *drv_priv, _os_thread *thread,
                                   int (*call_back_func)(void *context),
                                   void *context,
                                   const char namefmt[])
{
        thread->thread_handler = rtw_thread_start((int (*)(void *))call_back_func,
                                                  context, namefmt);
        if (thread->thread_handler) {
                RST_THREAD_STATUS(thread);
                SET_THREAD_STATUS(thread, THREAD_STATUS_STARTED);
                return RTW_PHL_STATUS_SUCCESS;
        }

        return RTW_PHL_STATUS_FAILURE;
}
static __inline u8 _os_thread_deinit(void *drv_priv, _os_thread *thread)
{
        if (CHK_THREAD_STATUS(thread, THREAD_STATUS_STARTED)) {
                CLR_THREAD_STATUS(thread, THREAD_STATUS_STARTED);
                return rtw_thread_stop(thread->thread_handler);
        }

        return RTW_PHL_STATUS_SUCCESS;
}
static __inline enum rtw_phl_status _os_thread_schedule(void *drv_priv, _os_thread *thread)
{
        return RTW_PHL_STATUS_SUCCESS;
}
static inline void _os_thread_stop(void *drv_priv, _os_thread *thread)
{
        SET_THREAD_STATUS(thread, THREAD_STATUS_STOPPED);
}
static inline int _os_thread_check_stop(void *drv_priv, _os_thread *thread)
{
        return CHK_THREAD_STATUS(thread, THREAD_STATUS_STOPPED);
}

static inline int _os_thread_wait_stop(void *drv_priv, _os_thread *thread)
{
        rtw_thread_wait_stop();
        return RTW_PHL_STATUS_SUCCESS;
}
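
/*
 * Usage sketch (illustrative only): the expected thread life cycle with the
 * wrappers above. my_thread_fn, struct my_ctx and its members are
 * hypothetical.
 *
 *      static int my_thread_fn(void *context)
 *      {
 *              struct my_ctx *ctx = context;
 *
 *              while (!_os_thread_check_stop(ctx->drv, &ctx->thread)) {
 *                      ... do work, sleep on an event ...
 *              }
 *              _os_thread_wait_stop(ctx->drv, &ctx->thread);
 *              return 0;
 *      }
 *
 *      _os_thread_init(drv, &ctx->thread, my_thread_fn, ctx, "my_thread");
 *      ...
 *      _os_thread_stop(drv, &ctx->thread);     ask the loop to exit
 *      _os_thread_deinit(drv, &ctx->thread);   join via rtw_thread_stop()
 */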

#if 0
static inline _os_thread _os_thread_start(int (*threadfn)(void *data),
                                          void *data, const char namefmt[])
{
        return rtw_thread_start(threadfn, data, namefmt);
}
static inline bool _os_thread_stop(_os_thread th)
{
        return rtw_thread_stop(th);
}
static inline void _os_thread_wait_stop(void)
{
        rtw_thread_wait_stop();
}
static inline int _os_thread_should_stop(void)
{
        return kthread_should_stop();
}
#endif

#ifdef CONFIG_PHL_CPU_BALANCE
static inline u8 _os_workitem_config_cpu(void *drv_priv, _os_workitem *workitem,
                                         char *work_name, u8 cpu_id)
{
        _config_workitem_cpu(workitem, work_name, cpu_id);
        return 0;
}
#endif

static inline u8 _os_workitem_init(void *drv_priv, _os_workitem *workitem,
                                   void (*call_back_func)(void *context), void *context)
{
#ifdef CONFIG_PHL_CPU_BALANCE
        _init_workitem_cpu(workitem, call_back_func, context);
#else
        _init_workitem(workitem, call_back_func, context);
#endif
        return 0;
}
static inline u8 _os_workitem_schedule(void *drv_priv, _os_workitem *workitem)
{
#ifdef CONFIG_PHL_CPU_BALANCE
        _set_workitem_cpu(workitem);
#else
        _set_workitem(workitem);
#endif
        return 0;
}
static inline u8 _os_workitem_deinit(void *drv_priv, _os_workitem *workitem)
{
#ifdef CONFIG_PHL_CPU_BALANCE
        _cancel_workitem_sync_cpu(workitem);
#else
        _cancel_workitem_sync(workitem);
#endif
        return 0;
}
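
/*
 * Usage sketch (illustrative only): deferring work to process context. The
 * callback my_work_cb and the context pointer ctx are hypothetical. With
 * CONFIG_PHL_CPU_BALANCE the *_cpu variants additionally allow binding the
 * work to a CPU via _os_workitem_config_cpu().
 *
 *      static void my_work_cb(void *context) { ... }
 *
 *      _os_workitem work;
 *
 *      _os_workitem_init(drv, &work, my_work_cb, ctx);
 *      _os_workitem_schedule(drv, &work);      queue the callback
 *      ...
 *      _os_workitem_deinit(drv, &work);        cancel and wait for completion
 */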

/* File Operation */
static inline u32 _os_read_file(const char *path, u8 *buf, u32 sz)
{
        return (u32)rtw_retrieve_from_file(path, buf, sz);
}

/*BUS*/
#ifdef CONFIG_PCI_HCI
#include <pci_ops_linux.h>
static inline u8 _os_read8_pcie(void *d, u32 addr)
{
        return os_pci_read8((struct dvobj_priv *)d, addr);
}
static inline u16 _os_read16_pcie(void *d, u32 addr)
{
        return os_pci_read16((struct dvobj_priv *)d, addr);
}
static inline u32 _os_read32_pcie(void *d, u32 addr)
{
        return os_pci_read32((struct dvobj_priv *)d, addr);
}

static inline int _os_write8_pcie(void *d, u32 addr, u8 val)
{
        return os_pci_write8((struct dvobj_priv *)d, addr, val);
}
static inline int _os_write16_pcie(void *d, u32 addr, u16 val)
{
        return os_pci_write16((struct dvobj_priv *)d, addr, val);
}
static inline int _os_write32_pcie(void *d, u32 addr, u32 val)
{
        return os_pci_write32((struct dvobj_priv *)d, addr, val);
}
#endif /*#ifdef CONFIG_PCI_HCI*/

#ifdef CONFIG_USB_HCI
#include <usb_ops_linux.h>
static inline int _os_usbctrl_vendorreq(void *d, u8 request, u16 value,
                                        u16 index, void *pdata, u16 len, u8 requesttype)
{
        return usbctrl_vendorreq((struct dvobj_priv *)d, request, value,
                                 index, pdata, len, requesttype);
}
static __inline u8 os_out_token_alloc(void *drv_priv)
{
        return 0; // RTW_PHL_STATUS_SUCCESS
}

static __inline void os_out_token_free(void *drv_priv)
{
}

static inline int os_usb_tx(void *d, u8 *tx_buf_ptr,
                            u8 bulk_id, u32 len, u8 *pkt_data_buf)
{
        return rtw_usb_write_port((struct dvobj_priv *)d, tx_buf_ptr,
                                  bulk_id, len, pkt_data_buf);
}

static __inline void os_enable_usb_out_pipes(void *drv_priv)
{
}

static __inline void os_disable_usb_out_pipes(void *drv_priv)
{
        /* Free bulkout urb */
        rtw_usb_write_port_cancel(drv_priv);
}

static __inline u8 os_in_token_alloc(void *drv_priv)
{
        // Allocate in token (pUrb) list
        return 0;
}

static __inline void os_in_token_free(void *drv_priv)
{
        // free in token memory
        /*rtw_usb_read_port_free(drv_priv);*/
}

static __inline u8 os_send_usb_in_token(void *drv_priv, void *rxobj, u8 *inbuf, u32 inbuf_len, u8 pipe_idx, u8 minLen)
{
        return rtw_usb_read_port(drv_priv, rxobj, inbuf, inbuf_len, pipe_idx, minLen);
}

static __inline void os_enable_usb_in_pipes(void *drv_priv)
{
}

static __inline void os_disable_usb_in_pipes(void *drv_priv)
{
        // Cancel Pending IN IRPs.
        rtw_usb_read_port_cancel(drv_priv);
}

#endif /*CONFIG_USB_HCI*/

#ifdef CONFIG_SDIO_HCI
#include <rtw_sdio.h>
#include <sdio_ops_linux.h>
#include <rtw_debug.h>

static inline u8 _os_sdio_cmd52_r8(void *d, u32 offset)
{
        u8 val = SDIO_ERR_VAL8;

        if (rtw_sdio_read_cmd52((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);

        return val;
}

static inline u8 _os_sdio_cmd53_r8(void *d, u32 offset)
{
        u8 val = SDIO_ERR_VAL8;

        if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);

        return val;
}

static inline u16 _os_sdio_cmd53_r16(void *d, u32 offset)
{
        u16 val = SDIO_ERR_VAL16;

        if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 2) == _FAIL) {
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
                goto exit;
        }
        val = le16_to_cpu(val);

exit:
        return val;
}

static inline u32 _os_sdio_cmd53_r32(void *d, u32 offset)
{
        u32 val = SDIO_ERR_VAL32;

        if (rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, &val, 4) == _FAIL) {
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
                goto exit;
        }
        val = le32_to_cpu(val);

exit:
        return val;
}

/*
 * CMD53 multi-byte read. The length actually transferred is rounded up to a
 * 4-byte multiple and to the host's CMD53 alignment requirement; when that
 * rounded length exceeds the adapter's temporary-buffer size, a bounce
 * buffer is allocated and the requested bytes are copied back to the caller.
 */
static inline u8 _os_sdio_cmd53_rn(void *d, u32 offset, u32 size, u8 *data)
{
        struct dvobj_priv *dv = d;
        struct sdio_data *sdio = dvobj_to_sdio(dv);
        u8 *pbuf = data;
        u32 sdio_read_size;
        u8 ret = _SUCCESS;

        if (!data)
                return _FAIL;

        sdio_read_size = RND4(size);
        sdio_read_size = rtw_sdio_cmd53_align_size(dv, sdio_read_size);

        if (sdio_read_size > sdio->tmpbuf_sz) {
                pbuf = rtw_malloc(sdio_read_size);
                if (!pbuf)
                        return _FAIL;
        }

        if (rtw_sdio_read_cmd53(dv, offset, pbuf, sdio_read_size) == _FAIL) {
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
                ret = _FAIL;
                goto exit;
        }

        if (pbuf != data)
                _rtw_memcpy(data, pbuf, size);

exit:
        if (pbuf != data)
                rtw_mfree(pbuf, sdio_read_size);

        return ret;
}

static inline u8 _os_sdio_cmd53_r(void *d, u32 offset, u32 size, u8 *data)
{
        u8 ret;

        ret = rtw_sdio_read_cmd53((struct dvobj_priv *)d, offset, data, size);
        if (ret == _FAIL) {
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
                return _FAIL;
        }

        return _SUCCESS;
}

static inline void _os_sdio_cmd52_w8(void *d, u32 offset, u8 val)
{
        if (rtw_sdio_write_cmd52((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}

static inline void _os_sdio_cmd53_w8(void *d, u32 offset, u8 val)
{
        if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 1) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}

static inline void _os_sdio_cmd53_w16(void *d, u32 offset, u16 val)
{
        val = cpu_to_le16(val);
        if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 2) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}

static inline void _os_sdio_cmd53_w32(void *d, u32 offset, u32 val)
{
        val = cpu_to_le32(val);
        if (rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, &val, 4) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}

static inline void _os_sdio_cmd53_wn(void *d, u32 offset, u32 size, u8 *data)
{
        struct dvobj_priv *dv = d;
        struct sdio_data *sdio = dvobj_to_sdio(dv);
        u8 *pbuf = data;

        if (size > sdio->tmpbuf_sz) {
                pbuf = rtw_malloc(size);
                if (!pbuf)
                        return;
                _rtw_memcpy(pbuf, data, size);
        }

        if (rtw_sdio_write_cmd53(dv, offset, pbuf, size) == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);

        if (pbuf != data)
                rtw_mfree(pbuf, size);
}

static inline void _os_sdio_cmd53_w(void *d, u32 offset, u32 size, u8 *data)
{
        u8 ret;

        ret = rtw_sdio_write_cmd53((struct dvobj_priv *)d, offset, data, size);
        if (ret == _FAIL)
                RTW_ERR("%s: I/O FAIL!\n", __FUNCTION__);
}

static inline u8 _os_sdio_f0_read(void *d, u32 addr, void *buf, size_t len)
{
        return rtw_sdio_f0_read((struct dvobj_priv *)d, addr, buf, len);
}

static inline u8 _os_sdio_read_cia_r8(void *d, u32 addr)
{
        u8 data = 0;

        if (rtw_sdio_f0_read((struct dvobj_priv *)d, addr, &data, 1) == _FAIL)
                RTW_ERR("%s: read sdio cia FAIL!\n", __FUNCTION__);

        return data;
}

#endif /*CONFIG_SDIO_HCI*/
#endif /*_PLTFM_OPS_LINUX_H_*/