//<MStar Software>
//******************************************************************************
// MStar Software
// Copyright (c) 2010 - 2012 MStar Semiconductor, Inc. All rights reserved.
// All software, firmware and related documentation herein ("MStar Software") are
// intellectual property of MStar Semiconductor, Inc. ("MStar") and protected by
// law, including, but not limited to, copyright law and international treaties.
// Any use, modification, reproduction, retransmission, or republication of all
// or part of MStar Software is expressly prohibited, unless prior written
// permission has been granted by MStar.
//
// By accessing, browsing and/or using MStar Software, you acknowledge that you
// have read, understood, and agree to be bound by below terms ("Terms") and to
// comply with all applicable laws and regulations:
//
// 1. MStar shall retain any and all right, ownership and interest to MStar
//    Software and any modification/derivatives thereof.
//    No right, ownership, or interest to MStar Software and any
//    modification/derivatives thereof is transferred to you under Terms.
//
// 2. You understand that MStar Software might include, incorporate or be
//    supplied together with third party's software and the use of MStar
//    Software may require additional licenses from third parties.
//    Therefore, you hereby agree it is your sole responsibility to separately
//    obtain any and all third party right and license necessary for your use of
//    such third party's software.
//
// 3. MStar Software and any modification/derivatives thereof shall be deemed as
//    MStar's confidential information and you agree to keep MStar's
//    confidential information in strictest confidence and not disclose to any
//    third party.
//
// 4. MStar Software is provided on an "AS IS" basis without warranties of any
//    kind. Any warranties are hereby expressly disclaimed by MStar, including
//    without limitation, any warranties of merchantability, non-infringement of
//    intellectual property rights, fitness for a particular purpose, error free
//    and in conformity with any international standard. You agree to waive any
//    claim against MStar for any loss, damage, cost or expense that you may
//    incur related to your use of MStar Software.
//    In no event shall MStar be liable for any direct, indirect, incidental or
//    consequential damages, including without limitation, loss of profit or
//    revenues, loss or damage of data, and unauthorized system use.
//    You agree that this Section 4 shall still apply without being affected
//    even if MStar Software has been modified by MStar in accordance with your
//    request or instruction for your use, except otherwise agreed by both
//    parties in writing.
//
// 5. If requested, MStar may from time to time provide technical supports or
//    services in relation with MStar Software to you for your use of
//    MStar Software in conjunction with your or your customer's product
//    ("Services").
//    You understand and agree that, except otherwise agreed by both parties in
//    writing, Services are provided on an "AS IS" basis and the warranty
//    disclaimer set forth in Section 4 above shall apply.
//
// 6. Nothing contained herein shall be construed as by implication, estoppel
//    or otherwise:
//    (a) conferring any license or right to use MStar name, trademark, service
//        mark, symbol or any other identification;
//    (b) obligating MStar or any of its affiliates to furnish any person,
//        including without limitation, you and your customers, any assistance
//        of any kind whatsoever, or any information; or
//    (c) conferring any license or right under any intellectual property right.
//
// 7. These terms shall be governed by and construed in accordance with the laws
//    of Taiwan, R.O.C., excluding its conflict of law rules.
//    Any and all dispute arising out hereof or related hereto shall be finally
//    settled by arbitration referred to the Chinese Arbitration Association,
//    Taipei in accordance with the ROC Arbitration Law and the Arbitration
//    Rules of the Association by three (3) arbitrators appointed in accordance
//    with the said Rules.
//    The place of arbitration shall be in Taipei, Taiwan and the language shall
//    be English.
//    The arbitration award shall be final and binding to both parties.
//
//******************************************************************************
//<MStar Software>
////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008-2009 MStar Semiconductor, Inc.
// All rights reserved.
//
// Unless otherwise stipulated in writing, any and all information contained
// herein regardless in any format shall remain the sole proprietary of
// MStar Semiconductor Inc. and be kept in strict confidence
// ("MStar Confidential Information") by the recipient.
// Any unauthorized act including without limitation unauthorized disclosure,
// copying, use, reproduction, sale, distribution, modification, disassembling,
// reverse engineering and compiling of the contents of MStar Confidential
// Information is unlawful and strictly prohibited. MStar hereby reserves the
// rights to any and all damages, losses, costs and expenses resulting therefrom.
//
////////////////////////////////////////////////////////////////////////////////

#include <linux/mman.h>
#include <asm/mach/map.h>
#include <chip_setup.h>

#include "MsCommon.h"
#include "drvMMIO.h"
#include "halCHIP.h"
#include "halMPool.h"

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/module.h>

//--------------------------------------------------------------------------------------------------
// Internal define
//--------------------------------------------------------------------------------------------------
#ifdef _Debug
#define MPOOL_DEBUG
#endif

//--------------------------------------------------------------------------------------------------
// New Internal Variable of MPool Implementation
//--------------------------------------------------------------------------------------------------
static MS_U32 MPOOL_MAPPING;
#define MAX_MAPPINGSIZE 200

#define MMAP_NONCACHE true
#define MMAP_CACHE    false

//static MS_U32 MPOOL_MAPPING;

//--------------------------------------------------------------------------------------------------
// Internal Variable
//--------------------------------------------------------------------------------------------------
//static MS_S32 _s32MPoolFd = -1;
//static void* _pAddrVirtStart[2] = { NULL, NULL};
//static void* _pAddrVirtEnd[2] = { NULL, NULL};
//static MS_U32 _u32Va2PaOff[2] = { 0, 0};
static unsigned long u32MpoolRegBase;
static MS_VIRT map_kdriver_mem(MS_PHY u32BusStart, MS_SIZE u32MapSize, MS_BOOL bNonCache);


typedef struct
{
    MS_VIRT pVirtStart;
    MS_VIRT pVirtEnd;
    MS_U64  pPhyaddr;
    MS_U64  u32MpoolSize;
    MS_BOOL bIsUsed;
    MS_SIZE s32V2Poff;
    MS_BOOL bNonCache;
    MS_U8   u8MiuSel;
    MS_BOOL bIsDyn;
} MPOOL_INFO;



static MsOSMPool_DbgLevel _u32MPoolDBGLevel = E_MsOSMPool_DBG_L1;
static MPOOL_INFO mpool_info[MAX_MAPPINGSIZE];
static struct mutex _MsOS_MPool_Mutex = __MUTEX_INITIALIZER(_MsOS_MPool_Mutex);
static MS_BOOL g_bMpoolInit = FALSE;
static DEFINE_MUTEX(_Mpool_Init_Mutex);

//#ifdef CONFIG_MP_PLATFORM_UTOPIA2K_EXPORT_SYMBOL
#if 1
static unsigned long VMALLOC_SIZE;
static MS_VIRT driver_mem_va_start;
//static unsigned long driver_mem_va_end;
#else
#error "Please allocate kernel VA for driver mapping."
#endif

//--------------------------------------------------------------------------------------------------
// Internal macros
//--------------------------------------------------------------------------------------------------
#ifdef MPOOL_DEBUG
#define MPOOL_ERROR(fmt, args...)   printk("[MPOOL USER ERR][%06d] " fmt, __LINE__, ## args)
#define MPOOL_WARN(fmt, args...)    printk("[MPOOL USER WARN][%06d] " fmt, __LINE__, ## args)
#define MPOOL_PRINT(fmt, args...)   printk("[MPOOL USER][%06d] " fmt, __LINE__, ## args)

#define MPOOL_ASSERT(_bool, _f)     if (!(_bool)) { (_f); MS_ASSERT(0); }
#else
#define MPOOL_ERROR(fmt, args...)   do{} while (0)
#define MPOOL_WARN(fmt, args...)    do{} while (0)
#define MPOOL_PRINT(fmt, args...)   do{} while (0)
#define MPOOL_ASSERT(_bool, _f)     if (!(_bool)) { (_f); }
#endif

//#define MPOOL_IS_CACHE_ADDR(addr)   ( ( ((MS_U32)_pAddrVirtStart[0] <= (MS_U32)(addr)) && ((MS_U32)_pAddrVirtEnd[0] > (MS_U32)addr) ) ? TRUE : FALSE )
//#define MPOOL_IS_UNCACHE_ADDR(addr) ( ( ((MS_U32)_pAddrVirtStart[1] <= (MS_U32)(addr)) && ((MS_U32)_pAddrVirtEnd[1] > (MS_U32)addr) ) ? TRUE : FALSE )

#define MPOOL_DBG_MSG(debug_level, x)   do { if (_u32MPoolDBGLevel >= (debug_level)) (x); } while(0)

//--------------------------------------------------------------------------------------------------
// Private Function Prototype
//--------------------------------------------------------------------------------------------------
static MS_VIRT _MPool_PA2VA(MS_PHY pAddrPhys, MS_BOOL bNonCache);
static void _MPool_Check_aligned(MS_U32 u32Offset, MS_U32 u32MapSize);
static MS_BOOL _MPOOL_DELAY_BINDING(int idx);

//--------------------------------------------------------------------------------------------------
// Implementation
//--------------------------------------------------------------------------------------------------


MS_BOOL MsOS_MPool_IsInitialized(void)
{
    MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_Release);

    return FALSE;
}

MS_BOOL MsOS_MPool_Init(void)
{
    MS_PHY phySize;
    MS_BOOL bRet = FALSE;

    MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_Release);
    VMALLOC_SIZE = VMALLOC_END - VMALLOC_START;

    mutex_lock(&_Mpool_Init_Mutex);

    if (g_bMpoolInit)
    {
        bRet = TRUE;
        goto MPool_Init_End;
    }
    memset(mpool_info, 0, sizeof(MPOOL_INFO) * MAX_MAPPINGSIZE);

    if (!MDrv_MMIO_GetBASE(&u32MpoolRegBase, &phySize, MS_MODULE_PM))
    {
        MPOOL_ERROR("[Utopia2K] %s Get IOMAP Base fail!\n", __FUNCTION__);
        bRet = FALSE;
        goto MPool_Init_End;
    }

    bRet = TRUE;
    g_bMpoolInit = TRUE;
    //printk("UTPA2K VA Start:0x%x, end:0x%x\n", (unsigned int)driver_mem_va_start, (unsigned int)driver_mem_va_end);
    //printk("UTPA2K VA Size:0x%x\n", (unsigned int)(driver_mem_va_end-driver_mem_va_start));

MPool_Init_End:
    mutex_unlock(&_Mpool_Init_Mutex);
    return bRet;
}
#if defined(MSOS_TYPE_LINUX_KERNEL)
EXPORT_SYMBOL(MsOS_MPool_Init);
#endif
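
/*
 * Illustrative bring-up sketch (not part of the driver build): MsOS_MPool_Init()
 * must succeed before any MsOS_MPool_Mapping()/PA2KSEG/VA2PA call below,
 * otherwise those calls bail out with "MPool was not initialized".
 *
 *     if (!MsOS_MPool_Init())
 *         return FALSE;
 *     // mappings and address conversions are valid from here on
 */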

MS_BOOL MsOS_MPool_Close(void)
{
    //printk("MsOS_MPool_Close\n");

    return TRUE;
}



MS_PHY MsOS_MPool_VA2PA(MS_VIRT pAddrVirt)
{
    MS_PHY phyAddr = 0xFFFFFFFF;
    int i, idx;

    mutex_lock(&_Mpool_Init_Mutex);

    if (!g_bMpoolInit)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] ERROR!! MPool was not initialized.!\033[m\n");
        goto VA2PA_END;
    }

    for(i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        // entries are allocated in order, so an unused entry means the search is over
        if(!mpool_info[i].bIsUsed)
        {
            break;
        }

        // check if cache_policy is correct
        //if(mpool_info[i].bNonCache != bNonCache)
        //{
        //    //printk("\033[35mcache policy is not correct, check next\033[m\n");
        //    continue;
        //}

        // VA range check: is pAddrVirt located inside mpool_info[i]?
        if(!((mpool_info[i].pVirtStart <= pAddrVirt) && (pAddrVirt < mpool_info[i].pVirtEnd)))
        {
            continue;
        }

        //return HAL_MsOS_MPool_VA2PA(pAddrVirt, mpool_info[i].s32V2Poff);
        phyAddr = pAddrVirt - mpool_info[i].s32V2Poff;

        mutex_unlock(&_Mpool_Init_Mutex);
        return phyAddr;
    }

    // Debug
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("=== MPool Table Print! ===\n"));
    for(idx = 0; idx < MAX_MAPPINGSIZE; idx++)
    {
        if(!mpool_info[idx].bIsUsed)
            continue;
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].u8MiuSel = %d\n", idx, mpool_info[idx].u8MiuSel));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].bNonCache = %d\n", idx, mpool_info[idx].bNonCache));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtStart = %lx\n", idx, mpool_info[idx].pVirtStart));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtEnd = %lx\n", idx, mpool_info[idx].pVirtEnd));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pPhyaddr = %lx\n", idx, mpool_info[idx].pPhyaddr));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].u32MpoolSize = %lx\n", idx, mpool_info[idx].u32MpoolSize));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("###\n"));
    }
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("==========================\n"));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("[Utopia2K] ERROR!!! CAN'T SUCCESSFULLY CONVERT FROM VA(%lx) TO PA\n", pAddrVirt));

VA2PA_END:
    mutex_unlock(&_Mpool_Init_Mutex);
    return phyAddr;
}

void MsOS_MPool_InfoMsg(void)
{
    int i;
    printk("=========================Mapping Info=================================\n");
    printk("MPool Total VA 0x%lx bytes (0x%lx ~ 0x%lx)\n", VMALLOC_SIZE, VMALLOC_START, VMALLOC_END);
    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if(mpool_info[i].bIsUsed == true)
            printk("map miu:%d-pa:0x%llx with size:0x%llx to VA(0x%lx ~ 0x%lx)\n", mpool_info[i].u8MiuSel, mpool_info[i].pPhyaddr, mpool_info[i].u32MpoolSize, mpool_info[i].pVirtStart, mpool_info[i].pVirtEnd);
    }
    printk("======================================================================\n");
    return;
}

MS_VIRT MsOS_MPool_PA2KSEG1(MS_PHY pAddrPhys)   // non-cache
{
    return _MPool_PA2VA(pAddrPhys, TRUE);
}
MS_VIRT MsOS_MPool_PA2KSEG0(MS_PHY pAddrPhys)   // cache
{
    return _MPool_PA2VA(pAddrPhys, FALSE);
}

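/*
 * Illustrative sketch (not part of the driver build): the same physical address
 * can be resolved to a non-cached or a cached kernel VA, as long as a mapping
 * with the matching cache policy was created first. The physical address
 * 0x20000000 is a hypothetical value used only for illustration.
 *
 *     MS_VIRT vaUC = MsOS_MPool_PA2KSEG1(0x20000000);   // non-cached view
 *     MS_VIRT vaC  = MsOS_MPool_PA2KSEG0(0x20000000);   // cached view
 *     // a return value of 0 means no mapping with that cache policy covers the PA
 */
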
static MS_VIRT _MPool_PA2VA(MS_PHY pAddrPhys, MS_BOOL bNonCache)
{
    MS_VIRT pAddrVirt = 0;
    int i, idx, maxID = -1;
    MS_U64 u64Maxsize = 0;

    mutex_lock(&_Mpool_Init_Mutex);

    if (!g_bMpoolInit)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] ERROR!! MPool was not initialized.!\033[m\n");
        goto PA2VA_End;
    }

    for(i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        // search all entries ...
        if(!mpool_info[i].bIsUsed)
        {
            continue;
        }

        // check if cache_policy is correct
        if(mpool_info[i].bNonCache != bNonCache)
        {
            continue;
        }

        // PA range check: is pAddrPhys located inside mpool_info[i]?
        if(!((mpool_info[i].pPhyaddr <= pAddrPhys) && (pAddrPhys < (mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize))))
        {
            continue;
        }

        // among all matching regions, prefer the one with the most space left after pAddrPhys
        if( (mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize - pAddrPhys) > u64Maxsize)
        {
            u64Maxsize = (mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize - pAddrPhys);
            maxID = i;
        }
    }

    if(maxID == -1)
        goto PA2VA_Error;

    if(mpool_info[maxID].bIsDyn)
    {
        if((mpool_info[maxID].s32V2Poff == 0) && (mpool_info[maxID].pVirtStart == 0))
        {
            if(!_MPOOL_DELAY_BINDING(maxID))
            {
                printk("[Utopia2K] PA2KSEG delay binding fail!, pa = %llx", mpool_info[maxID].pPhyaddr);

                goto PA2VA_Error;
                //return (MS_U32)NULL;
            }
        }
        //mpool_info[i].bIsDyn = false // joe, need to be discussed
    }

    //pAddrVirt = HAL_MsOS_MPool_PA2VA(pAddrPhys, mpool_info[i].pPhyaddr, mpool_info[i].u32MpoolSize, mpool_info[i].s32V2Poff, ENABLE_PARTIAL_MAPPING);
    pAddrVirt = pAddrPhys + mpool_info[maxID].s32V2Poff;

    if(pAddrVirt != 0)
    {
        mutex_unlock(&_Mpool_Init_Mutex);
        return pAddrVirt;
    }

PA2VA_Error:
    // Debug
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("=== MPool Table Print! ===\n"));
    for(idx = 0; idx < MAX_MAPPINGSIZE; idx++)
    {
        if(!mpool_info[idx].bIsUsed)
            continue;
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].u8MiuSel = %d\n", idx, mpool_info[idx].u8MiuSel));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].bNonCache = %d\n", idx, mpool_info[idx].bNonCache));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtStart = %lx\n", idx, mpool_info[idx].pVirtStart));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtEnd = %lx\n", idx, mpool_info[idx].pVirtEnd));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pPhyaddr = %lx\n", idx, mpool_info[idx].pPhyaddr));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].u32MpoolSize = %lx\n", idx, mpool_info[idx].u32MpoolSize));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("###\n"));
    }
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("==========================\n"));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("[Utopia2K] ERROR!!! CAN'T SUCCESSFULLY CONVERT FROM PA(%lx) TO VA.\n", pAddrPhys));

PA2VA_End:
    mutex_unlock(&_Mpool_Init_Mutex);
    return 0x0;
}

MS_BOOL MsOS_MPool_Dcache_Flush(MS_VIRT pAddrVirt, MS_SIZE tSize)
{
    mutex_lock(&_Mpool_Init_Mutex);
    if (!g_bMpoolInit)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] ERROR!! MPool was not initialized.!\033[m\n");
        mutex_unlock(&_Mpool_Init_Mutex);
        return FALSE;
    }
    mutex_unlock(&_Mpool_Init_Mutex);

    Chip_Flush_Cache_Range(pAddrVirt, tSize);

    return TRUE;
}

MS_BOOL MsOS_MPool_Dcache_Flush_All()
{
    mutex_lock(&_Mpool_Init_Mutex);
    if (!g_bMpoolInit)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] ERROR!! MPool was not initialized.!\033[m\n");
        mutex_unlock(&_Mpool_Init_Mutex);
        return FALSE;
    }
    mutex_unlock(&_Mpool_Init_Mutex);

    Chip_Flush_Cache_All();

    return TRUE;
}

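/*
 * Illustrative sketch (not part of the driver build): after the CPU writes
 * through a cached mapping, the range must be flushed before hardware reads
 * the same memory via its physical address. vaBuf and bufSize are hypothetical.
 *
 *     memset((void *)vaBuf, 0, bufSize);              // CPU writes via cached VA
 *     MsOS_MPool_Dcache_Flush(vaBuf, bufSize);        // push dirty lines out to DRAM
 *     // hardware may now safely read from MsOS_MPool_VA2PA(vaBuf)
 */
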
static void _MPool_Check_aligned(MS_U32 u32Offset, MS_U32 u32MapSize)
{
    if(u32Offset & 0xFFF)
    {
        printk("\033[31m!!! Important issue !!!\033[m\n");
        printk("\033[31mThe memory mapping address is not aligned, please make sure of that\033[m\n");
        MS_ASSERT(0);
    }

    if(u32MapSize & 0xFFF)
    {
        printk("\033[31m!!! Important issue !!!\033[m\n");
        printk("\033[31mThe memory mapping size is not aligned, please make sure of that\033[m\n");
        MS_ASSERT(0);
    }

    return;
}

static MS_BOOL _MPOOL_DELAY_BINDING(int idx)
{
    // Mutex lock was taken by the caller.
    MS_PHY ba_mapping_start;
    MS_BOOL bRet = FALSE;

    driver_mem_va_start = 0;

    // if this mapping would not fit in the kernel VA space, fail; check your total mapping size
    if(VMALLOC_SIZE < mpool_info[idx].u32MpoolSize)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] Kernel driver VA size is not enough !!\033[m\n");
        return FALSE;
    }

    //if(mpool_info[idx].u8MiuSel == 0)
    //    ba_mapping_start = mpool_info[idx].pPhyaddr + HAL_MIU0_BUS_BASE;                       // miu_0
    //else
    //    ba_mapping_start = (mpool_info[idx].pPhyaddr - HAL_MIU1_BASE) + HAL_MIU1_BUS_BASE;     // miu_1
    ba_mapping_start = HAL_MsOS_MPool_PA2BA(mpool_info[idx].pPhyaddr);
    driver_mem_va_start = (unsigned long)map_kdriver_mem(ba_mapping_start, mpool_info[idx].u32MpoolSize, mpool_info[idx].bNonCache);
    if(driver_mem_va_start)
        bRet = TRUE;

    // record the PA <-> VA mapping
    mpool_info[idx].pVirtStart = driver_mem_va_start;
    mpool_info[idx].pVirtEnd = mpool_info[idx].pVirtStart + mpool_info[idx].u32MpoolSize;

    mpool_info[idx].s32V2Poff = mpool_info[idx].pVirtStart - mpool_info[idx].pPhyaddr;

    //driver_mem_va_start += mpool_info[idx].u32MpoolSize;

    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtStart = %lx\n", idx, mpool_info[idx].pVirtStart));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pVirtEnd = %lx\n", idx, mpool_info[idx].pVirtEnd));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].pPhyaddr = %lx\n", idx, mpool_info[idx].pPhyaddr));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].u32MpoolSize = %lx\n", idx, mpool_info[idx].u32MpoolSize));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("mpool_info[%d].s32V2Poff = %lx\n", idx, mpool_info[idx].s32V2Poff));

    return bRet;
}

MS_BOOL MsOS_MPool_Mapping(MS_U8 u8MiuSel, MS_SIZE u32Offset, MS_SIZE u32MapSize, MS_BOOL bNonCache)
{
    int i, idx = 0;
    MS_PHY ba_mapping_start;
    MS_U32 Phyaddr, PhyaddrEnd;
    MS_BOOL bRet = FALSE;
    driver_mem_va_start = 0;

    _MPool_Check_aligned(u32Offset, u32MapSize);

    //if(u8MiuSel == 0)
    //    Phyaddr = u32Offset;
    //else
    //    Phyaddr = u32Offset + HAL_MIU1_BASE;
    _miu_offset_to_phy(u8MiuSel, u32Offset, Phyaddr);

    PhyaddrEnd = Phyaddr + u32MapSize - 1;

    mutex_lock(&_Mpool_Init_Mutex);

    if (!g_bMpoolInit)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] ERROR!! MPool was not initialized.!\033[m\n");
        goto Mapping_End;
    }

    // if every mpool_info entry is used, fail; MAX_MAPPINGSIZE needs to be enlarged
    if(mpool_info[MAX_MAPPINGSIZE-1].bIsUsed)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] Not enough MPool, must increase MAX_MAPPINGSIZE!!\033[m\n");
        goto Mapping_End;
    }

    // if this mapping would not fit in the kernel VA space, fail; check your total mapping size
    /*if(driver_mem_va_end - driver_mem_va_start < u32MapSize)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] Kernel driver VA size is not enough !!\033[m\n");
        goto Mapping_End;
    }*/
    if(VMALLOC_SIZE < u32MapSize)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] Kernel driver VA size is not enough !!\033[m\n");
        goto Mapping_End;
    }

    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if (mpool_info[i].bIsUsed == FALSE)
        {
            mpool_info[i].bIsUsed = TRUE;

            if (bNonCache)
                mpool_info[i].bNonCache = MMAP_NONCACHE;
            else
                mpool_info[i].bNonCache = MMAP_CACHE;

            idx = i;
            break;
        }
        else
        {
            if (bNonCache != mpool_info[i].bNonCache)
                continue;

            if ( (mpool_info[i].pPhyaddr <= Phyaddr && Phyaddr < mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize) ||
                 (mpool_info[i].pPhyaddr <= PhyaddrEnd && PhyaddrEnd < mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize) )
            {
                bRet = TRUE;
                printk("[Utopia2K] Duplicated PA mapping.\n");
                //MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("[Utopia2K] Duplicated PA mapping.\n"));
                // Duplicated mapping or overlapping mapping.
                goto Mapping_End;
            }
        }
    }

    ba_mapping_start = HAL_MsOS_MPool_PA2BA(Phyaddr);

    driver_mem_va_start = (MS_VIRT)map_kdriver_mem(ba_mapping_start, u32MapSize, bNonCache);

    // record the PA <-> VA mapping
    mpool_info[idx].pVirtStart = driver_mem_va_start;
    mpool_info[idx].pVirtEnd = mpool_info[idx].pVirtStart + u32MapSize;

    mpool_info[idx].pPhyaddr = Phyaddr;     // pa
    mpool_info[idx].u8MiuSel = u8MiuSel;

    mpool_info[idx].u32MpoolSize = u32MapSize;
    mpool_info[idx].s32V2Poff = mpool_info[idx].pVirtStart - mpool_info[idx].pPhyaddr;
    mpool_info[idx].bIsDyn = false;

    //driver_mem_va_start += u32MapSize;
    bRet = TRUE;

Mapping_End:
    mutex_unlock(&_Mpool_Init_Mutex);
    return bRet;
}

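/*
 * Illustrative lifecycle sketch (not part of the driver build): map a region,
 * translate addresses in both directions, then tear the mapping down. The MIU0
 * offset 0x20000000 and the 1 MiB size are hypothetical, and the example assumes
 * MIU0 offsets map 1:1 to physical addresses (as in the commented-out fallback above).
 *
 *     if (MsOS_MPool_Mapping(0, 0x20000000, 0x100000, MMAP_NONCACHE))
 *     {
 *         MS_VIRT va = MsOS_MPool_PA2KSEG1(0x20000000);   // PA -> non-cached VA
 *         MS_PHY  pa = MsOS_MPool_VA2PA(va);              // VA -> PA (== 0x20000000)
 *         MsOS_MPool_UnMapping(va, 0x100000);             // release the VA range
 *     }
 */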

MS_BOOL MsOS_MPool_Mapping_Dynamic(MS_U8 u8MiuSel, MS_SIZE u32Offset, MS_SIZE u32MapSize, MS_BOOL bNonCache)
{
    MS_U32 Phyaddr, PhyaddrEnd;
    MS_BOOL ret = TRUE;
    int i, idx = 0;

    _MPool_Check_aligned(u32Offset, u32MapSize);

    //if(u8MiuSel == 0)
    //    Phyaddr = u32Offset;
    //else
    //    Phyaddr = u32Offset + HAL_MIU1_BASE;
    _miu_offset_to_phy(u8MiuSel, u32Offset, Phyaddr);

    PhyaddrEnd = Phyaddr + u32MapSize - 1;

    mutex_lock(&_Mpool_Init_Mutex);

    if(mpool_info[MAX_MAPPINGSIZE-1].bIsUsed)
    {
        printk(KERN_EMERG "\033[35m[Utopia2K] Not enough MPool, must increase MAX_MAPPINGSIZE!!\033[m\n");
        ret = FALSE;
        goto Mapping_Dynamic_End;
    }

    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if(mpool_info[i].bIsUsed == false)
        {
            mpool_info[i].bIsUsed = true;

            if(bNonCache)
                mpool_info[i].bNonCache = MMAP_NONCACHE;
            else
                mpool_info[i].bNonCache = MMAP_CACHE;

            idx = i;
            break;
        }
        else
        {
            if (bNonCache != mpool_info[i].bNonCache)
                continue;

            if ( (mpool_info[i].pPhyaddr == Phyaddr) && ((mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize - 1) == PhyaddrEnd) )
            {
                ret = TRUE;
                printk("[Utopia2K] Duplicated PA mapping.\n");
                MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("[Utopia2K] Duplicated PA(0x%lx ~ 0x%lx) mapping with Table[%d](0x%lx ~0x%lx).\n",
                              Phyaddr, PhyaddrEnd, i, mpool_info[i].pPhyaddr, (mpool_info[i].pPhyaddr + mpool_info[i].u32MpoolSize - 1)));
                //MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printk("[Utopia2K] Duplicated PA mapping.\n"));
                //Duplicated mapping or Overlap mapping.
                goto Mapping_Dynamic_End;
            }
        }
    }

    mpool_info[idx].pVirtStart = 0;
    mpool_info[idx].pVirtEnd = 0;
    mpool_info[idx].u8MiuSel = u8MiuSel;
    mpool_info[idx].bIsDyn = true;
    mpool_info[idx].pPhyaddr = Phyaddr;
    mpool_info[idx].u32MpoolSize = u32MapSize;
    mpool_info[idx].s32V2Poff = 0;

    printk("mpool_info[%d].u8MiuSel = %d\n", idx, mpool_info[idx].u8MiuSel);
    printk("mpool_info[%d].bNonCache = %d\n", idx, mpool_info[idx].bNonCache);
    printk("mpool_info[%d].pVirtStart = %lx\n", idx, mpool_info[idx].pVirtStart);
    printk("mpool_info[%d].pVirtEnd = %lx\n", idx, mpool_info[idx].pVirtEnd);
    printk("mpool_info[%d].pPhyaddr = %lx\n", idx, mpool_info[idx].pPhyaddr);
    printk("mpool_info[%d].u32MpoolSize = %lx\n", idx, mpool_info[idx].u32MpoolSize);
    printk("###\n");

Mapping_Dynamic_End:
    mutex_unlock(&_Mpool_Init_Mutex);
    return ret;
}

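/*
 * Illustrative sketch (not part of the driver build): a dynamic mapping only
 * registers the PA range; the kernel VA is bound lazily by _MPOOL_DELAY_BINDING()
 * on the first PA2KSEG lookup that hits the range. The MIU1 offset 0x10000000
 * and the 2 MiB size are hypothetical values.
 *
 *     if (MsOS_MPool_Mapping_Dynamic(1, 0x10000000, 0x200000, MMAP_CACHE))
 *     {
 *         // no kernel VA is created here; the first MsOS_MPool_PA2KSEG0() call
 *         // whose PA falls inside this range triggers the delayed binding
 *     }
 */
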
static MS_VIRT map_kdriver_mem(MS_PHY u32BusStart, MS_SIZE u32MapSize, MS_BOOL bNonCache)
{
    MS_VIRT VirtAddr = 0;

    if(pfn_valid(__phys_to_pfn(u32BusStart & PAGE_MASK)))
    {
        /* __va() returns the cached kernel lowmem mapping: kernel memory is mapped cached and is
         * never remapped on 3.10.86 (whereas on 3.10.40 CMA allocations were remapped to non-cached).
         * So a non-cached request has to be remapped here, and the UnMapping path has to match.
         *
         * If 3.10.86 ever remaps CMA memory again, this code and the UnMapping path may need to
         * change accordingly.
         */
        if(bNonCache)
        {
            int err;
            unsigned long addr;
            struct vm_struct *area;

            area = get_vm_area_caller(u32MapSize, VM_IOREMAP, "TVOS");
            if (!area)
            {
                dump_stack();
                return 0;
            }
            addr = (unsigned long)area->addr;
#if defined (__aarch64__)
            err = ioremap_page_range(addr, addr + u32MapSize, u32BusStart, __pgprot(PROT_DEVICE_nGnRnE));
#else
            err = ioremap_page_range(addr, addr + u32MapSize, u32BusStart, MT_DEVICE);
#endif
            if(err)
            {
                dump_stack();
                return 0;
            }
            VirtAddr = addr;
        }
        else
            VirtAddr = (MS_VIRT)__va(u32BusStart);
    }
    else
    {
        if(bNonCache)
            VirtAddr = (MS_VIRT)ioremap(u32BusStart, u32MapSize);
        else
            VirtAddr = (MS_VIRT)ioremap_cached(u32BusStart, u32MapSize);
    }

    return VirtAddr;
}

MS_BOOL MsOS_MPool_UnMapping(MS_VIRT u32VirtStart, MS_SIZE u32MapSize)
{
    MS_BOOL ret = FALSE;
    int i;
    MS_PHY u32BusStart;

    mutex_lock(&_Mpool_Init_Mutex);
    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if(mpool_info[i].bIsUsed == true)
        {
            if(mpool_info[i].pVirtStart == u32VirtStart)
            {
                u32BusStart = HAL_MsOS_MPool_PA2BA(mpool_info[i].pPhyaddr);
                /* cached Linux memory uses the kernel's own lowmem mapping, so there is nothing to unmap */
                if( (!mpool_info[i].bNonCache) && (pfn_valid(__phys_to_pfn(u32BusStart & PAGE_MASK))) )
                {
                    printk("\033[35mFunction = %s, Line = %d, cached Linux memory, skip unmapping\033[m\n", __PRETTY_FUNCTION__, __LINE__);
                }
                else
                    iounmap((volatile void __iomem *)u32VirtStart);

                mpool_info[i].pVirtStart = 0;
                mpool_info[i].pVirtEnd = 0;
                mpool_info[i].u8MiuSel = 0;
                mpool_info[i].bIsDyn = false;
                mpool_info[i].pPhyaddr = 0;
                mpool_info[i].u32MpoolSize = 0;
                mpool_info[i].s32V2Poff = 0;
                mpool_info[i].bNonCache = 0;
                mpool_info[i].bIsUsed = false;
                ret = TRUE;
                break;
            }
        }
    }
    mutex_unlock(&_Mpool_Init_Mutex);
    return ret;
}

MS_BOOL MsOS_MPool_Kernel_Detect(MS_PHY *lx_addr, MS_U64 *lx_size, MS_PHY *lx2_addr, MS_U64 *lx2_size)
{
    printk("[%s][%d] %s is not supported\n", __FUNCTION__, __LINE__, __FUNCTION__);
    return FALSE;
}

void MsOS_MPool_SetDbgLevel(MsOSMPool_DbgLevel DbgLevel)
{
    _u32MPoolDBGLevel = DbgLevel;
}
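
/*
 * Illustrative sketch (not part of the driver build): raising the debug level
 * re-enables the MPool table dumps guarded by MPOOL_DBG_MSG() above, which
 * MsOS_MPool_Init() lowers to E_MsOSMPool_DBG_Release by default.
 *
 *     MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_L1);   // verbose conversion-failure dumps
 *     MsOS_MPool_InfoMsg();                         // print the current mapping table
 */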

EXPORT_SYMBOL(MsOS_MPool_Mapping);
EXPORT_SYMBOL(MsOS_MPool_Mapping_Dynamic);
EXPORT_SYMBOL(MsOS_MPool_PA2KSEG1);
EXPORT_SYMBOL(MsOS_MPool_PA2KSEG0);
EXPORT_SYMBOL(MsOS_MPool_VA2PA);

MS_BOOL MsOS_MPool_Add_PA2VARange(MS_U64 u64PhysAddr, MS_VIRT u64VirtAddr, MS_SIZE u64MapSize, MS_BOOL bNonCache)
{
    MS_BOOL find = FALSE;
    MS_U64 u64AddrOffset = 0;
    MS_U8 u8MiuSel = 0;
    MS_U32 i, idx = 0;

    _MPool_Check_aligned(u64PhysAddr, u64MapSize);
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u64PhysAddr = %tX, u64MapSize = %tX, u64VirtAddr = %tX\n", (ptrdiff_t)u64PhysAddr, (ptrdiff_t)u64MapSize, (ptrdiff_t)u64VirtAddr));

    //for multi-thread access
    mutex_lock(&_MsOS_MPool_Mutex);

    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if(mpool_info[i].bIsUsed == false)
        {
            mpool_info[i].bIsUsed = true;
            if(bNonCache)
                mpool_info[i].bNonCache = MMAP_NONCACHE;
            else
                mpool_info[i].bNonCache = MMAP_CACHE;

            idx = i;
            find = TRUE;

            break;
        }
    }

    if(!find)
    {
        printf("Not enough MPool, must increase MAX_MAPPINGSIZE!!\n");
        mutex_unlock(&_MsOS_MPool_Mutex);
        return FALSE;
    }

    _phy_to_miu_offset(u8MiuSel, u64AddrOffset, u64PhysAddr);   // get miu & offset

    mpool_info[idx].pVirtStart = u64VirtAddr;
    mpool_info[idx].pVirtEnd = (u64VirtAddr + u64MapSize);
    mpool_info[idx].u8MiuSel = u8MiuSel;
    mpool_info[idx].bIsDyn = false;
    mpool_info[idx].pPhyaddr = u64PhysAddr;
    mpool_info[idx].u32MpoolSize = u64MapSize;
    mpool_info[idx].s32V2Poff = mpool_info[idx].pVirtStart - mpool_info[idx].pPhyaddr;

    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].pVirtStart));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].pVirtEnd));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].pPhyaddr));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u32MpoolSize));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].s32V2Poff));

    MPOOL_MAPPING = 1;
    mutex_unlock(&_MsOS_MPool_Mutex);

    return TRUE;
}

EXPORT_SYMBOL(MsOS_MPool_Add_PA2VARange);

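/*
 * Illustrative sketch (not part of the driver build): register an
 * already-existing PA/VA mapping (created elsewhere, e.g. by a CMA pool or a
 * private ioremap) so that MsOS_MPool_VA2PA()/PA2KSEG can translate inside it,
 * and unregister it again on teardown. pa, va and size are hypothetical.
 *
 *     MsOS_MPool_Add_PA2VARange(pa, va, size, MMAP_CACHE);
 *     // ... MsOS_MPool_VA2PA(va + off) / MsOS_MPool_PA2KSEG0(pa + off) now work ...
 *     MsOS_MPool_Remove_PA2VARange(pa, va, size, MMAP_CACHE);
 */
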
MS_BOOL MsOS_MPool_Remove_PA2VARange(MS_U64 u64PhysAddr, MS_VIRT u64VirtAddr, MS_SIZE u64MapSize, MS_BOOL bNonCache)
{
    MS_BOOL ret = FALSE, mNonCache = MMAP_NONCACHE;
    MS_U32 i;

    _MPool_Check_aligned(u64PhysAddr, u64MapSize);
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u64PhysAddr = %tX, u64MapSize = %tX, u64VirtAddr = %tX\n", (ptrdiff_t)u64PhysAddr, (ptrdiff_t)u64MapSize, (ptrdiff_t)u64VirtAddr));

    if(bNonCache)
        mNonCache = MMAP_NONCACHE;
    else
        mNonCache = MMAP_CACHE;

    //multi-thread access
    mutex_lock(&_MsOS_MPool_Mutex);

    for (i = 0; i < MAX_MAPPINGSIZE; i++)
    {
        if((mpool_info[i].bIsUsed == false) || (mpool_info[i].bNonCache != mNonCache))
            continue;

        if((mpool_info[i].pVirtStart == u64VirtAddr)
            && (mpool_info[i].pPhyaddr == u64PhysAddr)
            && (mpool_info[i].u32MpoolSize == u64MapSize))
        {
            ret = TRUE;
            memset(&mpool_info[i], 0, sizeof(mpool_info[i]));
            break;
        }
    }

    mutex_unlock(&_MsOS_MPool_Mutex);
    return ret;
}
EXPORT_SYMBOL(MsOS_MPool_Remove_PA2VARange);
MODULE_LICENSE("GPL");