1 //<MStar Software>
2 //******************************************************************************
3 // MStar Software
4 // Copyright (c) 2010 - 2012 MStar Semiconductor, Inc. All rights reserved.
5 // All software, firmware and related documentation herein ("MStar Software") are
6 // intellectual property of MStar Semiconductor, Inc. ("MStar") and protected by
7 // law, including, but not limited to, copyright law and international treaties.
8 // Any use, modification, reproduction, retransmission, or republication of all
9 // or part of MStar Software is expressly prohibited, unless prior written
10 // permission has been granted by MStar.
11 //
12 // By accessing, browsing and/or using MStar Software, you acknowledge that you
13 // have read, understood, and agree, to be bound by below terms ("Terms") and to
14 // comply with all applicable laws and regulations:
15 //
16 // 1. MStar shall retain any and all right, ownership and interest to MStar
17 // Software and any modification/derivatives thereof.
18 // No right, ownership, or interest to MStar Software and any
19 // modification/derivatives thereof is transferred to you under Terms.
20 //
21 // 2. You understand that MStar Software might include, incorporate or be
22 // supplied together with third party`s software and the use of MStar
23 // Software may require additional licenses from third parties.
24 // Therefore, you hereby agree it is your sole responsibility to separately
25 // obtain any and all third party right and license necessary for your use of
26 // such third party`s software.
27 //
28 // 3. MStar Software and any modification/derivatives thereof shall be deemed as
29 // MStar`s confidential information and you agree to keep MStar`s
30 // confidential information in strictest confidence and not disclose to any
31 // third party.
32 //
33 // 4. MStar Software is provided on an "AS IS" basis without warranties of any
34 // kind. Any warranties are hereby expressly disclaimed by MStar, including
35 // without limitation, any warranties of merchantability, non-infringement of
36 // intellectual property rights, fitness for a particular purpose, error free
37 // and in conformity with any international standard. You agree to waive any
38 // claim against MStar for any loss, damage, cost or expense that you may
39 // incur related to your use of MStar Software.
40 // In no event shall MStar be liable for any direct, indirect, incidental or
41 // consequential damages, including without limitation, lost of profit or
42 // revenues, lost or damage of data, and unauthorized system use.
43 // You agree that this Section 4 shall still apply without being affected
44 // even if MStar Software has been modified by MStar in accordance with your
45 // request or instruction for your use, except otherwise agreed by both
46 // parties in writing.
47 //
48 // 5. If requested, MStar may from time to time provide technical supports or
49 // services in relation with MStar Software to you for your use of
50 // MStar Software in conjunction with your or your customer`s product
51 // ("Services").
52 // You understand and agree that, except otherwise agreed by both parties in
53 // writing, Services are provided on an "AS IS" basis and the warranty
54 // disclaimer set forth in Section 4 above shall apply.
55 //
56 // 6. Nothing contained herein shall be construed as by implication, estoppels
57 // or otherwise:
58 // (a) conferring any license or right to use MStar name, trademark, service
59 // mark, symbol or any other identification;
60 // (b) obligating MStar or any of its affiliates to furnish any person,
61 // including without limitation, you and your customers, any assistance
62 // of any kind whatsoever, or any information; or
63 // (c) conferring any license or right under any intellectual property right.
64 //
65 // 7. These terms shall be governed by and construed in accordance with the laws
66 // of Taiwan, R.O.C., excluding its conflict of law rules.
67 // Any and all dispute arising out hereof or related hereto shall be finally
68 // settled by arbitration referred to the Chinese Arbitration Association,
69 // Taipei in accordance with the ROC Arbitration Law and the Arbitration
70 // Rules of the Association by three (3) arbitrators appointed in accordance
71 // with the said Rules.
72 // The place of arbitration shall be in Taipei, Taiwan and the language shall
73 // be English.
74 // The arbitration award shall be final and binding to both parties.
75 //
76 //******************************************************************************
77 //<MStar Software>
78 ////////////////////////////////////////////////////////////////////////////////
79 //
80 // Copyright (c) 2008-2009 MStar Semiconductor, Inc.
81 // All rights reserved.
82 //
83 // Unless otherwise stipulated in writing, any and all information contained
84 // herein regardless in any format shall remain the sole proprietary of
85 // MStar Semiconductor Inc. and be kept in strict confidence
86 // ("MStar Confidential Information") by the recipient.
87 // Any unauthorized act including without limitation unauthorized disclosure,
88 // copying, use, reproduction, sale, distribution, modification, disassembling,
89 // reverse engineering and compiling of the contents of MStar Confidential
90 // Information is unlawful and strictly prohibited. MStar hereby reserves the
91 // rights to any and all damages, losses, costs and expenses resulting therefrom.
92 //
93 ////////////////////////////////////////////////////////////////////////////////
94
95 #include <unistd.h> //close
96 #include <fcntl.h> // open
97 #include <sys/ioctl.h> // ioctl
98 #include <sys/mman.h>
99 #include "MsCommon.h"
100 #include "halMPool.h"
101 #include "halCHIP.h"
102 #include "drvMMIO.h"
103 #include <string.h>
104 #include <errno.h>
105 #include <pthread.h>
106
107 #if defined(CONFIG_UTOPIA_FRAMEWORK_KERNEL_DRIVER)
108 #include "MsOS.h"
109 #include "utopia.h"
110 #endif
111
112 #if defined (ANDROID)
113 #include <cutils/log.h>
114 #endif
115
116 #if defined (TV_OS)
117 #include "mdrv_semutex_io.h"
118 #include <pthread.h>
119 #endif
120
121 #include "mdrv_cma_pool_private.h"
122
//--------------------------------------------------------------------------------------------------
//  Internal define
//--------------------------------------------------------------------------------------------------
#ifdef _Debug
#define MPOOL_DEBUG
#endif

//--------------------------------------------------------------------------------------------------
//  New Internal Variable of MPool Implementation
//--------------------------------------------------------------------------------------------------
#define SUPPORT_PARTIAL_MAPPING 1
static MS_U32 MPOOL_MAPPING;
// Maximum number of simultaneously tracked pool mappings (size of mpool_info[]).
#define MAX_MAPPINGSIZE 200

#define MMAP_NONCACHE true
#define MMAP_CACHE false

#define ION_ALIGN 0x1000 // align to PAGE_SIZE, remember this ALIGN must <= PAGE_SIZE

#if (defined ANDROID) && (defined TV_OS)
#include <cutils/log.h>
#define printf LOGD
#ifndef LOGD // android 4.1 rename LOGx to ALOGx
#define LOGD ALOGD
#endif
#endif
/*
static MS_BOOL _Mpool_use[MAX_MAPPINGSIZE];//={-1}
static MS_U8 _Mpool_NonCache[MAX_MAPPINGSIZE];

static void* _pMpoolAddrVStart[MAX_MAPPINGSIZE];// = { NULL, NULL};
static void* _pMpoolAddrVEnd[MAX_MAPPINGSIZE];// = { NULL, NULL};
static MS_U32 _u32MpoolAddrPhys[MAX_MAPPINGSIZE];// = { 0, 0};
static MS_U32 _u64MpoolSize[MAX_MAPPINGSIZE];// = { 0, 0};
static MS_U32 _u32MPoolVa2PaOff[MAX_MAPPINGSIZE];// = { 0, 0};
*/

//--------------------------------------------------------------------------------------------------
//  Internal Variable
//--------------------------------------------------------------------------------------------------
// Device file descriptors; -1 means "not opened yet".
static MS_S32 _s32MPoolFd = -1;     // /dev/malloc
static MS_S32 _s32FdION = -1;       // /dev/ion
// Legacy single-pool mapping bounds: index 0 = cached, index 1 = non-cached.
static void* _pAddrVirtStart[2] = { NULL, NULL};
static void* _pAddrVirtEnd[2] = { NULL, NULL};
#if (!SUPPORT_PARTIAL_MAPPING)
static MS_SIZE _tAddrPhys[2] = { 0, 0};
static MS_U32 _u32Size[2] = { 0, 0};
#endif
// VA-to-PA offsets for the legacy cached / non-cached mappings.
static MS_SIZE _tVa2PaOff[2] = { 0, 0};

// PM register base, filled by MDrv_MMIO_GetBASE() in MsOS_MPool_Init().
static MS_VIRT u32MpoolRegBase;

// Book-keeping for one user-space mapping of a memory pool.
typedef struct
{
    MS_VIRT u64VirtStart;   // user-space VA where the pool is mapped
    MS_VIRT u64VirtEnd;     // one past the last mapped VA
    MS_U64 u64Phyaddr;      // physical base address of the pool
    MS_U64 u64MpoolSize;    // mapping size in bytes
    MS_BOOL bIsUsed;        // TRUE when this slot is occupied
    MS_SIZE s32V2Poff;      // (VA - PA) offset used for address translation
    MS_BOOL bNonCache;      // TRUE: mapped non-cached
    MS_U8 u8MiuSel;         // MIU the physical address belongs to
    MS_BOOL bIsDyn;         // dynamic mapping: mmap deferred to _MPOOL_DELAY_BINDING()
} MPOOL_INFO;

static MsOSMPool_DbgLevel _u32MPoolDBGLevel;
static MPOOL_INFO mpool_info[MAX_MAPPINGSIZE];
// Serializes the MALLOC_IOC_MPOOL_SET/CACHE + mmap sequence (see _MPOOL_DELAY_BINDING).
static pthread_mutex_t _MsOS_MPool_Mutex = PTHREAD_MUTEX_INITIALIZER;

#define DLMALLOC_DBG 0
#if DLMALLOC_DBG
#ifdef ENABLE_KERNEL_DLMALLOC
#define DLMALLOC_INFO_CNT 10
#define DETAIL_DLMALLOC_INFO_CNT 200

typedef struct {
    void *allocated_start_va;   // VA returned by the allocator
    size_t allocated_size;      // size of that allocation
} DETAIL_DLMALLOC_INFO;

typedef struct {
    mspace msp;
    DETAIL_DLMALLOC_INFO detail_dlmalloc_info[DETAIL_DLMALLOC_INFO_CNT];
} DLMALLOC_INFO;

static DLMALLOC_INFO mstar_dlmalloc_info[DLMALLOC_INFO_CNT];
#endif
#endif

#if defined (TV_OS)
// /dev/semutex fd and the cached shared-memory VA, guarded by _MsOS_Openfd_Mutex.
static MS_S32 _s32SemutexFD = -1;
static void* _psAddr;
static pthread_mutex_t _MsOS_Openfd_Mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
//--------------------------------------------------------------------------------------------------
//  Internal macros
//--------------------------------------------------------------------------------------------------
#ifdef MPOOL_DEBUG
#if defined (ANDROID)
#ifndef LOGI // android 4.1 rename LOGx to ALOGx
#define LOGI ALOGI
#endif
#define MPOOL_ERROR(fmt, args...) LOGI("[MPOOL USER ERR][%06d] " fmt, __LINE__, ## args)
#define MPOOL_WARN(fmt, args...) LOGI("[MPOOL USER WARN][%06d] " fmt, __LINE__, ## args)
#define MPOOL_PRINT(fmt, args...) LOGI("[MPOOL USER][%06d] " fmt, __LINE__, ## args)
#else
#define MPOOL_ERROR(fmt, args...) printf("[MPOOL USER ERR][%06d] " fmt, __LINE__, ## args)
#define MPOOL_WARN(fmt, args...) printf("[MPOOL USER WARN][%06d] " fmt, __LINE__, ## args)
#define MPOOL_PRINT(fmt, args...) printf("[MPOOL USER][%06d] " fmt, __LINE__, ## args)
#endif

#define MPOOL_ASSERT(_bool, _f) if (!(_bool)) { (_f); MS_ASSERT(0); }
#else
#define MPOOL_ERROR(fmt, args...) do{} while (0)
#define MPOOL_WARN(fmt, args...) do{} while (0)
#define MPOOL_PRINT(fmt, args...) do{} while (0)
#define MPOOL_ASSERT(_bool, _f) if (!(_bool)) { (_f); }
#endif

// TRUE when 'addr' falls inside the legacy cached (index 0) / non-cached (index 1) mapping.
#define MPOOL_IS_CACHE_ADDR(addr) ( ( ((MS_VIRT)_pAddrVirtStart[0] <= (MS_VIRT)(addr)) && ((MS_VIRT)_pAddrVirtEnd[0] > (MS_VIRT)addr) ) ? TRUE : FALSE )
#define MPOOL_IS_UNCACHE_ADDR(addr) ( ( ((MS_VIRT)_pAddrVirtStart[1] <= (MS_VIRT)(addr)) && ((MS_VIRT)_pAddrVirtEnd[1] > (MS_VIRT)addr) ) ? TRUE : FALSE )

// Execute 'x' only when the current debug level is at least 'debug_level'.
#define MPOOL_DBG_MSG(debug_level, x) do { if (_u32MPoolDBGLevel >= (debug_level)) (x); } while(0)

// Cross-process mutex guarding map/unmap operations; created in MsOS_MPool_Init().
static MS_S32 _s32MapMutexId = -1;
static MS_U8 _u8MapMutex[] = {"MPool_Mutex"};
249
250 //--------------------------------------------------------------------------------------------------
251 // Implementation
252 //--------------------------------------------------------------------------------------------------
MsOS_MPool_IsInitialized(void)253 MS_BOOL MsOS_MPool_IsInitialized(void)
254 {
255 MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_Release);
256
257 if (0 <= _s32MPoolFd)
258 {
259 printf("\033[35mFunction = %s, Line = %d, [Warning] MPOOL is already Initialized\033[m\n", __PRETTY_FUNCTION__, __LINE__);
260 return TRUE;
261 }
262
263 return FALSE;
264 }
265
MsOS_MPool_Init(void)266 MS_BOOL MsOS_MPool_Init(void)
267 {
268 MS_PHY u32size;
269
270 MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_Release);
271
272 if (0 <= _s32MPoolFd)
273 {
274 MPOOL_WARN("%s is initiated more than once\n", __FUNCTION__);
275 return FALSE;
276 }
277
278 memset(mpool_info, 0, sizeof(MPOOL_INFO)*MAX_MAPPINGSIZE);
279 #if DLMALLOC_DBG
280 memset(mstar_dlmalloc_info, 0, sizeof(DLMALLOC_INFO)*DLMALLOC_INFO_CNT);
281 #endif
282
283 if (0 > (_s32MPoolFd = open("/dev/malloc", O_RDWR)))
284 {
285 MPOOL_ERROR("Open /dev/malloc fail\n");
286 MS_ASSERT(0);
287 return FALSE;
288 }
289 else
290 {
291 }
292
293 //Create Mutex
294 if(_s32MapMutexId < 0)
295 {
296 MsOS_Init(); // Before MsOS_CreateMutex, need to do MsOS_Init()
297 _s32MapMutexId = MsOS_CreateMutex(E_MSOS_FIFO, (char*)_u8MapMutex, MSOS_PROCESS_SHARED);
298 }
299 if(_s32MapMutexId < 0)
300 {
301 printf("%s MsOS_CreateMutex failed!!\n", __FUNCTION__);
302 return FALSE;
303 }
304
305 #if defined (TV_OS)
306 pthread_mutex_lock(&_MsOS_Openfd_Mutex);
307 if(_s32SemutexFD < 0)
308 {
309 _s32SemutexFD = open("/dev/semutex", O_RDWR);
310 if(0 > _s32SemutexFD)
311 {
312 MPOOL_ERROR("Open /dev/semutex fail, _s32SemutexFD = %d \n",_s32SemutexFD);
313 MS_ASSERT(0);
314 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
315 return FALSE;
316 }
317 }
318 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
319 #endif
320
321 if(!MDrv_MMIO_GetBASE(&u32MpoolRegBase, &u32size, MS_MODULE_PM))
322 {
323 MPOOL_ERROR("%s Get IOMAP Base faill!\n",__FUNCTION__);
324 return FALSE;
325 }
326
327 return TRUE;
328 }
329 #if defined(MSOS_TYPE_LINUX_KERNEL)
330 EXPORT_SYMBOL(MsOS_MPool_Init);
331 #endif
332
_MPOOL_MutexLock(void)333 static MS_BOOL _MPOOL_MutexLock(void)
334 {
335 if (_s32MapMutexId != -1)
336 {
337 return MsOS_ObtainMutex(_s32MapMutexId, MSOS_WAIT_FOREVER);
338 }
339 else
340 {
341 return FALSE;
342 }
343 }
344
MsOS_ION_IsInitialized(void)345 MS_BOOL MsOS_ION_IsInitialized(void)
346 {
347 MsOS_MPool_SetDbgLevel(E_MsOSMPool_DBG_Release);
348
349 if (0 <= _s32FdION)
350 {
351 printf("\033[35mFunction = %s, Line = %d, [Warning] ION is already Initialized\033[m\n", __PRETTY_FUNCTION__, __LINE__);
352 return TRUE;
353 }
354 else
355 printf("\033[35mFunction = %s, Line = %d, ION is not Initialized\033[m\n", __PRETTY_FUNCTION__, __LINE__);
356
357 return FALSE;
358 }
359
MsOS_ION_Init(void)360 MS_BOOL MsOS_ION_Init(void)
361 {
362 printf("\033[35mOpen /dev/ion ...\033[m\n");
363 if (0 <= _s32FdION)
364 {
365 printf("\033[35m ION Fd is opened\033[m\n");
366 return FALSE;
367 }
368
369 if (0 > (_s32FdION = open("/dev/ion", O_RDWR)))
370 {
371 printf("\033[35mOpen /dev/ion fail\033[m\n");
372 return FALSE;
373 }
374
375 printf("\033[35mOpen /dev/ion success\033[m\n");
376 return TRUE;
377 }
378
_MPOOL_MutexUnlock(void)379 static MS_BOOL _MPOOL_MutexUnlock(void)
380 {
381 if (_s32MapMutexId != -1)
382 {
383 return MsOS_ReleaseMutex(_s32MapMutexId);
384 }
385 else
386 {
387 return FALSE;
388 }
389 }
390
MsOS_MPool_Close(void)391 MS_BOOL MsOS_MPool_Close(void)
392 {
393 if (0 > _s32MPoolFd)
394 {
395 MPOOL_WARN("%s is closed before initiated\n", __FUNCTION__);
396 return FALSE;
397 }
398
399 if(_s32MapMutexId != -1)
400 {
401 MsOS_DeleteMutex(_s32MapMutexId);
402 _s32MapMutexId = -1;
403 }
404
405 close(_s32MPoolFd);
406 _s32MPoolFd = -1;
407
408 #if defined (TV_OS)
409 pthread_mutex_lock(&_MsOS_Openfd_Mutex);//coverity Data race condition
410 if (0 > _s32SemutexFD)
411 {
412 MPOOL_WARN("%s is closed before initiated\n", __FUNCTION__);
413 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);//coverity Data race condition
414 return FALSE;
415 }
416 close(_s32SemutexFD);
417 _s32SemutexFD = -1;
418 _psAddr = NULL;
419 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);//coverity Data race condition
420 #endif
421
422 return TRUE;
423 }
424
MsOS_ION_Close(void)425 MS_BOOL MsOS_ION_Close(void)
426 {
427 if(0 > _s32FdION)
428 {
429 MPOOL_WARN("%s is closed before initiated\n", __FUNCTION__);
430 return FALSE;
431 }
432
433 close(_s32FdION);
434 _s32MPoolFd = -1;
435
436 return TRUE;
437 }
438
439 #if defined (TV_OS)
440 //~!~get sharememory size
MsOS_GetSHMSize(void)441 MS_U32 MsOS_GetSHMSize(void)
442 {
443 MS_U32 u32SHMSize = 0;
444
445 pthread_mutex_lock(&_MsOS_Openfd_Mutex);
446 if(_s32SemutexFD < 0)
447 {
448 _s32SemutexFD = open("/dev/semutex", O_RDWR);
449
450 if(0 > _s32SemutexFD)
451 {
452 MPOOL_ERROR("Open /dev/semutex fail, _s32SemutexFD = %d \n",_s32SemutexFD);
453 MS_ASSERT(0);
454 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
455 return 0;
456 }
457 }
458 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
459
460
461 if(ioctl(_s32SemutexFD,MDRV_SEMUTEX_GET_SHMSIZE, &u32SHMSize))
462 {
463 MPOOL_ERROR("get SHM size failed!\n");
464 MS_ASSERT(0);
465 return 0;
466 }
467
468 return u32SHMSize;
469 }
470
471 //~!~ expand sharemem
472 #ifdef CONFIG_UTOPIA_SHM_EXPAND_SUPPORT
MsOS_SHM_Expand(MS_U32 u32ShmSize,MS_U32 u32ExpandSize)473 MS_BOOL MsOS_SHM_Expand(MS_U32 u32ShmSize, MS_U32 u32ExpandSize)
474 {
475 printf("[msos] expanding shared memory %d\n", u32ExpandSize);
476 if(_s32SemutexFD < 0)
477 {
478 MPOOL_ERROR("%s: mapping sharemem with invalid dev fd \n", __FUNCTION__);
479 return FALSE;
480 }
481 u32ExpandSize += u32ShmSize;
482
483 if(ioctl(_s32SemutexFD,MDRV_SEMUTEX_EXPAND_SHAREMEMORY,&u32ExpandSize))
484 {
485 MPOOL_ERROR("create an anonymous sharememory failed! \n");
486 close(_s32SemutexFD);
487 return FALSE;
488 }
489
490 return TRUE;
491 }
492 #endif
493
494 //~!~ map sharemem
MsOS_Mapping_SharedMem(MS_U32 u32ShmSize,MS_BOOL * bInit)495 MS_VIRT MsOS_Mapping_SharedMem(MS_U32 u32ShmSize,MS_BOOL *bInit)
496 {
497 //void * pAddr; unused variable - fixed compile warning
498 *bInit = FALSE;
499
500 if(_s32SemutexFD < 0)
501 {
502 MPOOL_ERROR("%s: mapping sharemem with invalid dev fd \n", __FUNCTION__);
503 //return NULL; fixed compile warning
504 return 0;
505 }
506
507 //!!!
508 //there is a limitation, you must map sharemem first with the most largest len!
509 //that is , if you map with len A, then map with B(B > A), problem happens!!!
510
511 if(_psAddr)
512 {
513 MPOOL_ERROR("~!~ 2 MsOS_Mapping_SharedMem = %p \n",_psAddr);
514 return (MS_VIRT)_psAddr;
515 }
516
517 MPOOL_ERROR("~!~!~%s: pid = %d \n", __FUNCTION__,getpid());
518
519 if(ioctl(_s32SemutexFD,MDRV_SEMUTEX_CREATE_SHAREMEMORY,&u32ShmSize))
520 {
521 MPOOL_ERROR("create an anonymous sharememory failed! \n");
522 close(_s32SemutexFD);
523 //return NULL; fixed compile warning
524 return 0;
525 }
526
527 *bInit = ioctl(_s32SemutexFD,MDRV_SEMUTEX_QUERY_ISFIRST_SHAREMEMORY,NULL);
528 #ifdef CONFIG_UTOPIA_SHM_EXPAND_SUPPORT
529 _psAddr = mmap(NULL,MsOS_GetSHMSize(),PROT_READ | PROT_WRITE,MAP_SHARED,_s32SemutexFD,0);
530 #else
531 _psAddr = mmap(NULL,u32ShmSize,PROT_READ | PROT_WRITE,MAP_SHARED,_s32SemutexFD,0);
532 #endif
533
534 //LOGE("u32ShmSize= %d, _s32SemutexFD = %d, pAddr= %p \n",u32ShmSize,_s32SemutexFD,_psAddr);
535
536 if (_psAddr == MAP_FAILED)
537 {
538 MPOOL_ERROR("[%s][%d] fail\n", __FUNCTION__, __LINE__);
539 close(_s32SemutexFD);
540 //return NULL; fixed compile warning
541 return 0;
542 }
543
544 //LOGE("~!~ 1 MsOS_Mapping_SharedMem = %p \n",(MS_U32)_psAddr);
545
546 //*bInit = TRUE;
547 return (MS_VIRT)_psAddr;
548
549 }
550
551 //~!~create namedmutex
MsOS_CreateNamedMutex(MS_S8 * ps8MutexName)552 MS_S32 MsOS_CreateNamedMutex(MS_S8 *ps8MutexName)
553 {
554 MS_S32 s32Index;
555
556 pthread_mutex_lock(&_MsOS_Openfd_Mutex);
557 if(_s32SemutexFD < 0)
558 {
559 _s32SemutexFD = open("/dev/semutex", O_RDWR);
560
561 if(0 > _s32SemutexFD)
562 {
563 MPOOL_ERROR("Open /dev/semutex fail, _s32SemutexFD = %d \n",_s32SemutexFD);
564 MS_ASSERT(0);
565 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
566 return FALSE;
567 }
568 }
569 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
570
571 #if 0
572 CREATE_MUTEX_ARG createmutex;
573 createmutex.pnameaddr = (unsigned long)ps8MutexName;
574 createmutex.length = strlen(ps8MutexName);
575 s32Index = ioctl(_s32SemutexFD,MDRV_SEMUTEX_CREATE_MUTEX,&createmutex);
576 #else
577 s32Index = ioctl(_s32SemutexFD,MDRV_SEMUTEX_CREATE_MUTEX,ps8MutexName);
578 #endif
579
580 if(s32Index < MUTEX_INDEX_BEGIN)
581 {
582 MPOOL_WARN("the returned mutex index invalid!!! \n");
583 return -1;
584 }
585
586 //s32Index -= MUTEX_INDEX_BEGIN;
587
588 return s32Index;
589 }
590
591 //~!~lock mutex
MsOS_LockMutex(MS_S32 u32Index,MS_S32 flag)592 MS_BOOL MsOS_LockMutex(MS_S32 u32Index,MS_S32 flag)
593 {
594 MS_S32 res = 0;
595 if(_s32SemutexFD < 0)
596 {
597 MPOOL_WARN("%s: Lock mutex with invalid dev fd \n", __FUNCTION__);
598 return FALSE;
599 }
600
601 if(u32Index < MUTEX_INDEX_BEGIN)
602 {
603 MPOOL_WARN("%s invalid mutex index : u32Index = %d,flag = %d \n", __FUNCTION__,u32Index,flag);
604 return FALSE;
605 }
606
607 if(flag == 0)
608 {
609 res = -1;
610 while(res == -1)
611 {
612 res = ioctl(_s32SemutexFD,MDRV_SEMUTEX_LOCK,&u32Index);
613 if(res == -1 && errno == EINTR)
614 usleep(1000);
615 }
616 }
617 else if(flag == -1)
618 res = ioctl(_s32SemutexFD,MDRV_SEMUTEX_TRY_LOCK,&u32Index);
619 else
620 {
621 LOCKARG lockarg;
622 lockarg.index = u32Index;
623 lockarg.time = flag;
624 res = ioctl(_s32SemutexFD,MDRV_SEMUTEX_LOCK_WITHTIMEOUT,&lockarg);
625 }
626 if(res == 0)
627 return TRUE;
628 else
629 return FALSE;
630 }
631
632
633 //~!~lock mutex
MsOS_UnlockMutex(MS_S32 u32Index,MS_S32 flag)634 MS_BOOL MsOS_UnlockMutex(MS_S32 u32Index, MS_S32 flag)
635 {
636 MS_S32 res = 0;
637
638 if(_s32SemutexFD < 0)
639 {
640 MPOOL_WARN("%s: Lock mutex with invalid dev fd \n", __FUNCTION__);
641 return FALSE;
642 }
643
644 if(u32Index < MUTEX_INDEX_BEGIN)
645 {
646 MPOOL_WARN("%s invalid mutex index : u32Index = %d,flag = %d \n", __FUNCTION__,u32Index,flag);
647 return FALSE;
648 }
649
650 res = ioctl(_s32SemutexFD,MDRV_SEMUTEX_UNLOCK,&u32Index);
651
652 if(res == 0)
653 return TRUE;
654 else
655 return FALSE;
656 }
657
// Enable or disable unlocking mutex 'u32Index' from a thread other than the
// one that locked it. Returns TRUE when the driver accepts the setting.
MS_BOOL MsOS_CrossThreadUnlockMutex(MS_S32 u32Index, MS_BOOL bEnable)
{
    CROSS_THREAD_UNLOCK_INFO unlock_info = {0};

    if(_s32SemutexFD < 0)
    {
        MPOOL_WARN("%s: Lock mutex with invalid dev fd \n", __FUNCTION__);
        return FALSE;
    }

    if(u32Index < MUTEX_INDEX_BEGIN)
    {
        MPOOL_WARN("%s invalid mutex index : u32Index = %d \n", __FUNCTION__,u32Index);
        return FALSE;
    }

    unlock_info.index = u32Index;
    // Keep the strict TRUE comparison: any value other than exactly TRUE disables.
    unlock_info.flag = (TRUE == bEnable) ? E_CROSS_THREAD_UNLOCK_ENABLE : E_CROSS_THREAD_UNLOCK_DISABLE;

    if (0 != ioctl(_s32SemutexFD, MDRV_SEMUTEX_SET_CROSS_THREAD_UNLOCK, &unlock_info))
    {
        perror("ioctl MDRV_SEMUTEX_SET_CROSS_THREAD_UNLOCK");
        return FALSE;
    }

    return TRUE;
}
684
685 //~!~lock mutex
MsOS_DeleteNamedMutexbyIndex(MS_S32 u32Index)686 MS_BOOL MsOS_DeleteNamedMutexbyIndex(MS_S32 u32Index)
687 {
688 MS_S32 res = 0;
689
690 if(_s32SemutexFD < 0)
691 {
692 MPOOL_WARN("%s: Lock mutex with invalid dev fd \n", __FUNCTION__);
693 return FALSE;
694 }
695
696 if(u32Index < MUTEX_INDEX_BEGIN)
697 {
698 MPOOL_WARN("%s invalid mutex index \n", __FUNCTION__);
699 return FALSE;
700 }
701
702 res = ioctl(_s32SemutexFD,MDRV_SEMUTEX_DEL_MUTEX,&u32Index);
703
704 if(res == 0)
705 return TRUE;
706 else
707 return FALSE;
708 }
709 #endif
710
711 //~!~create named semaphore
MsOS_CreateNamedSemaphore(char * ps8SemaName,MS_U32 u32SemaNum)712 MS_S32 MsOS_CreateNamedSemaphore(char *ps8SemaName, MS_U32 u32SemaNum)
713 {
714 CREATE_SEMA_ARG semaarg;
715 MS_S32 s32Index;
716 MS_U32 u32MaxLen;
717
718 pthread_mutex_lock(&_MsOS_Openfd_Mutex);
719 if(_s32SemutexFD < 0)
720 {
721 _s32SemutexFD = open("/dev/semutex", O_RDWR);
722
723 if(0 > _s32SemutexFD)
724 {
725 MPOOL_ERROR("Open /dev/semutex fail, _s32SemutexFD = %d \n",_s32SemutexFD);
726 MS_ASSERT(0);
727 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
728 return -1;
729 }
730 }
731 pthread_mutex_unlock(&_MsOS_Openfd_Mutex);
732
733 if (NULL == ps8SemaName)
734 return -1;
735 if (strlen((const char *)ps8SemaName) >= (SEMA_NAME_LEN-1))
736 {
737 MPOOL_WARN("%s: Warning strlen(%s) is longer than SEMA_NAME_LEN(%d). Oversize char will be discard.\n",
738 __FUNCTION__, ps8SemaName, SEMA_NAME_LEN);
739 }
740 if (0 == (u32MaxLen = MIN(strlen((const char *)ps8SemaName), (SEMA_NAME_LEN-1))))
741 {
742 MPOOL_ERROR("%s: Input string is empty.\n", __FUNCTION__);
743 return -1;
744 }
745 strncpy((char*)semaarg.semaname, (const char*)ps8SemaName, u32MaxLen);
746 semaarg.semaname[u32MaxLen] = '\0';
747 semaarg.semanum = u32SemaNum;
748
749 s32Index = ioctl(_s32SemutexFD, MDRV_SEMUTEX_CREATE_SEMAPHORE, &semaarg);
750
751 if(s32Index < SEMAPHORE_INDEX_BEGIN)
752 {
753 MPOOL_WARN("%s the returned mutex index %d invalid!!!\n", __FUNCTION__, s32Index);
754 return -1;
755 }
756
757 return s32Index;
758 }
759
MsOS_ResetNamedSemaphore(MS_S32 u32Index)760 MS_BOOL MsOS_ResetNamedSemaphore(MS_S32 u32Index)
761 {
762 MS_S32 res = 0;
763
764 if(_s32SemutexFD < 0)
765 {
766 MPOOL_WARN("%s: Reset semaphore with invalid dev fd.\n", __FUNCTION__);
767 return FALSE;
768 }
769
770 if(u32Index < SEMAPHORE_INDEX_BEGIN)
771 {
772 MPOOL_WARN("%s invalid semaphore index : u32Index = %d.\n", __FUNCTION__, u32Index);
773 return FALSE;
774 }
775
776 res = ioctl(_s32SemutexFD, MDRV_SEMUTEX_SEMA_RESET, &u32Index);
777
778 if(res == 0)
779 return TRUE;
780 else
781 return FALSE;
782 }
783
MsOS_ObtainNamedSemaphore(MS_S32 u32Index,MS_S32 flag)784 MS_BOOL MsOS_ObtainNamedSemaphore(MS_S32 u32Index, MS_S32 flag)
785 {
786 MS_S32 res = 0;
787
788 if(_s32SemutexFD < 0)
789 {
790 MPOOL_WARN("%s: Obtain semaphore with invalid dev fd.\n", __FUNCTION__);
791 return FALSE;
792 }
793
794 if(u32Index < SEMAPHORE_INDEX_BEGIN)
795 {
796 MPOOL_WARN("%s invalid semaphore index : u32Index = %d.\n", __FUNCTION__, u32Index);
797 return FALSE;
798 }
799
800 if(flag == -1)
801 res = ioctl(_s32SemutexFD, MDRV_SEMUTEX_SEMA_TRY_LOCK, &u32Index);
802 else
803 {
804 res = -1;
805 while(res == -1)
806 {
807 res = ioctl(_s32SemutexFD, MDRV_SEMUTEX_SEMA_LOCK, &u32Index);
808 if(res == -1 && errno == EINTR)
809 usleep(1000);
810 else
811 {
812 MPOOL_ERROR("%s lock semaphore error: %d.\n", __FUNCTION__, errno);
813 break;
814 }
815 }
816 }
817
818 if(res == 0)
819 return TRUE;
820 else
821 return FALSE;
822 }
823
MsOS_ReleaseNamedSemaphore(MS_S32 u32Index,MS_S32 flag)824 MS_BOOL MsOS_ReleaseNamedSemaphore(MS_S32 u32Index, MS_S32 flag)
825 {
826 MS_S32 res = 0;
827
828 if(_s32SemutexFD < 0)
829 {
830 MPOOL_WARN("%s: Obtain semaphore with invalid dev fd.\n", __FUNCTION__);
831 return FALSE;
832 }
833
834 if(u32Index < SEMAPHORE_INDEX_BEGIN)
835 {
836 MPOOL_WARN("%s invalid semaphore index : u32Index = %d.\n", __FUNCTION__, u32Index);
837 return FALSE;
838 }
839
840 res = ioctl(_s32SemutexFD, MDRV_SEMUTEX_SEMA_UNLOCK, &u32Index);
841
842 if(res == 0)
843 return TRUE;
844 else
845 return FALSE;
846 }
847
848
849 // @FIXME: support one pool at this stage
// Legacy single-pool accessor: map the kernel memory pool (cached when
// bNonCache is FALSE, non-cached otherwise) and return its VA/PA/size
// through the optional out-pointers.
// NOTE: SUPPORT_PARTIAL_MAPPING is 1 in this file, so the whole body is
// compiled out and this function is a no-op that returns TRUE.
MS_BOOL MsOS_MPool_Get(void** ppAddrVirt, MS_U32* pu32AddrPhys, MS_U32* pu32Size, MS_BOOL bNonCache)
{
#if(!SUPPORT_PARTIAL_MAPPING)
    if(MPOOL_MAPPING) return TRUE; //we had already mapped by function MsOS_MPool_Mapping()

    DevMalloc_MPool_Info_t stPoolInfo;
    MS_VIRT ptrAddrVirt;
    MS_U32 bCache = (bNonCache) ? 0: 1;
    // Index 0 tracks the cached mapping, index 1 the non-cached one.
    MS_U32 u32Idx = (bCache) ? 0: 1;

    // First call for this cache attribute: query the pool and mmap it.
    if (NULL == _pAddrVirtStart[u32Idx])
    {
        if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_INFO, &stPoolInfo))
        {
            return FALSE;
        }
        // Tell the driver which cache attribute the upcoming mmap should use.
        if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_CACHE, &bCache))
        {
            return FALSE;
        }
        if ((MS_VIRT)MAP_FAILED == (ptrAddrVirt = (MS_VIRT)mmap(0, stPoolInfo.u32Size, PROT_READ | PROT_WRITE, MAP_SHARED, _s32MPoolFd, 0)))
        {
            MS_ASSERT(0);
            return FALSE;
        }

        // Cache the mapping bounds and the VA->PA translation offset.
        _pAddrVirtStart[u32Idx] = (void*)ptrAddrVirt;
        _pAddrVirtEnd[u32Idx] = (void*)(ptrAddrVirt + stPoolInfo.u32Size);
        _tAddrPhys[u32Idx] = stPoolInfo.u32Addr;
        _u32Size[u32Idx] = stPoolInfo.u32Size;
        _tVa2PaOff[u32Idx] = (MS_SIZE)_pAddrVirtStart[u32Idx] - (MS_SIZE)_tAddrPhys[u32Idx];
    }

    // Out-parameters are all optional.
    if (ppAddrVirt)
    {
        *ppAddrVirt = _pAddrVirtStart[u32Idx];
    }
    if (pu32AddrPhys)
    {
        *pu32AddrPhys = _tAddrPhys[u32Idx];
    }
    if (pu32Size)
    {
        *pu32Size = _u32Size[u32Idx];
    }
#endif
    return TRUE;
}
898
// Verify a mapping offset and size are both 4 KB (page) aligned.
// Misalignment is treated as a fatal configuration error: complain and assert.
static void _MPool_Check_aligned(MS_U64 u64Offset, MS_U64 u64MapSize)
{
    if (u64Offset & 0xfff)
    {
        printf("!!! Important issue !!!\n");
        printf("The memory mapping address is not aligned, please make sure of that\n");
        MS_ASSERT(0);
    }

    if (u64MapSize & 0xfff)
    {
        printf("!!! Important issue !!!\n");
        printf("The memory mapping size is not aligned, please make sure of that\n");
        MS_ASSERT(0);
    }
}
919
// Perform the deferred mmap for a dynamic pool slot (mpool_info[idx] with
// bIsDyn set): configure the kernel pool via MALLOC_IOC_MPOOL_SET/CACHE,
// mmap it, and fill in the slot's VA bounds and VA-PA offset.
// Returns TRUE on success, FALSE on a MIU mismatch or any ioctl/mmap failure.
// Caller holds _s32MapMutexId (see MsOS_MPool_PA2KSEG1).
static MS_U8 _MPOOL_DELAY_BINDING(MS_S32 idx)
{
    DevMalloc_MPool_Info_t stPoolInfo;
    MS_U64 u64AddrVirt;
    // Driver convention: bCache == 1 means cached, 0 means non-cached.
    MS_U32 bCache = (mpool_info[idx].bNonCache) ? 0: 1;

    MS_U8 u8Check_MiuSel = 0;
    MS_U64 u64Check_Offset = 0;

    stPoolInfo.u64Addr = mpool_info[idx].u64Phyaddr;
    stPoolInfo.u64Size = mpool_info[idx].u64MpoolSize;
    stPoolInfo.u64Interval = MIU_INTERVAL;
    stPoolInfo.u8MiuSel = mpool_info[idx].u8MiuSel;

    /* use u64Phyaddr to get MIU, offset */
    _phy_to_miu_offset(u8Check_MiuSel, u64Check_Offset, stPoolInfo.u64Addr);

    // Sanity check: the MIU derived from the physical address must match
    // the MIU recorded in the slot.
    if(u8Check_MiuSel != stPoolInfo.u8MiuSel)
    {
        printf("\033[35mFunction = %s, Line = %d, [Error] miu_setting is wrong\033[m\n", __PRETTY_FUNCTION__, __LINE__);
        return FALSE;
    }
    // The driver expects a MIU-relative offset, not the absolute PA.
    stPoolInfo.u64Addr = u64Check_Offset;

    //prevent race condition cause mpool mapping size modified in the kernel layer
    pthread_mutex_lock(&_MsOS_MPool_Mutex);

    // Configure the pool, then its cache attribute, then map it; the three
    // steps must not interleave with another thread's sequence.
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_SET, &stPoolInfo))
    {
        pthread_mutex_unlock(&_MsOS_MPool_Mutex);
        return FALSE;
    }
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_CACHE, &bCache))
    {
        pthread_mutex_unlock(&_MsOS_MPool_Mutex);
        return FALSE;
    }
    if ((MS_VIRT)MAP_FAILED == (u64AddrVirt = (MS_VIRT)mmap(0, mpool_info[idx].u64MpoolSize, PROT_READ | PROT_WRITE, MAP_SHARED, _s32MPoolFd, 0)))
    {
        pthread_mutex_unlock(&_MsOS_MPool_Mutex);
        MS_ASSERT(0);
        return FALSE;
    }

    //prevent race condition cause mpool mapping size modified in the kernel layer
    pthread_mutex_unlock(&_MsOS_MPool_Mutex);

    // Record the mapping so VA2PA / PA2VA lookups can translate addresses.
    mpool_info[idx].u64VirtStart = u64AddrVirt;
    mpool_info[idx].u64VirtEnd = (u64AddrVirt + mpool_info[idx].u64MpoolSize);

    mpool_info[idx].s32V2Poff = mpool_info[idx].u64VirtStart - mpool_info[idx].u64Phyaddr;

    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart =%tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtStart));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtEnd));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64Phyaddr));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64MpoolSize));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].s32V2Poff));

    return TRUE;
}
980
// Translate a user-space VA inside any known pool mapping to its physical
// address. Checks the legacy cached/non-cached mappings first, then scans
// the mpool_info[] table. Returns 0xFFFFFFFF when the VA is not covered by
// any mapping.
inline MS_PHY MsOS_MPool_VA2PA(MS_VIRT pAddrVirt)
{
    // Legacy cached mapping (index 0).
    if (MPOOL_IS_CACHE_ADDR(pAddrVirt))
    {
        return (MS_PHY)HAL_MsOS_MPool_VA2PA(pAddrVirt,_tVa2PaOff[0]);
    }

    // Legacy non-cached mapping (index 1).
    if (MPOOL_IS_UNCACHE_ADDR(pAddrVirt))
    {
        return (MS_PHY)HAL_MsOS_MPool_VA2PA(pAddrVirt,_tVa2PaOff[1]);
    }

    // we currently will go to here
    // Scan every in-use slot for one whose VA range contains pAddrVirt.
    MS_S32 i;
    for(i = 0; i <MAX_MAPPINGSIZE; i++)
    {
        if(mpool_info[i].bIsUsed)
        {
            if(!((mpool_info[i].u64VirtStart <= pAddrVirt) && (pAddrVirt < mpool_info[i].u64VirtEnd)))
                continue;


            return (MS_PHY)HAL_MsOS_MPool_VA2PA(pAddrVirt, mpool_info[i].s32V2Poff);
        }
    }
    // No mapping covers this VA: report and return the sentinel value.
#if defined (__aarch64__)
    printf("ERROR!!! CAN'T SUCCESSFULLY CONVERT FROM VA(%lx) TO PA(%x)\n", pAddrVirt,0);
#else
    printf("ERROR!!! CAN'T SUCCESSFULLY CONVERT FROM VA(%tx) TO PA(%x)\n", (ptrdiff_t)pAddrVirt,0);
#endif
    return (MS_PHY) 0xFFFFFFFFUL;
}
1013
MsOS_MPool_PA2KSEG1(MS_PHY pAddrPhys)1014 inline MS_VIRT MsOS_MPool_PA2KSEG1(MS_PHY pAddrPhys) // un-cache
1015 {
1016 MS_VIRT pAddrVirt;
1017
1018 if (NULL == _pAddrVirtStart[1])
1019 {
1020 MS_S32 i;
1021 for(i = 0; i < MAX_MAPPINGSIZE;i ++)
1022 {
1023 if(!mpool_info[i].bIsUsed)
1024 {
1025 continue; // due to unmap, we can not use break
1026 }
1027 if(!mpool_info[i].bNonCache)
1028 {
1029 continue;
1030 }
1031
1032 // we do PA_REGION check here, to check if pAddrPhys is located in mpool_info[i], to prevent we do mmap for some dynamic_mapping but not used
1033 if(! ((mpool_info[i].u64Phyaddr <= pAddrPhys) && (pAddrPhys < (mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize))) )
1034 {
1035 continue;
1036 }
1037
1038 if(mpool_info[i].bIsDyn)
1039 {
1040 _MPOOL_MutexLock();
1041 if((mpool_info[i].s32V2Poff == 0) && (mpool_info[i].u64VirtStart == 0))
1042 {
1043 if(!_MPOOL_DELAY_BINDING(i))
1044 {
1045 #if defined (__aarch64__)
1046 printf("PA2KSEG1 delay binding fail!, pa = %lx", mpool_info[i].u64Phyaddr);
1047 #else
1048 printf("PA2KSEG1 delay binding fail!, pa = %llx", mpool_info[i].u64Phyaddr);
1049 #endif
1050 _MPOOL_MutexUnlock();
1051 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1052 pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,true);
1053 if(pAddrVirt )
1054 return pAddrVirt;
1055
1056 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1057 return (MS_VIRT)NULL;
1058 }
1059 }
1060 _MPOOL_MutexUnlock();
1061 }
1062
1063 pAddrVirt = HAL_MsOS_MPool_PA2VA(pAddrPhys, mpool_info[i].u64Phyaddr, mpool_info[i].u64MpoolSize, mpool_info[i].s32V2Poff, ENABLE_PARTIAL_MAPPING);
1064
1065 if(pAddrVirt != 0)
1066 {
1067 return pAddrVirt;
1068 }
1069 else
1070 {
1071 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1072 pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,true);
1073 if(pAddrVirt )
1074 return pAddrVirt;
1075 }
1076 }
1077
1078 MS_S32 idx;
1079 for(idx = 0; idx < MAX_MAPPINGSIZE; idx++)
1080 {
1081 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart =%tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtStart));
1082 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtEnd));
1083 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64Phyaddr));
1084 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64MpoolSize));
1085 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].s32V2Poff));
1086 }
1087 MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1,printf("MsOS_MPool_PA2KSEG1 return NULL pAddrPhys =%llX\n", (unsigned long long)pAddrPhys));
1088
1089 //assert(0);
1090 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1091 pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,true);
1092 if(pAddrVirt )
1093 return pAddrVirt;
1094
1095 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1096 return (MS_VIRT) 0x0;
1097 }
1098
1099 MPOOL_ASSERT(_tVa2PaOff[1], printf("MsOS_MPool_Init/MsOS_MPool_Get for un-cache have not involked yet\n"));
1100
1101 pAddrVirt = HAL_MsOS_MPool_PA2VA(pAddrPhys, 0, 0xffffffff, _tVa2PaOff[0], DISABLE_PARTIAL_MAPPING);
1102
1103 if(pAddrVirt == 0)
1104 {
1105 //printf("%s:%d \n",__FUNCTION__,__LINE__);
1106 pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,true);
1107 if(pAddrVirt )
1108 return pAddrVirt;
1109 }
1110
1111 return pAddrVirt;
1112 }
1113
/* MsOS_MPool_PA2KSEG0: translate a physical address to its CACHED user-space
 * virtual address ("KSEG0" follows the MIPS naming convention).
 *
 * When the legacy linear cache window was never created
 * (_pAddrVirtStart[0] == NULL) the mapping table is scanned for a cached
 * entry covering pAddrPhys; dynamic entries are lazily bound under the mpool
 * mutex. Otherwise the linear cache offset _tVa2PaOff[0] is used.
 * All failure paths fall back to the kernel CMA pool; returns 0 on failure.
 */
inline MS_VIRT MsOS_MPool_PA2KSEG0(MS_PHY pAddrPhys) // cache
{
    MS_VIRT pAddrVirt;

    if (NULL == _pAddrVirtStart[0])
    {
        MS_S32 i;
        for(i = 0; i < MAX_MAPPINGSIZE; i++)
        {
            if(!mpool_info[i].bIsUsed)
            {
                continue; // due to unmap, we can not use break
            }
            if(mpool_info[i].bNonCache)
            {
                // non-cached entry: not usable for the KSEG0 (cached) view
                continue;
            }

            // we do PA_REGION check here, to check if pAddrPhys is located in mpool_info[i], to prevent we do mmap for some dynamic_mapping but not used
            if(! ((mpool_info[i].u64Phyaddr <= pAddrPhys) && (pAddrPhys < (mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize))) )
            {
                continue;
            }

            if(mpool_info[i].bIsDyn)
            {
                _MPOOL_MutexLock();
                // s32V2Poff == 0 && u64VirtStart == 0 marks a dynamic entry
                // that has not been mmap'ed yet; bind it now under the mutex.
                if((mpool_info[i].s32V2Poff == 0) && (mpool_info[i].u64VirtStart == 0))
                {
                    if(!_MPOOL_DELAY_BINDING(i))
                    {
                        printf("PA2KSEG0 delay binding fail!, pa = %tX", (ptrdiff_t)mpool_info[i].u64Phyaddr);

                        _MPOOL_MutexUnlock();
                        //printf("%s:%d \n",__FUNCTION__,__LINE__);
                        // Binding failed: last chance via the kernel CMA pool.
                        pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,false);
                        if(pAddrVirt )
                            return pAddrVirt;
                        //printf("%s:%d \n",__FUNCTION__,__LINE__);
                        return (MS_VIRT)NULL;
                    }
                }
                //mpool_info[i].bIsDyn = false // joe, need to be discussed
                _MPOOL_MutexUnlock();
            }

            pAddrVirt = HAL_MsOS_MPool_PA2VA(pAddrPhys, mpool_info[i].u64Phyaddr, mpool_info[i].u64MpoolSize, mpool_info[i].s32V2Poff, ENABLE_PARTIAL_MAPPING);

            if(pAddrVirt != 0)
            {
                return pAddrVirt;
            }
            else
            {
                //printf("%s:%d \n",__FUNCTION__,__LINE__);
                // HAL translation failed: fall back to the kernel CMA pool.
                pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,false);
                if(pAddrVirt )
                    return pAddrVirt;
            }
        }

        // No table entry matched: dump the table (debug level permitting),
        // then try the CMA pool one last time before returning 0.
        MS_S32 idx;
        for(idx = 0; idx < MAX_MAPPINGSIZE; idx++)
        {
            MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtStart));
            MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtEnd));
            MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64Phyaddr));
            MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64MpoolSize));
            MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].s32V2Poff));
        }
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1,printf("MsOS_MPool_PA2KSEG0 return NULL\n"));
        //assert(0);
        //printf("%s:%d \n",__FUNCTION__,__LINE__);
        pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,false);
        if(pAddrVirt )
            return pAddrVirt;

        //printf("%s:%d \n",__FUNCTION__,__LINE__);
        return (MS_VIRT) 0x0;
    }

    MPOOL_ASSERT(_tVa2PaOff[0], printf("MsOS_MPool_Init/MsOS_MPool_Get for cache have not involked yet\n"));

    // Linear window: cache offset lives in _tVa2PaOff[0].
    pAddrVirt = HAL_MsOS_MPool_PA2VA(pAddrPhys, 0, 0xffffffff, _tVa2PaOff[0], DISABLE_PARTIAL_MAPPING);

    if(pAddrVirt == 0)
    {
        //printf("%s:%d \n",__FUNCTION__,__LINE__);
        pAddrVirt = MApi_CMA_Pool_GetKernelCMAPooLUserVA(pAddrPhys,false);
        if(pAddrVirt )
            return pAddrVirt;
    }

    return pAddrVirt;
}
1209
MsOS_MPool_Dcache_Flush_All()1210 inline MS_BOOL MsOS_MPool_Dcache_Flush_All()
1211 {
1212 if (HAL_MsOS_MPool_Dcache_Flush_All(_s32MPoolFd) == FALSE)
1213 {
1214 printf("FAIL:%d\n",__LINE__);
1215 return FALSE;
1216 }
1217
1218 return TRUE;
1219 }
1220
MsOS_MPool_Dcache_Flush(MS_VIRT pAddrVirt,MS_SIZE tSize)1221 inline MS_BOOL MsOS_MPool_Dcache_Flush(MS_VIRT pAddrVirt, MS_SIZE tSize)
1222 {
1223 MS_PHY pAddrPhys;
1224
1225 pAddrPhys = MsOS_MPool_VA2PA(pAddrVirt);
1226
1227 /* to prevent the va is a not mapped va ==> MsOS_MPool_VA2PA will return 0xFFFFFFFF */
1228 if(pAddrPhys == 0xFFFFFFFF)
1229 {
1230 printf("FAIL:%d\n",__LINE__);
1231 return FALSE;
1232 }
1233
1234 /* to prevent the va_end is a not mapped va ==> MsOS_MPool_VA2PA will return 0xFFFFFFFF */
1235 if( MsOS_MPool_VA2PA(pAddrVirt + tSize) == 0xFFFFFFFF )
1236 {
1237 MsOS_MPool_Dcache_Flush_All();
1238 return TRUE;
1239 }
1240
1241 if (HAL_MsOS_MPool_Dcache_Flush(_s32MPoolFd, pAddrVirt, tSize, pAddrPhys) == FALSE)
1242 {
1243 printf("FAIL:%d\n",__LINE__);
1244 return FALSE;
1245 }
1246
1247 return TRUE;
1248 }
1249
1250
/* MsOS_MPool_Mapping: create a static user-space mapping for a MIU region.
 *
 * u8MiuSel  : MIU selector of the region.
 * tOffset   : byte offset within that MIU.
 * tMapSize  : size of the region to map.
 * u8MapMode : one of the MSOS_*CACHE_* modes; selects cache attribute and
 *             whether the user-space and/or kernel-side mapping is created.
 *
 * Returns TRUE on success (including when an identical mapping already
 * exists), FALSE on table exhaustion / ioctl / mmap failure.
 */
MS_BOOL MsOS_MPool_Mapping(MS_U8 u8MiuSel, MS_SIZE tOffset, MS_SIZE tMapSize, MS_U8 u8MapMode) // 0:cache 1: noncache
{
    MS_S32 i,idx = 0;
    DevMalloc_MPool_Info_t stPoolInfo;
    MS_VIRT ptrAddrVirt;
    MS_U32 bCache = 0; //(bNonCache) ? 0: 1;
    MS_U64 Phyaddr, PhyaddrEnd;
    MS_BOOL bNonCache =0;

    // Derive the cache attribute from the requested mapping mode.
    if(u8MapMode == MSOS_CACHE_BOTH || u8MapMode == MSOS_CACHE_USERSPACE || u8MapMode == MSOS_CACHE_KERNEL )
    {
        bNonCache = 0;
        bCache = 1;
    }else
    {
        bNonCache =1;
        bCache = 0;
    }

    _miu_offset_to_phy(u8MiuSel, tOffset, Phyaddr);
    PhyaddrEnd = Phyaddr + tMapSize;

    // User-space mapping is only created for the *_BOTH / *_USERSPACE modes.
    if(u8MapMode == MSOS_CACHE_BOTH || u8MapMode == MSOS_NON_CACHE_BOTH || u8MapMode == MSOS_CACHE_USERSPACE || u8MapMode == MSOS_NON_CACHE_USERSPACE)
    {
        stPoolInfo.u64Addr = tOffset;
        stPoolInfo.u64Size = tMapSize;
        stPoolInfo.u64Interval = MIU_INTERVAL;
        stPoolInfo.u8MiuSel = u8MiuSel;

        _MPool_Check_aligned(tOffset, tMapSize);
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u32Offset = %lX, u32MapSize = %lX, u8MapMode = %02x\n", (unsigned long)tOffset, (unsigned long)tMapSize, u8MapMode));

        pthread_mutex_lock(&_MsOS_MPool_Mutex);
        // check mmap table to avoid duplicated mmap for same start_addr and end_addr
        for (i = 0; i < MAX_MAPPINGSIZE; i++)
        {
            if (mpool_info[i].bIsUsed == false)
            {
                continue;
            }
            else
            {
                // Only an entry with the same cache attribute and MIU counts
                // as a duplicate.
                if (bNonCache != mpool_info[i].bNonCache || u8MiuSel != mpool_info[i].u8MiuSel)
                    continue;

                if ( (mpool_info[i].u64Phyaddr == Phyaddr) && ((mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize) == PhyaddrEnd) )
                {
                    //printf("[User space] Duplicated PA(0x%llx ~ 0x%llx) mapping with Table[%ld](0x%llx ~0x%llx).\n",
                    //    Phyaddr, PhyaddrEnd, i, mpool_info[i].u64Phyaddr, (mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize));
                    pthread_mutex_unlock(&_MsOS_MPool_Mutex);
                    return TRUE;
                }
            }
        }

        // Find a free table slot for the new mapping.
        for (i = 0; i < MAX_MAPPINGSIZE; i++)
        {
            if(mpool_info[i].bIsUsed == false)
            {
                idx = i;
                break;
            }
        }
        if(i >= MAX_MAPPINGSIZE)
        {
            MPOOL_ERROR("Not enough MPool, must increase MAX_MAPPINGSIZE!!\n");
            pthread_mutex_unlock(&_MsOS_MPool_Mutex);
            return FALSE;
        }
        //prevent race condition cause mpool mapping size modified in the kernel layer

        // Program the kernel mpool window (region + cache attribute), then
        // mmap it; the driver interprets offset 0 against the window just set.
        if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_SET, &stPoolInfo))
        {
            pthread_mutex_unlock(&_MsOS_MPool_Mutex);
            return FALSE;
        }

        if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_CACHE, &bCache))
        {
            pthread_mutex_unlock(&_MsOS_MPool_Mutex);
            return FALSE;
        }
        if ((MS_VIRT)MAP_FAILED == (ptrAddrVirt = (MS_VIRT)mmap(0, tMapSize, PROT_READ | PROT_WRITE, MAP_SHARED, _s32MPoolFd, 0)))
        {
            pthread_mutex_unlock(&_MsOS_MPool_Mutex);
            MS_ASSERT(0);
            return FALSE;
        }
        mpool_info[idx].bIsUsed = true;
        if(bNonCache)
        {
            mpool_info[idx].bNonCache = MMAP_NONCACHE;
        }
        else
        {
            mpool_info[idx].bNonCache = MMAP_CACHE;
        }
        //prevent race condition cause mpool mapping size modified in the kernel layer
        pthread_mutex_unlock(&_MsOS_MPool_Mutex);

        // Fill in the rest of the table entry (slot already claimed above).
        mpool_info[idx].u64VirtStart = ptrAddrVirt;
        mpool_info[idx].u64VirtEnd = (ptrAddrVirt + tMapSize);
        mpool_info[idx].u8MiuSel = u8MiuSel;
        mpool_info[idx].bIsDyn = false;

        _miu_offset_to_phy(u8MiuSel, tOffset, mpool_info[idx].u64Phyaddr);

        mpool_info[idx].u64MpoolSize = tMapSize;
        mpool_info[idx].s32V2Poff = mpool_info[idx].u64VirtStart - mpool_info[idx].u64Phyaddr;

        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtStart));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64VirtEnd));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64Phyaddr));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].u64MpoolSize));
        MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)idx, (ptrdiff_t)mpool_info[idx].s32V2Poff));

        MPOOL_MAPPING = 1;
    }

#if defined(CONFIG_UTOPIA_FRAMEWORK_KERNEL_DRIVER)
    // Kernel-side mapping (skipped for the *_USERSPACE-only modes): ask the
    // utopia driver to create the equivalent dynamic mapping in kernel space.
    if(u8MapMode == MSOS_CACHE_USERSPACE || u8MapMode == MSOS_NON_CACHE_USERSPACE)
        return TRUE;


    MS_S32 _s32UtopiaFd = -1;
    KDrvMPool_Info_t stPoolInfoKernel;

    if (0 > (_s32UtopiaFd = open("/proc/utopia", O_RDWR)))
    {
        printf("MPool : Open /proc/utopia fail\n");

    }else
    {
        stPoolInfoKernel.u32Addr = tOffset;
        stPoolInfoKernel.u32Size = tMapSize;
        stPoolInfoKernel.u32Interval = MIU_INTERVAL;
        stPoolInfoKernel.u8MiuSel = u8MiuSel;
        stPoolInfoKernel.bcached = !(bNonCache);

        if(ioctl(_s32UtopiaFd,UTOPIA_IOCTL_MODULE_DYNAMIC_MAPPING,&stPoolInfoKernel))
        {
            printf("MPool : UTOPIA_IOCTL_MODULE_DYNAMIC_MAPPING failed\n");

        }
        close(_s32UtopiaFd);//coverity resource leak
    }
#endif

    return TRUE;
}
1401
/* MsOS_MPool_Mapping_Dynamic: register a MIU region for lazy ("delay
 * binding") user-space mapping. No mmap happens here; a table entry is
 * recorded with bIsDyn = true and u64VirtStart/s32V2Poff = 0, and the actual
 * mmap is performed on first PA2KSEG0/PA2KSEG1 translation.
 * Kernel-side mapping (for the *_BOTH / *_KERNEL modes) is requested
 * immediately via the utopia driver when compiled in.
 *
 * NOTE(review): unlike MsOS_MPool_Mapping, the table scan/insert below is
 * not protected by _MsOS_MPool_Mutex — confirm callers serialize this.
 *
 * Returns TRUE on success or duplicate registration, FALSE on table
 * exhaustion.
 */
MS_BOOL MsOS_MPool_Mapping_Dynamic(MS_U8 u8MiuSel, MS_SIZE u32Offset, MS_SIZE u32MapSize, MS_U8 u8MapMode)
{
    MS_S32 i,idx = 0;
    MS_U64 Phyaddr, PhyaddrEnd;

    MS_BOOL bNonCache =0;


    // Derive the cache attribute from the requested mapping mode.
    if(u8MapMode == MSOS_CACHE_BOTH || u8MapMode == MSOS_CACHE_USERSPACE || u8MapMode == MSOS_CACHE_KERNEL )
    {
        bNonCache = 0;
    }else
    {
        bNonCache =1;
    }
    _miu_offset_to_phy(u8MiuSel, u32Offset, Phyaddr);
    PhyaddrEnd = Phyaddr + u32MapSize;

#if defined(CONFIG_UTOPIA_FRAMEWORK_KERNEL_DRIVER)
    KDrvMPool_Info_t stPoolInfo;

    MS_S32 _s32UtopiaFd = -1;
    // Kernel-side dynamic mapping for the *_BOTH / *_KERNEL modes.
    if(u8MapMode == MSOS_CACHE_BOTH || u8MapMode == MSOS_NON_CACHE_BOTH || u8MapMode == MSOS_CACHE_KERNEL || u8MapMode == MSOS_NON_CACHE_KERNEL)
    {


        if (0 > (_s32UtopiaFd = open("/proc/utopia", O_RDWR)))
        {
            printf("MPool : Open /proc/utopia fail\n");
        }
        else
        {
            stPoolInfo.u32Addr = u32Offset;
            stPoolInfo.u32Size = u32MapSize;
            stPoolInfo.u32Interval = MIU_INTERVAL;
            stPoolInfo.u8MiuSel = u8MiuSel;
            stPoolInfo.bcached = !(bNonCache);

            if(ioctl(_s32UtopiaFd,UTOPIA_IOCTL_MODULE_DYNAMIC_MAPPING,&stPoolInfo))
            {
                printf("MPool : UTOPIA_IOCTL_MODULE_DYNAMIC_MAPPING failed\n");
            }
        }
    }
#endif

    _MPool_Check_aligned(u32Offset, u32MapSize);
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u32Offset = %tX, u32MapSize = %tX, u8MapMode = %02x\n", (ptrdiff_t)u32Offset, (ptrdiff_t)u32MapSize, u8MapMode));

    // User-space table entry only for the *_BOTH / *_USERSPACE modes.
    if(u8MapMode == MSOS_CACHE_BOTH || u8MapMode == MSOS_NON_CACHE_BOTH || u8MapMode == MSOS_CACHE_USERSPACE || u8MapMode == MSOS_NON_CACHE_USERSPACE)
    {

        // check mmap table to avoid duplicated mmap for same start_addr and end_addr
        for (i = 0; i < MAX_MAPPINGSIZE; i++)
        {
            if (mpool_info[i].bIsUsed == false)
            {
                continue;
            }
            else
            {
                if (bNonCache != mpool_info[i].bNonCache || u8MiuSel != mpool_info[i].u8MiuSel)
                    continue;

                if ( (mpool_info[i].u64Phyaddr == Phyaddr) && ((mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize) == PhyaddrEnd) )
                {
                    //printf("[User space] Duplicated PA(0x%llx ~ 0x%llx) mapping with Table[%ld](0x%llx ~0x%llx).\n",
                    //    Phyaddr, PhyaddrEnd, i, mpool_info[i].u64Phyaddr, (mpool_info[i].u64Phyaddr + mpool_info[i].u64MpoolSize));
                    return TRUE;
                }
            }
        }

        // Find a free table slot.
        for (i = 0; i < MAX_MAPPINGSIZE; i++)
        {
            if(mpool_info[i].bIsUsed == false)
            {
                idx = i;
                break;
            }
        }
        if(i >= MAX_MAPPINGSIZE)
        {
            MPOOL_ERROR("Not enough MPool, must increase MAX_MAPPINGSIZE!!\n");
            return FALSE;
        }

        mpool_info[idx].bIsUsed = true;
        if(bNonCache)
        {
            mpool_info[idx].bNonCache = MMAP_NONCACHE;
        }
        else
        {
            mpool_info[idx].bNonCache = MMAP_CACHE;
        }
        // u64VirtStart == 0 && s32V2Poff == 0 is the "not yet bound" marker
        // that PA2KSEG0/PA2KSEG1 use to trigger delay binding.
        mpool_info[idx].u64VirtStart = 0;
        mpool_info[idx].u64VirtEnd = 0;
        mpool_info[idx].u8MiuSel = u8MiuSel;
        mpool_info[idx].bIsDyn = true;

        _miu_offset_to_phy(u8MiuSel, u32Offset, mpool_info[idx].u64Phyaddr);

        mpool_info[idx].u64MpoolSize = u32MapSize;
        mpool_info[idx].s32V2Poff = 0;

        MPOOL_MAPPING = 1;
    }
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("MPOOL_MAPPING =%d\n",MPOOL_MAPPING));//coverity MPOOL_MAPPING unused
#if defined(CONFIG_UTOPIA_FRAMEWORK_KERNEL_DRIVER)
    if(0 <= _s32UtopiaFd)
        close(_s32UtopiaFd);//coverity resource leak
#endif
    return TRUE;
}
1517
MsOS_MPool_UnMapping(MS_VIRT ptrVirtStart,MS_SIZE tMapSize)1518 MS_BOOL MsOS_MPool_UnMapping(MS_VIRT ptrVirtStart, MS_SIZE tMapSize)
1519 {
1520 MS_S32 i;
1521
1522 for (i = 0; i < MAX_MAPPINGSIZE; i++)
1523 {
1524 if(mpool_info[i].bIsUsed == true)
1525 {
1526 if( (mpool_info[i].u64VirtStart == ptrVirtStart) && (mpool_info[i].u64MpoolSize == tMapSize) )
1527 {
1528 if( !munmap((MS_VIRT *)mpool_info[i].u64VirtStart, mpool_info[i].u64MpoolSize) )
1529 {
1530 printf("mpool[%td] Unmapping PA:[%tX], VA:[%tX], Size:[%tX] done.\n", (ptrdiff_t)i, (ptrdiff_t)mpool_info[i].u64Phyaddr, (ptrdiff_t)mpool_info[i].u64VirtStart, (ptrdiff_t)mpool_info[i].u64MpoolSize);
1531 mpool_info[i].bIsUsed = false;
1532 mpool_info[i].u64Phyaddr = 0;
1533 mpool_info[i].u64VirtStart = 0;
1534 mpool_info[i].u64VirtEnd = 0;
1535 mpool_info[i].u64MpoolSize = 0;
1536 mpool_info[i].s32V2Poff = 0;
1537
1538 /* cause default value is 0 */
1539 mpool_info[i].bNonCache = 0;
1540 mpool_info[i].u8MiuSel = 0;
1541 mpool_info[i].bIsDyn = false;
1542 }
1543 else
1544 {
1545 printf("Unmapping PA:[%tX], VA:[%tX], Size:[%tX] fail.\n", (ptrdiff_t)mpool_info[i].u64Phyaddr, (ptrdiff_t)mpool_info[i].u64VirtStart, (ptrdiff_t)mpool_info[i].u64MpoolSize);
1546
1547 break;
1548 }
1549
1550 return TRUE;
1551 }
1552 }
1553 }
1554 printf("Unmapping Fail !! VA:[%tX], Size:[%tX].\n", (ptrdiff_t)ptrVirtStart, (ptrdiff_t)tMapSize);
1555
1556 return FALSE;
1557 }
1558
MsOS_MPool_Kernel_Detect(MS_PHY * lx_addr,MS_U64 * lx_size,MS_PHY * lx2_addr,MS_U64 * lx2_size)1559 MS_BOOL MsOS_MPool_Kernel_Detect(MS_PHY *lx_addr, MS_U64 *lx_size, MS_PHY *lx2_addr, MS_U64 *lx2_size)
1560 {
1561 DevMalloc_MPool_Kernel_Info_t kernel_info;
1562 if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_KERNELDETECT, &kernel_info))
1563 {
1564 return FALSE;
1565 }
1566
1567 *lx_addr = kernel_info.u64lxAddr;
1568 *lx_size = kernel_info.u64lxSize;
1569 *lx2_addr = kernel_info.u64lx2Addr-0x40000000;
1570 *lx2_size = kernel_info.u64lx2Size;
1571
1572 //MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("lx_addr = %lx, lx_size = %lx\n", (MS_U64)lx_addr, lx_size));
1573 //MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("lx2_addr = %lx, lx2_size = %lx\n", (MS_U64)lx2_addr, lx2_size));
1574
1575 return TRUE;
1576 }
1577
/* Set the module-wide mpool debug verbosity (consumed by MPOOL_DBG_MSG). */
void MsOS_MPool_SetDbgLevel(MsOSMPool_DbgLevel DbgLevel)
{
    _u32MPoolDBGLevel = DbgLevel;
}
1582
MsOS_MPool_SetWatchPT(MS_VIRT ptrAddrVirt,MS_U32 u32ASID,MS_U8 u8Global,MS_U8 u8WType,MS_U32 u32Mask)1583 MS_BOOL MsOS_MPool_SetWatchPT(MS_VIRT ptrAddrVirt, MS_U32 u32ASID, MS_U8 u8Global, MS_U8 u8WType, MS_U32 u32Mask)
1584 {
1585 DevMalloc_MPool_Watchpt_Info_t stWPTInfo;
1586 stWPTInfo.mask = u32Mask;
1587 stWPTInfo.u64AddrVirt = ptrAddrVirt;
1588 stWPTInfo.ASID = u32ASID;
1589 stWPTInfo.global = u8Global;
1590 stWPTInfo.rwx = u8WType;
1591 if(ioctl(_s32MPoolFd, MALLOC_IOC_SETWATCHPT, &stWPTInfo)){
1592 printf("ioctl failed\n");
1593 return FALSE;
1594 }
1595 printf("this is api test after ioctl by york\n");
1596 return TRUE;
1597 }
1598
MsOS_MPool_GetWatchPT(char * str)1599 MS_BOOL MsOS_MPool_GetWatchPT(char *str)
1600 {
1601 printf("Read watchpoint register\n");
1602 if(ioctl(_s32MPoolFd, MALLOC_IOC_GETWATCHPT, str)){
1603 printf("ioctl failed\n");
1604 return FALSE;
1605 }
1606 printf("%s\n",str);
1607 return TRUE;
1608 }
1609
/* MsOS_MPool_Add_PA2VARange: record an externally-created PA<->VA mapping in
 * the mpool table so VA2PA / PA2KSEG* can translate through it. The mapping
 * itself is NOT created here — only the bookkeeping entry.
 * Returns FALSE when the table is full. Thread-safe via _MsOS_MPool_Mutex. */
MS_BOOL MsOS_MPool_Add_PA2VARange(MS_U64 u64PhysAddr, MS_VIRT u64VirtAddr, MS_SIZE u64MapSize, MS_BOOL bNonCache)
{
    MS_U64 u64AddrOffset = 0;
    MS_U8 u8MiuSel = 0;
    MS_U32 slot;
    MS_BOOL bSlotFound = FALSE;

    _MPool_Check_aligned(u64PhysAddr, u64MapSize);
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u64PhysAddr = %tX, u64MapSize = %tX, u64VirtAddr = %tX\n", (ptrdiff_t)u64PhysAddr, (ptrdiff_t)u64MapSize, (ptrdiff_t)u64VirtAddr));

    //for multi-thread access
    pthread_mutex_lock(&_MsOS_MPool_Mutex);

    /* Claim the first free table slot. */
    for (slot = 0; slot < MAX_MAPPINGSIZE; slot++)
    {
        if (mpool_info[slot].bIsUsed == false)
        {
            mpool_info[slot].bIsUsed = true;
            mpool_info[slot].bNonCache = bNonCache ? MMAP_NONCACHE : MMAP_CACHE;
            bSlotFound = TRUE;
            break;
        }
    }

    if (!bSlotFound)
    {
        printf("Not enough MPool, must increase MAX_MAPPINGSIZE!!\n");
        pthread_mutex_unlock(&_MsOS_MPool_Mutex);
        return FALSE;
    }

    _phy_to_miu_offset(u8MiuSel, u64AddrOffset, u64PhysAddr); // get miu & offset
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u64AddrOffset =0x%x\n",(unsigned int)u64AddrOffset));//coverity u64AddrOffset unused

    mpool_info[slot].u64VirtStart = u64VirtAddr;
    mpool_info[slot].u64VirtEnd = (u64VirtAddr + u64MapSize);
    mpool_info[slot].u8MiuSel = u8MiuSel;
    mpool_info[slot].bIsDyn = false;
    mpool_info[slot].u64Phyaddr = u64PhysAddr;
    mpool_info[slot].u64MpoolSize = u64MapSize;
    mpool_info[slot].s32V2Poff = mpool_info[slot].u64VirtStart - mpool_info[slot].u64Phyaddr;

    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtStart =%tX\n", (ptrdiff_t)slot, (ptrdiff_t)mpool_info[slot].u64VirtStart));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64VirtEnd = %tX\n", (ptrdiff_t)slot, (ptrdiff_t)mpool_info[slot].u64VirtEnd));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64Phyaddr = %tX\n", (ptrdiff_t)slot, (ptrdiff_t)mpool_info[slot].u64Phyaddr));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].u64MpoolSize = %tX\n", (ptrdiff_t)slot, (ptrdiff_t)mpool_info[slot].u64MpoolSize));
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("mpool_info[%td].s32V2Poff = %tX\n", (ptrdiff_t)slot, (ptrdiff_t)mpool_info[slot].s32V2Poff));

    MPOOL_MAPPING = 1;
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("MPOOL_MAPPING =%d\n",MPOOL_MAPPING));//coverity MPOOL_MAPPING unused
    pthread_mutex_unlock(&_MsOS_MPool_Mutex);

    return TRUE;
}
1670
/* MsOS_MPool_Remove_PA2VARange: drop the table entry that exactly matches the
 * given (PA, VA, size, cache-attribute) tuple. Does NOT munmap anything —
 * the inverse of MsOS_MPool_Add_PA2VARange, bookkeeping only.
 * Returns TRUE when an entry was found and cleared. */
MS_BOOL MsOS_MPool_Remove_PA2VARange(MS_U64 u64PhysAddr, MS_VIRT u64VirtAddr, MS_SIZE u64MapSize, MS_BOOL bNonCache)
{
    MS_BOOL bRemoved = FALSE;
    MS_BOOL mCacheTag = bNonCache ? MMAP_NONCACHE : MMAP_CACHE;
    MS_U32 ent;

    _MPool_Check_aligned(u64PhysAddr, u64MapSize);
    MPOOL_DBG_MSG(E_MsOSMPool_DBG_L1, printf("u64PhysAddr = %tX, u64MapSize = %tX, u64VirtAddr = %tX\n", (ptrdiff_t)u64PhysAddr, (ptrdiff_t)u64MapSize, (ptrdiff_t)u64VirtAddr));

    //multi-thread access
    pthread_mutex_lock(&_MsOS_MPool_Mutex);

    for (ent = 0; ent < MAX_MAPPINGSIZE; ent++)
    {
        if (!mpool_info[ent].bIsUsed)
            continue;
        if (mpool_info[ent].bNonCache != mCacheTag)
            continue;

        if ((mpool_info[ent].u64VirtStart == u64VirtAddr)
            && (mpool_info[ent].u64Phyaddr == u64PhysAddr)
            && (mpool_info[ent].u64MpoolSize == u64MapSize))
        {
            /* Zeroing the entry restores its "free" default state. */
            memset(&mpool_info[ent], 0, sizeof(mpool_info[ent]));
            bRemoved = TRUE;
            break;
        }
    }

    pthread_mutex_unlock(&_MsOS_MPool_Mutex);
    return bRemoved;
}
1705
1706 #ifdef ENABLE_KERNEL_DLMALLOC
/* MsOS_MPool_CreateMemoryPool: ask the kernel dlmalloc backend to create an
 * mspace inside the already-mapped region starting at user VA `base`.
 * The kernel returns the pool head as a BUS address; the matching user VA is
 * computed from the (pool_ba - base_ba) offset because no cache attribute is
 * known here (pa2va cannot be used). Returns the mspace handle, 0 on failure.
 * With DLMALLOC_DBG, the handle is also recorded in mstar_dlmalloc_info for
 * alloc/free cross-checking.
 */
mspace MsOS_MPool_CreateMemoryPool(void *base, size_t capacity, int locked)
{
#if DLMALLOC_DBG
    int first_free_dlmalloc_info_index = DLMALLOC_INFO_CNT;
    int i = 0;
#endif
    MS_PHY pa_start = MsOS_MPool_VA2PA((MS_VIRT)base);
    MS_PHY ba_start = HAL_MsOS_MPool_PA2BA(pa_start);
    printf("\033[31mFunction = %s, Line = %d, VA: from 0x%lX to 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, (unsigned long)base, ((unsigned long)base+capacity));
    printf("\033[31mFunction = %s, Line = %d, PA: from 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, pa_start);
    printf("\033[31mFunction = %s, Line = %d, BA: from 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, ba_start);
    DevMalloc_MPool_Dlmalloc_Info_t dlmalloc_info;
    dlmalloc_info.user_va = base;
    dlmalloc_info.bus_addr = ba_start;
    dlmalloc_info.capacity = capacity;
    dlmalloc_info.locked = locked;

    //MsOS_DelayTask(100);
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_DLMALLOC_CREATE_POOL, &dlmalloc_info))
    {
        sleep(1);
        printf("create a mpool memory pool failed!\n");
        return 0;
    }
    //MsOS_DelayTask(100);

    // we can not use pa2va, because we donot have cache/non-cache infomation, so we use the offset to calculate the va
    dlmalloc_info.MemoryPool = (mspace)((unsigned long)(dlmalloc_info.pool_bus_addr - ba_start) + (unsigned long)base);
    printf("\033[31mFunction = %s, Line = %d, get MemoryPool @ ba: 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, dlmalloc_info.pool_bus_addr);
    printf("\033[31mFunction = %s, Line = %d, get MemoryPool @ va: 0x%lX\033[m\n\n", __PRETTY_FUNCTION__, __LINE__, (unsigned long)dlmalloc_info.MemoryPool);

#if DLMALLOC_DBG
    // Look for the handle in the debug registry; remember the first free slot
    // in case it has to be inserted.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == dlmalloc_info.MemoryPool)
            break;

        if( (mstar_dlmalloc_info[i].msp == 0) && (first_free_dlmalloc_info_index == DLMALLOC_INFO_CNT) )
            first_free_dlmalloc_info_index = i;
    }

    if(i == DLMALLOC_INFO_CNT) // we can not find dlmalloc_info.MemoryPool @ mstar_dlmalloc_info, insert it
    {
        if(first_free_dlmalloc_info_index == DLMALLOC_INFO_CNT) // the mstar_dlmalloc_info is full, die
        {
            printf("\033[35mFunction = %s, Line = %d, no space to insert mstar_dlmalloc_info, die\033[m\n", __PRETTY_FUNCTION__, __LINE__);
            // Deliberate crash (write to address 0x1) so the failure is
            // caught immediately during debug runs.
            *(volatile int *)0x1 = 0x5566;
        }
        printf("\033[31mFunction = %s, Line = %d, insert dlmalloc_info.MemoryPool: 0x%lX to mstar_dlmalloc_info[%d]\033[m\n\n", __PRETTY_FUNCTION__, __LINE__, dlmalloc_info.MemoryPool, first_free_dlmalloc_info_index);
        mstar_dlmalloc_info[first_free_dlmalloc_info_index].msp = dlmalloc_info.MemoryPool;
    }
#endif

    return dlmalloc_info.MemoryPool;
}
1762
/* MsOS_MPool_DeleteMemoryPool: destroy an mspace previously created with
 * MsOS_MPool_CreateMemoryPool. The user VA handle is translated to a bus
 * address for the driver. Always returns 0 (also on ioctl failure — the
 * failure is only reported via printf). With DLMALLOC_DBG, the handle is
 * removed from the debug registry; an unknown handle crashes deliberately.
 */
size_t MsOS_MPool_DeleteMemoryPool(mspace msp)
{
#if DLMALLOC_DBG
    int i = 0;
#endif
    DevMalloc_MPool_Dlmalloc_Delete_Info_t dlmalloc_delete_info;
    MS_PHY pa_pool = MsOS_MPool_VA2PA((MS_VIRT)msp);
    MS_PHY ba_pool = HAL_MsOS_MPool_PA2BA(pa_pool);

    dlmalloc_delete_info.pool_bus_addr = ba_pool;
    printf("\033[35mFunction = %s, Line = %d, release VA: 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, (unsigned long)msp);
    printf("\033[35mFunction = %s, Line = %d, relase PA: 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, pa_pool);
    printf("\033[35mFunction = %s, Line = %d, release BA: 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, ba_pool);

    //MsOS_DelayTask(100);
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_DLMALLOC_DELETE_POOL, &dlmalloc_delete_info))
    {
        sleep(1);
        printf("delete a mpool memory pool failed!\n");
        return 0;
    }
    //MsOS_DelayTask(100);

#if DLMALLOC_DBG
    // Find the handle in the debug registry.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == msp)
            break;
    }

    if(i == DLMALLOC_INFO_CNT) // we can not find msp @ mstar_dlmalloc_info, die
    {
        printf("\033[35mFunction = %s, Line = %d, no mstar_dlmalloc_info match for msp: 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, msp);
        // Deliberate crash (write to address 0x2) to surface the bad handle
        // immediately during debug runs.
        *(volatile int *)0x2 = 0x5566;
    }
    else
        memset(&mstar_dlmalloc_info[i], 0, sizeof(mstar_dlmalloc_info[i]));
#endif

    return 0;
}
1804
/* MsOS_MPool_AllocateMemory: allocate `bytes` from the kernel-side dlmalloc
 * mspace `msp`. The driver works in bus addresses, so the returned bus
 * address is converted back to a user VA via the (alloc_ba - pool_ba) offset
 * (cache attribute unknown here, so pa2va cannot be used).
 * Returns the user VA of the allocation, or 0 on failure. With DLMALLOC_DBG,
 * the allocation is recorded per-pool; inconsistencies crash deliberately.
 */
void * MsOS_MPool_AllocateMemory(mspace msp, size_t bytes)
{
#if DLMALLOC_DBG
    int first_free_detail_dlmalloc_info_index = DETAIL_DLMALLOC_INFO_CNT;
    int i,j = 0;
#endif
    DevMalloc_MPool_Dlmalloc_Alloc_Free_Info_t dlmalloc_alloc_free_info;
    void *alloc_return_va = 0;
    MS_PHY pa_pool = MsOS_MPool_VA2PA((MS_VIRT)msp);
    MS_PHY ba_pool = HAL_MsOS_MPool_PA2BA(pa_pool);

    dlmalloc_alloc_free_info.pool_bus_addr = ba_pool;
    dlmalloc_alloc_free_info.alloc_size = bytes;
    //printf("\033[35mFunction = %s, Line = %d, alloc from pool ba: 0x%llX, size: 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, dlmalloc_alloc_free_info.pool_bus_addr, dlmalloc_alloc_free_info.alloc_size);

    //MsOS_DelayTask(100);
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_DLMALLOC_ALLOC_POOL_MEMORY, &dlmalloc_alloc_free_info))
    {
        sleep(1);
        printf("alloc memory from mpool failed!\n");
        return 0;
    }
    //MsOS_DelayTask(100);

    // we can not use pa2va, because we donot have cache/non-cache infomation, so we use the offset to calculate the va
    if(dlmalloc_alloc_free_info.alloc_addr)
        alloc_return_va = (void *)((unsigned long)(dlmalloc_alloc_free_info.alloc_addr - ba_pool) + (unsigned long)msp);
    else
        alloc_return_va = 0;
    //printf("\033[35mFunction = %s, Line = %d, get memory @ ba: 0x%llX\033[m\n", __PRETTY_FUNCTION__, __LINE__, dlmalloc_alloc_free_info.alloc_addr);
    //printf("\033[35mFunction = %s, Line = %d, alloc_return_va is 0x%lX, size: 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, alloc_return_va, bytes);

#if DLMALLOC_DBG
    if(alloc_return_va == 0)
        return alloc_return_va;

    // Find the owning pool in the debug registry.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == msp)
            break;
    }

    if(i == DLMALLOC_INFO_CNT)
    {
        // Unknown pool handle: dump the registry and crash deliberately
        // (write to address 0x3) so the bug is caught in debug runs.
        printf("\033[35mFunction = %s, Line = %d, no mstar_dlmalloc_info match for msp: 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, msp);
        for(i = 0; i < DLMALLOC_INFO_CNT; i++)
            printf("\033[35mFunction = %s, Line = %d, mstar_dlmalloc_info[%d].msp is 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, i, mstar_dlmalloc_info[i].msp);
        sleep(3);
        *(volatile int *)0x3 = 0x5566;
    }
    else
    {
        // Detect double-allocation of the same VA and remember the first free
        // per-pool record slot.
        for(j = 0; j < DETAIL_DLMALLOC_INFO_CNT; j++)
        {
            if(mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va == alloc_return_va)
            {
                printf("\033[35mFunction = %s, Line = %d, the alloc_return_va: 0x%lX is already allocated, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, alloc_return_va);
                *(volatile int *)0x4 = 0x5566;
            }

            if( (mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va == 0) && (first_free_detail_dlmalloc_info_index == DETAIL_DLMALLOC_INFO_CNT) )
                first_free_detail_dlmalloc_info_index = j;
        }

        if(first_free_detail_dlmalloc_info_index == DETAIL_DLMALLOC_INFO_CNT)
        {
            printf("\033[35mFunction = %s, Line = %d, no space to insert detail_dlmalloc_info, die (having %d data)\033[m\n", __PRETTY_FUNCTION__, __LINE__, DETAIL_DLMALLOC_INFO_CNT);
            *(volatile int *)0x5 = 0x5566;
        }
        else
        {
            mstar_dlmalloc_info[i].detail_dlmalloc_info[first_free_detail_dlmalloc_info_index].allocated_start_va = alloc_return_va;
            mstar_dlmalloc_info[i].detail_dlmalloc_info[first_free_detail_dlmalloc_info_index].allocated_size = bytes;
        }
    }
#endif

    return alloc_return_va;
}
1884
// Free a block previously allocated from a dlmalloc-managed memory pool.
//
// msp             : mspace handle (pool base VA; used for VA -> PA -> BA translation)
// free_start_addr : VA of the block to free, as returned by the pool allocator
//
// The kernel driver operates on bus addresses, so both the pool base and the
// block start address are translated VA -> PA -> BA before issuing the ioctl.
// With DLMALLOC_DBG enabled, the user-space bookkeeping table is consulted
// before the free (to report the recorded size) and updated after it.
void MsOS_MPool_FreeMemory(mspace msp, void *free_start_addr)
{
#if DLMALLOC_DBG
    int i,j = 0;
#endif
    DevMalloc_MPool_Dlmalloc_Alloc_Free_Info_t dlmalloc_alloc_free_info;
    MS_PHY pa_pool = MsOS_MPool_VA2PA((MS_VIRT)msp);
    MS_PHY ba_pool = HAL_MsOS_MPool_PA2BA(pa_pool);

    MS_PHY pa_free_start_addr = MsOS_MPool_VA2PA((MS_VIRT)free_start_addr);
    MS_PHY ba_free_start_addr = HAL_MsOS_MPool_PA2BA(pa_free_start_addr);

    dlmalloc_alloc_free_info.alloc_addr = ba_free_start_addr;
    dlmalloc_alloc_free_info.pool_bus_addr = ba_pool;

#if DLMALLOC_DBG
    // Scan the whole bookkeeping table for this mspace; if found, report the
    // recorded size of the block about to be freed.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == msp)
        {
            for(j = 0; j < DETAIL_DLMALLOC_INFO_CNT; j++)
            {
                if(mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va == free_start_addr)
                {
                    printf("\033[31mFunction = %s, Line = %d, free size is 0x%lX\033[m\n", __PRETTY_FUNCTION__, __LINE__, mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_size);
                }
            }
            break;
        }
    }

    // BUGFIX: the "no matching mspace" check must run AFTER the full scan.
    // The original placed this printf + deliberate crash inside the loop body,
    // so the first non-matching slot killed the process even when a later slot
    // matched. This now follows the scan-then-check pattern used everywhere
    // else in this file.
    if(i == DLMALLOC_INFO_CNT)
    {
        printf("\033[31mFunction = %s, Line = %d, i is %d\033[m\n", __PRETTY_FUNCTION__, __LINE__, i);
        *(volatile int *)0x10 = 0x5566;     // intentional crash to halt on corrupted bookkeeping
    }
#endif

    //MsOS_DelayTask(100);
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_DLMALLOC_FREE_POOL_MEMORY, &dlmalloc_alloc_free_info))
    {
        sleep(1);
        // BUGFIX: this is the free path; the original message said "alloc".
        printf("free memory from mpool failed!\n");
        return;
    }
    //MsOS_DelayTask(100);

#if DLMALLOC_DBG
    // The kernel free succeeded: clear the matching bookkeeping entry.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == msp)
            break;
    }

    if(i == DLMALLOC_INFO_CNT)
    {
        printf("\033[31mFunction = %s, Line = %d, no mstar_dlmalloc_info match for msp: 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, msp);
        *(volatile int *)0x6 = 0x5566;      // intentional crash: freed from an untracked mspace
    }
    else
    {
        for(j = 0; j < DETAIL_DLMALLOC_INFO_CNT; j++)
        {
            if(mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va == free_start_addr)
            {
                mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va = 0;
                mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_size = 0;
                break;
            }
        }

        if(j == DETAIL_DLMALLOC_INFO_CNT)
        {
            printf("\033[31mFunction = %s, Line = %d, no detail_dlmalloc_info can be delete for free_start_addr 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, free_start_addr);
            *(volatile int *)0x7 = 0x5566;  // intentional crash: double free or foreign pointer
        }
    }
#endif

    return;
}
1964
// Resize a block previously allocated from a dlmalloc-managed memory pool.
//
// msp                  : mspace handle (pool base VA; used for VA -> PA -> BA translation)
// old_alloc_start_addr : VA of the existing block
// bytes                : requested new size in bytes
//
// Returns the VA of the (possibly moved) block, or 0 on failure.
// The kernel driver works in bus addresses, so the pool base and the old block
// address are translated VA -> PA -> BA before the ioctl; the returned bus
// address is mapped back to a VA by offset arithmetic against the pool base.
void * MsOS_MPool_ReallocateMemory(mspace msp, void *old_alloc_start_addr, size_t bytes)
{
#if DLMALLOC_DBG
    int i,j = 0;
#endif
    DevMalloc_MPool_Dlmalloc_Alloc_Free_Info_t dlmalloc_alloc_free_info;
    void *alloc_return_va = 0;
    MS_PHY pa_pool = MsOS_MPool_VA2PA((MS_VIRT)msp);
    MS_PHY ba_pool = HAL_MsOS_MPool_PA2BA(pa_pool);

    MS_PHY pa_old_alloc_start_addr = MsOS_MPool_VA2PA((MS_VIRT)old_alloc_start_addr);
    MS_PHY ba_old_alloc_start_addr = HAL_MsOS_MPool_PA2BA(pa_old_alloc_start_addr);

    dlmalloc_alloc_free_info.alloc_addr = ba_old_alloc_start_addr;
    dlmalloc_alloc_free_info.pool_bus_addr = ba_pool;
    dlmalloc_alloc_free_info.alloc_size = bytes;

    //MsOS_DelayTask(100);
    if (ioctl(_s32MPoolFd, MALLOC_IOC_MPOOL_DLMALLOC_REALLOC_POOL_MEMORY, &dlmalloc_alloc_free_info))
    {
        sleep(1);
        // BUGFIX: this is the realloc path; the original message said "alloc".
        printf("realloc memory from mpool failed!\n");
        return 0;
    }
    //MsOS_DelayTask(100);

    // We cannot use PA2VA here because cache/non-cache information is not
    // available, so derive the VA from the offset into the pool instead.
    if(dlmalloc_alloc_free_info.new_alloc_addr)
        alloc_return_va = (void *)((unsigned long)(dlmalloc_alloc_free_info.new_alloc_addr - ba_pool) + (unsigned long)msp);
    else
        alloc_return_va = 0;

#if DLMALLOC_DBG
    if(alloc_return_va == 0)
        return alloc_return_va;

    // Find the bookkeeping slot for this mspace.
    for(i = 0; i < DLMALLOC_INFO_CNT; i++)
    {
        if(mstar_dlmalloc_info[i].msp == msp)
            break;
    }

    if(i == DLMALLOC_INFO_CNT)
    {
        printf("\033[35mFunction = %s, Line = %d, no mstar_dlmalloc_info match for msp: 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, msp);
        *(volatile int *)0x8 = 0x5566;      // intentional crash: realloc on an untracked mspace
    }
    else
    {
        // Re-point the entry for the old block at the (possibly moved) new block.
        for(j = 0; j < DETAIL_DLMALLOC_INFO_CNT; j++)
        {
            if(mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va == old_alloc_start_addr)
            {
                mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_start_va = alloc_return_va;
                mstar_dlmalloc_info[i].detail_dlmalloc_info[j].allocated_size = bytes;
                break;
            }

        }

        if(j == DETAIL_DLMALLOC_INFO_CNT)
        {
            printf("\033[35mFunction = %s, Line = %d, no detail_dlmalloc_info can be delete for old_alloc_start_addr 0x%lX, die\033[m\n", __PRETTY_FUNCTION__, __LINE__, old_alloc_start_addr);
            *(volatile int *)0x9 = 0x5566;  // intentional crash: realloc of an untracked pointer
        }
    }
#endif

    return alloc_return_va;
}
2040 #endif
2041
2042