/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_tiler_heap.c
     28  /* Tiler heap shrink stop limit for maintaining a minimum number of chunks */
    131  * Unless the @chunk is the first in the kernel's list of chunks belonging to
    148  "Cannot support CPU cached chunks without sync operations");  in link_chunk()
    153  "Linked tiler heap chunks, 0x%llX -> 0x%llX\n",  in link_chunk()
    190  "Cannot support CPU cached chunks without sync operations");  in init_chunk()
    328  "NO_USER_FREE chunks should not have had their alloc freed")) {  in alloc_new_chunk()
    333  …"NO_USER_FREE chunks should not have been freed and then reallocated as imported/non-native region…  in alloc_new_chunk()
    338  "NO_USER_FREE chunks should not have been freed and then reallocated as JIT regions")) {  in alloc_new_chunk()
    343  "NO_USER_FREE chunks should not have been made ephemeral")) {  in alloc_new_chunk()
    348  "NO_USER_FREE chunks should not have been aliased")) {  in alloc_new_chunk()
    [all …]

mali_kbase_csf_tiler_heap_def.h
     67  * @link: Link to this chunk in a list of chunks belonging to a
     77  * Chunks are allocated upon initialization of a tiler heap or in response to
     78  * out-of-memory events from the firmware. Chunks are always fully backed by
     99  * @chunks_list: Linked list of allocated chunks.
    114  * @chunk_count: The number of chunks currently allocated. Must not be
    116  * @max_chunks: The maximum number of chunks that the heap should be
    119  * to keep in flight for which allocation of new chunks is

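The field documentation quoted from mali_kbase_csf_tiler_heap_def.h outlines a heap that keeps a linked list of chunks plus count and limit bookkeeping. Below is a minimal userspace model of that shape; the type names and the grow policy are assumptions drawn only from the comments above, not the actual Mali driver code.

    /* Hypothetical model of a chunked heap: chunks_list, chunk_count,
     * max_chunks and target_in_flight mirror the documented fields. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct heap_chunk {
        uint64_t gpu_va;          /* address of the chunk's backing memory */
        struct heap_chunk *next;  /* link in the heap's list of chunks */
    };

    struct tiler_heap {
        struct heap_chunk *chunks_list; /* linked list of allocated chunks */
        uint32_t chunk_count;           /* chunks currently allocated */
        uint32_t max_chunks;            /* hard ceiling on chunk_count */
        uint32_t target_in_flight;      /* in-flight work threshold for growth */
    };

    /* Grow the heap by one chunk, as an out-of-memory event handler
     * might, refusing once max_chunks is reached. */
    static bool heap_grow(struct tiler_heap *h, uint64_t gpu_va)
    {
        struct heap_chunk *c;

        if (h->chunk_count >= h->max_chunks)
            return false; /* heap is at its configured limit */
        c = malloc(sizeof(*c));
        if (!c)
            return false;
        c->gpu_va = gpu_va;
        c->next = h->chunks_list;
        h->chunks_list = c;
        h->chunk_count++;
        return true;
    }

    int main(void)
    {
        struct tiler_heap h = { .max_chunks = 4, .target_in_flight = 2 };

        while (heap_grow(&h, 0x1000ull * (h.chunk_count + 1)))
            ;
        printf("allocated %u of %u chunks\n", h.chunk_count, h.max_chunks);
        return 0;
    }
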
mali_kbase_csf_tiler_heap.h
     53  * @initial_chunks: The initial number of chunks to allocate. Must not be
     55  * @max_chunks: The maximum number of chunks that the heap should be allowed
     58  * keep in flight for which allocation of new chunks is
     62  * chunks reclaim for those that are hoarded with hardware while
     85  * This function will terminate a chunked tiler heap and cause all the chunks

/OK3568_Linux_fs/kernel/drivers/gpu/drm/radeon/
radeon_cs.c
    284  /* get chunks */  in radeon_cs_parser_init()
    296  chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);  in radeon_cs_parser_init()
    303  p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);  in radeon_cs_parser_init()
    304  if (p->chunks == NULL) {  in radeon_cs_parser_init()
    317  p->chunks[i].length_dw = user_chunk.length_dw;  in radeon_cs_parser_init()
    319  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
    322  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
    324  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    328  p->chunk_const_ib = &p->chunks[i];  in radeon_cs_parser_init()
    330  if (p->chunks[i].length_dw == 0)  in radeon_cs_parser_init()
    [all …]

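The radeon_cs_parser_init() lines show the recurring command-submission pattern: copy an array of user chunk descriptors into a kernel-side allocation, then reject malformed entries such as zero-length IB chunks. A standalone sketch of just that pattern; the struct layout and chunk ids here are hypothetical, and memcpy stands in for copy_from_user():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical chunk ids; the real ones live in the radeon UAPI. */
    enum { CHUNK_ID_RELOCS = 1, CHUNK_ID_IB = 2 };

    struct cs_chunk {
        uint32_t chunk_id;
        uint32_t length_dw; /* payload length in 32-bit dwords */
    };

    /* Copy submitted chunk descriptors into a parser-owned array and
     * reject empty IB chunks, mirroring the length_dw == 0 checks. */
    static int cs_parser_init(const struct cs_chunk *user, unsigned nchunks,
                              struct cs_chunk **out)
    {
        struct cs_chunk *chunks = calloc(nchunks, sizeof(*chunks));

        if (!chunks)
            return -1;
        memcpy(chunks, user, nchunks * sizeof(*chunks));
        for (unsigned i = 0; i < nchunks; i++) {
            if (chunks[i].chunk_id == CHUNK_ID_IB &&
                chunks[i].length_dw == 0) {
                free(chunks);
                return -1; /* an indirect buffer with no dwords */
            }
        }
        *out = chunks;
        return 0;
    }

    int main(void)
    {
        struct cs_chunk user[] = { { CHUNK_ID_RELOCS, 8 }, { CHUNK_ID_IB, 64 } };
        struct cs_chunk *parsed;

        if (cs_parser_init(user, 2, &parsed) == 0) {
            printf("parsed %u dwords of IB\n", parsed[1].length_dw);
            free(parsed);
        }
        return 0;
    }
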
/OK3568_Linux_fs/kernel/net/sctp/
chunk.c
     43  INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
     65  list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
     81  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
    140  * down any such message into smaller chunks. Opportunistically, fragment
    141  * the chunks down to the current MTU constraints. We may get refragmented
    182  /* If the peer requested that we authenticate DATA chunks  in sctp_datamsg_from_user()
    183  * we need to account for bundling of the AUTH chunks along with  in sctp_datamsg_from_user()
    235  /* Create chunks for all DATA chunks. */  in sctp_datamsg_from_user()
    280  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    289  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()

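The chunk.c hits show sctp_datamsg_from_user() breaking one user message into MTU-sized DATA chunks queued on msg->chunks. A userspace sketch of that fragmentation loop, using a plain singly linked list in place of the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct frag {
        size_t offset, len; /* this fragment's slice of the message */
        struct frag *next;  /* frag_list link */
    };

    /* Split msg_len bytes into fragments of at most max_frag bytes and
     * append each to the list, tail first like list_add_tail(). */
    static struct frag *datamsg_fragment(size_t msg_len, size_t max_frag)
    {
        struct frag *head = NULL, **tail = &head;

        for (size_t off = 0; off < msg_len; off += max_frag) {
            struct frag *f = malloc(sizeof(*f));

            if (!f)
                break;
            f->offset = off;
            f->len = (msg_len - off < max_frag) ? msg_len - off : max_frag;
            f->next = NULL;
            *tail = f;
            tail = &f->next;
        }
        return head;
    }

    int main(void)
    {
        unsigned n = 0;

        for (struct frag *f = datamsg_fragment(3000, 1452); f; f = f->next)
            n++;
        printf("3000-byte message -> %u fragments\n", n); /* prints 3 */
        return 0;
    }
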
outqueue.c
     11  * bundling and queueing of outgoing SCTP chunks.
    204  /* Free the outqueue structure and any related pending chunks.
    212  /* Throw away unacknowledged chunks. */  in __sctp_outq_teardown()
    224  /* Throw away chunks that have been gap ACKed. */  in __sctp_outq_teardown()
    233  /* Throw away any chunks in the retransmit queue. */  in __sctp_outq_teardown()
    242  /* Throw away any chunks that are in the abandoned queue. */  in __sctp_outq_teardown()
    251  /* Throw away any leftover data chunks. */  in __sctp_outq_teardown()
    260  /* Throw away any leftover control chunks. */  in __sctp_outq_teardown()
    273  /* Free the outqueue structure and any related pending chunks. */
    276  /* Throw away leftover chunks. */  in sctp_outq_free()
    [all …]

auth.c
    175  * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO
    186  struct sctp_chunks_param *chunks,  in sctp_auth_make_key_vector()  argument
    197  if (chunks)  in sctp_auth_make_key_vector()
    198  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
    209  if (chunks) {  in sctp_auth_make_key_vector()
    210  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
    301  * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO  in sctp_auth_asoc_create_secret()
    651  * chunks MUST NOT be listed in the CHUNKS parameter. However, if  in __sctp_auth_cid()
    652  * a CHUNKS parameter is received then the types for INIT, INIT-ACK,  in __sctp_auth_cid()
    653  * SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored.  in __sctp_auth_cid()
    [all …]

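sctp_auth_make_key_vector() concatenates the RANDOM, CHUNKS and HMAC-ALGO parameters into one buffer, guarding the optional CHUNKS parameter with `if (chunks)`. A simplified sketch with the parameters reduced to raw byte strings (the real code copies full TLV parameters with their encoded lengths):

    #include <stdlib.h>
    #include <string.h>

    /* Concatenate the three AUTH parameters into one freshly allocated
     * key vector. rnd and hmacs are mandatory; chunks may be NULL,
     * exactly the case the "if (chunks)" tests above guard. */
    static unsigned char *make_key_vector(const unsigned char *rnd, size_t rnd_len,
                                          const unsigned char *chunks, size_t chunks_len,
                                          const unsigned char *hmacs, size_t hmacs_len,
                                          size_t *out_len)
    {
        size_t len = rnd_len + (chunks ? chunks_len : 0) + hmacs_len;
        unsigned char *vec = malloc(len);
        size_t offset = 0;

        if (!vec)
            return NULL;
        memcpy(vec + offset, rnd, rnd_len);
        offset += rnd_len;
        if (chunks) { /* the CHUNKS parameter is optional */
            memcpy(vec + offset, chunks, chunks_len);
            offset += chunks_len;
        }
        memcpy(vec + offset, hmacs, hmacs_len);
        *out_len = len;
        return vec;
    }

    int main(void)
    {
        size_t len;
        unsigned char *v = make_key_vector((const unsigned char *)"rnd", 3,
                                           NULL, 0,
                                           (const unsigned char *)"hmac", 4, &len);

        free(v);
        return len == 7 ? 0 : 1;
    }
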
/OK3568_Linux_fs/buildroot/dl/qt5location/git/src/3rdparty/mapbox-gl-native/deps/rapidjson/1.1.0/include/rapidjson/
allocators.h
     86  /*! This allocator allocate memory blocks from pre-allocated memory chunks.
     90  The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.
     94  If the user-buffer is full then additional chunks are allocated by BaseAllocator.
     98  \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    108  \param baseAllocator The allocator for allocating memory chunks.
    123  \param baseAllocator The allocator for allocating memory chunks.
    137  /*! This deallocates all memory chunks, excluding the user-supplied buffer.
    144  //! Deallocates all memory chunks, excluding the user-supplied buffer.
    155  //! Computes the total capacity of allocated memory chunks.
    254  /*! Chunks are stored as a singly linked list.
    [all …]

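The comments describe rapidjson's MemoryPoolAllocator: bump-allocate out of the current chunk and, when it runs out, push a new chunk obtained from the base allocator onto a singly linked list. A C rendering of that strategy; the real class is a C++ template that also supports a user-supplied first buffer and proper alignment, both of which this sketch ignores:

    #include <stddef.h>
    #include <stdlib.h>

    struct pool_chunk {
        size_t capacity;         /* usable bytes after this header */
        size_t used;             /* bytes already handed out */
        struct pool_chunk *next; /* singly linked list of chunks */
    };

    struct pool {
        struct pool_chunk *head; /* current (most recent) chunk */
        size_t chunk_capacity;   /* default size of new chunks */
    };

    static void *pool_alloc(struct pool *p, size_t size)
    {
        struct pool_chunk *c = p->head;

        if (!c || c->used + size > c->capacity) {
            size_t cap = size > p->chunk_capacity ? size : p->chunk_capacity;

            c = malloc(sizeof(*c) + cap);
            if (!c)
                return NULL;
            c->capacity = cap;
            c->used = 0;
            c->next = p->head; /* push onto the chunk list */
            p->head = c;
        }
        c->used += size;
        return (char *)(c + 1) + c->used - size;
    }

    /* Free every chunk at once; individual allocations are never freed,
     * which is what makes this style of allocator fast. */
    static void pool_clear(struct pool *p)
    {
        while (p->head) {
            struct pool_chunk *next = p->head->next;

            free(p->head);
            p->head = next;
        }
    }

    int main(void)
    {
        struct pool p = { NULL, 64 * 1024 };
        char *s = pool_alloc(&p, 16);

        if (s)
            s[0] = 'x'; /* valid until pool_clear() */
        pool_clear(&p);
        return 0;
    }
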
/OK3568_Linux_fs/kernel/arch/mips/ar7/
prom.c
    102  Name=Value pair in 2 chunks (len is the number of chunks)
    149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;  in parse_psp_env()  local
    151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);  in parse_psp_env()
    156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))  in parse_psp_env()
    158  value = chunks[i].data;  in parse_psp_env()
    159  if (chunks[i].num) {  in parse_psp_env()
    160  name = lookup_psp_var_map(chunks[i].num);  in parse_psp_env()
    167  i += chunks[i].len;  in parse_psp_env()

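parse_psp_env() walks a table of fixed-size chunk slots in which each record states its own length in slots, 0xff marks a free slot, and overruns end the walk. A standalone model of that loop; the slot layout and payload size are guesses from the snippet, not the exact TI PSP format:

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_DATA 14 /* hypothetical payload bytes per chunk slot */

    struct env_chunk {
        uint8_t num;           /* variable id; 0xff marks a free slot */
        uint8_t len;           /* length of this record, in chunk slots */
        char data[CHUNK_DATA]; /* start of the Name=Value text */
    };

    /* Stop on the 0xff sentinel or a record that would run past the
     * table, and advance by each record's own length. Only the first
     * slot's payload is printed here; a real record's text spans
     * chunks[i].len consecutive slots. */
    static void parse_env(const struct env_chunk *chunks, unsigned n)
    {
        unsigned i = 0;

        while (i < n) {
            if (chunks[i].num == 0xff || i + chunks[i].len > n)
                break;
            printf("var %u: %.*s\n", (unsigned)chunks[i].num,
                   CHUNK_DATA, chunks[i].data);
            i += chunks[i].len;
        }
    }

    int main(void)
    {
        struct env_chunk tbl[3] = {
            { 1, 2, "modetty0=38400" }, /* record occupying two slots */
            { 0, 0, "" },               /* consumed as its second slot */
            { 0xff, 0, "" },            /* free marker ends the walk */
        };

        parse_env(tbl, 3);
        return 0;
    }
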
/OK3568_Linux_fs/kernel/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp.c
    504  } *chunks;  in nfp_nsp_command_buf_dma_sg()  local
    516  chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);  in nfp_nsp_command_buf_dma_sg()
    517  if (!chunks)  in nfp_nsp_command_buf_dma_sg()
    525  chunks[i].chunk = kmalloc(chunk_size,  in nfp_nsp_command_buf_dma_sg()
    527  if (!chunks[i].chunk)  in nfp_nsp_command_buf_dma_sg()
    530  chunks[i].len = min_t(u64, chunk_size, max_size - off);  in nfp_nsp_command_buf_dma_sg()
    535  memcpy(chunks[i].chunk, arg->in_buf + off, coff);  in nfp_nsp_command_buf_dma_sg()
    537  memset(chunks[i].chunk + coff, 0, chunk_size - coff);  in nfp_nsp_command_buf_dma_sg()
    539  off += chunks[i].len;  in nfp_nsp_command_buf_dma_sg()
    547  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    [all …]

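nfp_nsp_command_buf_dma_sg() carves a logical command buffer into per-segment allocations, copying however much input reaches each chunk and zero-filling the rest. The split-and-pad step in isolation, with malloc in place of kmalloc and the DMA mapping omitted:

    #include <stdlib.h>
    #include <string.h>

    struct buf_chunk {
        void *chunk;
        size_t len;
    };

    static int split_buf(const char *in, size_t in_len, size_t max_size,
                         size_t chunk_size, struct buf_chunk *chunks,
                         unsigned nseg)
    {
        size_t off = 0;

        for (unsigned i = 0; i < nseg; i++) {
            size_t coff = 0; /* input bytes (not padding) in this chunk */

            chunks[i].chunk = malloc(chunk_size);
            if (!chunks[i].chunk)
                return -1;
            chunks[i].len = max_size - off < chunk_size ?
                            max_size - off : chunk_size;
            if (in_len > off) {
                coff = in_len - off < chunk_size ? in_len - off : chunk_size;
                memcpy(chunks[i].chunk, in + off, coff);
            }
            /* Zero the part of the chunk not covered by input data. */
            memset((char *)chunks[i].chunk + coff, 0, chunk_size - coff);
            off += chunks[i].len;
        }
        return 0;
    }

    int main(void)
    {
        struct buf_chunk chunks[3];
        const char msg[] = "hello nsp";

        /* 3 chunks x 8 bytes = a 24-byte window for a 10-byte input. */
        return split_buf(msg, sizeof(msg), 24, 8, chunks, 3);
    }
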
/OK3568_Linux_fs/kernel/include/net/sctp/
structs.h
    368  struct sctp_chunks_param *chunks;  member
    515  /* Chunks waiting to be submitted to lower layer. */
    516  struct list_head chunks;  member
    559  /* This field is used by chunks that hold fragmented data.
    636  * spec violates the principle premis that all chunks are processed
    696  /* This structure holds lists of chunks as we are assembling for
    705  /* This contains the payload chunks. */
    710  /* This is the total size of all chunks INCLUDING padding. */
    775  * chunks sent to this address is currently being
    942  /* This is the list of transports that have chunks to send. */
    [all …]

/OK3568_Linux_fs/kernel/drivers/infiniband/ulp/rtrs/
README
     28  session. A session is associated with a set of memory chunks reserved on the
     36  chunks reserved for him on the server side. Their number, size and addresses
     45  which of the memory chunks has been accessed and at which offset the message
     80  the server (number of memory chunks which are going to be allocated for that
    122  1. When processing a write request client selects one of the memory chunks
    139  1. When processing a write request client selects one of the memory chunks
    144  using the IMM field, Server invalidate rkey associated to the memory chunks
    162  1. When processing a read request client selects one of the memory chunks
    181  1. When processing a read request client selects one of the memory chunks
    186  Server invalidate rkey associated to the memory chunks first, when it finishes,

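The README says the client reports, through the 32-bit IMM field, which server-side memory chunk was written and at what offset the message starts. One hypothetical way to pack that pair; the bit split below is invented for illustration and is not taken from the RTRS wire format:

    #include <stdint.h>

    #define IMM_OFFSET_BITS 20u /* assumed: offsets within a chunk < 1 MiB */

    static inline uint32_t imm_encode(uint32_t chunk_id, uint32_t offset)
    {
        return (chunk_id << IMM_OFFSET_BITS) |
               (offset & ((1u << IMM_OFFSET_BITS) - 1));
    }

    static inline void imm_decode(uint32_t imm, uint32_t *chunk_id,
                                  uint32_t *offset)
    {
        *chunk_id = imm >> IMM_OFFSET_BITS;
        *offset = imm & ((1u << IMM_OFFSET_BITS) - 1);
    }

    int main(void)
    {
        uint32_t chunk, off;

        imm_decode(imm_encode(3, 4096), &chunk, &off);
        return !(chunk == 3 && off == 4096); /* round-trips cleanly */
    }
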
/OK3568_Linux_fs/kernel/mm/
zbud.c
     31  * zbud pages are divided into "chunks". The size of the chunks is fixed at
     33  * into chunks allows organizing unbuddied zbud pages into a manageable number
     34  * of unbuddied lists according to the number of free chunks available in the
     64  * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
     66  * 63 which shows the max number of free chunks in zbud page, also there will be
    112  * @first_chunks: the size of the first buddy in chunks, 0 if free
    113  * @last_chunks: the size of the last buddy in chunks, 0 if free
    229  /* Converts an allocation size in bytes to size in zbud chunks */
    285  /* Returns the number of free chunks in a zbud page */
    357  int chunks, i, freechunks;  in zbud_alloc()  local
    [all …]

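zbud fixes its allocation granularity at PAGE_SIZE/64, so a page holds 64 chunks of which at most 63 can be free, and the helper at line 229 converts byte sizes to chunk counts. That conversion is a round-up divide; the real helper also accounts for the zbud header, which this sketch leaves out:

    #include <stdio.h>

    #define PAGE_SHIFT    12u                          /* assumed 4 KiB pages */
    #define NCHUNKS_ORDER  6u                          /* 2^6 = 64 chunks/page */
    #define CHUNK_SHIFT   (PAGE_SHIFT - NCHUNKS_ORDER)
    #define CHUNK_SIZE    (1u << CHUNK_SHIFT)          /* PAGE_SIZE/64 = 64 B */

    /* Convert an allocation size in bytes to a size in zbud chunks,
     * rounding up so a partial chunk still reserves a whole one. */
    static unsigned size_to_chunks(unsigned size)
    {
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
    }

    int main(void)
    {
        printf("100 bytes -> %u chunks\n", size_to_chunks(100)); /* 2 */
        printf("1 byte    -> %u chunks\n", size_to_chunks(1));   /* 1 */
        return 0;
    }
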
z3fold.c
     18  * As in zbud, pages are divided into "chunks". The size of the chunks is
     52  * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
     55  * which shows the max number of free chunks in z3fold page, also there will
     99  * struct z3fold_header - z3fold page metadata occupying first chunks of each
    109  * @first_chunks: the size of the first buddy in chunks, 0 if free
    110  * @middle_chunks: the size of the middle buddy in chunks, 0 if free
    111  * @last_chunks: the size of the last buddy in chunks, 0 if free
    201  /* Converts an allocation size in bytes to size in z3fold chunks */
    597  * Returns the number of free chunks in a z3fold page.
    606  * of chunks occupied by the first and the last objects.  in num_free_chunks()
    [all …]

/OK3568_Linux_fs/kernel/drivers/infiniband/hw/usnic/
usnic_vnic.c
     45  struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
    118  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
    119  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
    223  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
    229  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
    255  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
    287  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
    383  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    392  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    428  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()

/OK3568_Linux_fs/kernel/kernel/
audit_tree.c
     17  struct list_head chunks;  member
     68  * tree.chunks anchors chunk.owners[].list hash_lock
    101  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
    435  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
    507  list_add(&p->list, &tree->chunks);  in tag_chunk()
    565  * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
    566  * chunks. The function expects tagged chunks are all at the beginning of the
    567  * chunks list.
    572  while (!list_empty(&victim->chunks)) {  in prune_tree_chunks()
    577  p = list_first_entry(&victim->chunks, struct node, list);  in prune_tree_chunks()
    [all …]

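prune_tree_chunks() drains the tree's chunk list by repeatedly detaching whatever list_first_entry() returns until list_empty() holds. The same drain pattern with a minimal stand-in for the kernel's intrusive doubly linked list:

    #include <stddef.h>
    #include <stdlib.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    struct node {
        struct list_head list; /* links this entry into tree->chunks */
        /* real audit chunks carry more state; omitted here */
    };

    static void list_add(struct list_head *e, struct list_head *head)
    {
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
    }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void prune_tree_chunks(struct list_head *chunks)
    {
        while (chunks->next != chunks) { /* !list_empty() */
            struct node *p = container_of(chunks->next, struct node, list);

            list_del(&p->list); /* unhook the first entry... */
            free(p);            /* ...then release it */
        }
    }

    int main(void)
    {
        struct list_head chunks = { &chunks, &chunks }; /* empty list */

        list_add(&((struct node *)malloc(sizeof(struct node)))->list, &chunks);
        list_add(&((struct node *)malloc(sizeof(struct node)))->list, &chunks);
        prune_tree_chunks(&chunks);
        return chunks.next == &chunks ? 0 : 1; /* fully drained */
    }
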
/OK3568_Linux_fs/kernel/drivers/infiniband/hw/efa/
efa_verbs.c
     92  struct pbl_chunk *chunks;  member
   1202  /* allocate a chunk list that consists of 4KB chunks */  in pbl_chunk_list_create()
   1206  chunk_list->chunks = kcalloc(chunk_list_size,  in pbl_chunk_list_create()
   1207  sizeof(*chunk_list->chunks),  in pbl_chunk_list_create()
   1209  if (!chunk_list->chunks)  in pbl_chunk_list_create()
   1218  chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);  in pbl_chunk_list_create()
   1219  if (!chunk_list->chunks[i].buf)  in pbl_chunk_list_create()
   1222  chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;  in pbl_chunk_list_create()
   1224  chunk_list->chunks[chunk_list_size - 1].length =  in pbl_chunk_list_create()
   1228  /* fill the dma addresses of sg list pages to chunks: */  in pbl_chunk_list_create()
    [all …]

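pbl_chunk_list_create() allocates an array of chunk descriptors, gives every chunk a full 4 KB buffer, and shortens only the last chunk's recorded length. A sketch of that allocation shape; EFA's real chunk size, used size and DMA wiring differ, so the constant here is an assumption:

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_SIZE 4096u /* each chunk buffer is one 4 KiB block */

    struct pbl_chunk {
        void *buf;
        unsigned length;
    };

    static struct pbl_chunk *chunk_list_create(unsigned total_bytes,
                                               unsigned *out_n)
    {
        unsigned n = (total_bytes + CHUNK_SIZE - 1) / CHUNK_SIZE;
        struct pbl_chunk *chunks = calloc(n, sizeof(*chunks));
        unsigned i;

        if (!chunks)
            return NULL;
        for (i = 0; i < n; i++) {
            chunks[i].buf = calloc(1, CHUNK_SIZE);
            if (!chunks[i].buf)
                goto err; /* unwind everything allocated so far */
            chunks[i].length = CHUNK_SIZE;
        }
        /* Only the final chunk holds fewer bytes than CHUNK_SIZE. */
        if (total_bytes % CHUNK_SIZE)
            chunks[n - 1].length = total_bytes % CHUNK_SIZE;
        *out_n = n;
        return chunks;

    err:
        while (i--)
            free(chunks[i].buf);
        free(chunks);
        return NULL;
    }

    int main(void)
    {
        unsigned n;
        struct pbl_chunk *cl = chunk_list_create(10000, &n);

        if (!cl)
            return 1;
        /* 10000 bytes -> 3 chunks; the last holds 10000 % 4096 = 1808. */
        printf("%u chunks, last length %u\n", n, cl[n - 1].length);
        return 0;
    }
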
/OK3568_Linux_fs/kernel/tools/testing/selftests/drivers/net/mlxsw/spectrum/
devlink_lib_spectrum.sh
     15  KVDL_CHILDREN="singles chunks large_chunks"
     92  devlink_resource_size_set 32000 kvd linear chunks
    101  devlink_resource_size_set 32000 kvd linear chunks
    110  devlink_resource_size_set 49152 kvd linear chunks

/OK3568_Linux_fs/kernel/fs/ocfs2/
quota.h
     40  struct list_head rc_list;  /* List of chunks */
     46  struct list_head r_list[OCFS2_MAXQUOTAS];  /* List of chunks to recover */
     53  unsigned int dqi_chunks;  /* Number of chunks in local quota file */
     56  struct list_head dqi_chunk;  /* List of chunks */
     78  struct list_head qc_chunk;  /* List of quotafile chunks */

/OK3568_Linux_fs/kernel/arch/x86/kernel/cpu/resctrl/
monitor.c
    219  u64 shift = 64 - width, chunks;  in mbm_overflow_count()  local
    221  chunks = (cur_msr << shift) - (prev_msr << shift);  in mbm_overflow_count()
    222  return chunks >>= shift;  in mbm_overflow_count()
    228  u64 chunks, tval;  in __mon_event_count()  local
    258  chunks = mbm_overflow_count(m->prev_msr, tval, rr->r->mbm_width);  in __mon_event_count()
    259  m->chunks += chunks;  in __mon_event_count()
    262  rr->val += m->chunks;  in __mon_event_count()
    274  u64 tval, cur_bw, chunks;  in mbm_bw_count()  local
    280  chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);  in mbm_bw_count()
    281  cur_bw = (chunks * r->mon_scale) >> 20;  in mbm_bw_count()

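mbm_overflow_count() computes the delta of a monitoring counter that is only `width` bits wide: shifting both samples up so the counter's top bit lands at bit 63 makes plain unsigned subtraction wrap correctly even when the hardware counter overflowed between reads. The function reproduced as a standalone program with a worked wraparound case:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mbm_overflow_count(uint64_t prev_msr, uint64_t cur_msr,
                                       unsigned width)
    {
        uint64_t shift = 64 - width, chunks;

        /* Align the counter's MSB with bit 63 so 64-bit wraparound
         * mimics the narrow counter's wraparound. */
        chunks = (cur_msr << shift) - (prev_msr << shift);
        return chunks >> shift; /* back to counter units */
    }

    int main(void)
    {
        /* A 24-bit counter that wrapped: 0xFFFFF0 -> 0x000010. */
        printf("delta = %llu\n",
               (unsigned long long)mbm_overflow_count(0xFFFFF0, 0x10, 24));
        return 0; /* prints 32, the true number of elapsed counts */
    }
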
/OK3568_Linux_fs/u-boot/common/
dlmalloc.src
     183  very small chunks.
     266  Because freed chunks may be overwritten with link fields, this
     595  int ordblks;  /* number of non-inuse chunks */
     634  afterward allocate more large chunks) the value should be high
     654  program undergoes phases where several large chunks are
     657  chunks at all. And in well-behaved long-lived programs,
     721  Using mmap segregates relatively large chunks of memory so that
     731  other chunks, as can happen with normally allocated chunks, which
     737  used to service later requests, as happens with normal chunks.
    1150  Chunks of memory are maintained using a `boundary tag' method as
    [all …]

dlmalloc.c
    235  Chunks of memory are maintained using a `boundary tag' method as
    238  survey of such techniques.) Sizes of free chunks are stored both
    240  consolidating fragmented chunks into bigger chunks very fast. The
    241  size fields also hold bits representing whether chunks are free or
    265  Chunks always begin on even word boundries, so the mem portion
    269  Free chunks are stored in circular doubly-linked lists, and look like this:
    308  2. Chunks allocated via mmap, which have the second-lowest-order
    313  Available chunks are kept in any of several places (all declared below):
    315  * `av': An array of chunks serving as bin headers for consolidated
    316  chunks. Each bin is doubly linked. The bins are approximately
    [all …]

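Both dlmalloc listings describe boundary tags: a chunk's size sits at its head and, while the chunk is free, is repeated at its foot, so a free() can locate both physical neighbors in constant time; free chunks additionally carry fd/bk links for their circular bins. A sketch of that layout and the neighbor arithmetic (field names follow dlmalloc's conventions, but this is an illustration, not the allocator's actual declarations):

    #include <stddef.h>

    #define PREV_INUSE 0x1u /* low size bit: previous chunk is in use */

    struct malloc_chunk {
        size_t prev_size; /* foot tag of the previous chunk, valid only
                           * while that chunk is free */
        size_t size;      /* this chunk's size; low bits hold flags */
        /* While free, the first payload words hold the bin links: */
        struct malloc_chunk *fd; /* next chunk in its circular bin */
        struct malloc_chunk *bk; /* previous chunk in its circular bin */
    };

    static size_t chunksize(const struct malloc_chunk *c)
    {
        return c->size & ~(size_t)PREV_INUSE;
    }

    /* The physically following chunk starts chunksize() bytes later. */
    static struct malloc_chunk *next_chunk(struct malloc_chunk *c)
    {
        return (struct malloc_chunk *)((char *)c + chunksize(c));
    }

    /* The preceding chunk is reachable only through its foot tag,
     * which is why sizes are stored "both front and back". */
    static struct malloc_chunk *prev_chunk(struct malloc_chunk *c)
    {
        return (struct malloc_chunk *)((char *)c - c->prev_size);
    }

    int main(void)
    {
        struct malloc_chunk c = { .prev_size = 0, .size = 48 | PREV_INUSE };

        (void)next_chunk; (void)prev_chunk;
        return chunksize(&c) == 48 ? 0 : 1; /* flag bit masked off */
    }
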
/OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.c
    139  /* get chunks */  in amdgpu_cs_parser_init()
    140  chunk_array_user = u64_to_user_ptr(cs->in.chunks);  in amdgpu_cs_parser_init()
    148  p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),  in amdgpu_cs_parser_init()
    150  if (!p->chunks) {  in amdgpu_cs_parser_init()
    167  p->chunks[i].chunk_id = user_chunk.chunk_id;  in amdgpu_cs_parser_init()
    168  p->chunks[i].length_dw = user_chunk.length_dw;  in amdgpu_cs_parser_init()
    170  size = p->chunks[i].length_dw;  in amdgpu_cs_parser_init()
    173  p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);  in amdgpu_cs_parser_init()
    174  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_parser_init()
    180  if (copy_from_user(p->chunks[i].kdata, cdata, size)) {  in amdgpu_cs_parser_init()
    [all …]

/OK3568_Linux_fs/kernel/scripts/gdb/linux/
timerlist.py
    162  chunks = []
    168  chunks.append(buf[start:end])
    170  chunks.append(',')
    174  chunks[0] = chunks[0][0]  # Cut off the first 0
    176  return "".join(chunks)

/OK3568_Linux_fs/kernel/drivers/md/
dm-snap-persistent.c
     32  * requires that we copy exception chunks to chunk aligned areas
     44  * followed by as many exception chunks as can fit in the
    111  * whole chunks worth of metadata in memory at once.
    136  * When creating exceptions, all the chunks here and above are
    139  * the exception store because chunks can be committed out of
    143  * chunks here and above are free. It holds the value it would
    144  * have held if all chunks had been committed in order of
    470  * Keep track of the start of the free chunks.  in insert_exceptions()
    508  * Keeping reading chunks and inserting exceptions until  in read_exceptions()
    580  * Then there are (ps->current_area + 1) metadata chunks, each one  in persistent_usage()
    [all …]

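The dm-snap-persistent.c comments describe the store layout: a header chunk, then repeated areas, each consisting of one metadata chunk followed by as many exception chunks as that metadata chunk can describe. Under that reading, the chunk number of an area's metadata has a closed form; the exceptions-per-area value below is a hypothetical stand-in for chunk_size divided by the on-disk exception record size:

    #include <stdio.h>

    /* Exception records that fit in one metadata chunk; assumed value
     * standing in for chunk_size / sizeof(struct disk_exception). */
    #define EXCEPTIONS_PER_AREA 256u

    /* Chunk 0 is the store header; each area then occupies one metadata
     * chunk plus EXCEPTIONS_PER_AREA exception-data chunks. */
    static unsigned area_location(unsigned area)
    {
        return 1 + (EXCEPTIONS_PER_AREA + 1) * area;
    }

    int main(void)
    {
        printf("metadata chunk of area 3: %u\n", area_location(3));
        return 0; /* 1 + 257 * 3 = 772 */
    }
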