| /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/gt/ |
| selftest_timeline.c |
    24  static struct page *hwsp_page(struct intel_timeline *tl) in hwsp_page() argument
    26  struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; in hwsp_page()
    32  static unsigned long hwsp_cacheline(struct intel_timeline *tl) in hwsp_cacheline() argument
    34  unsigned long address = (unsigned long)page_address(hwsp_page(tl)); in hwsp_cacheline()
    36  return (address + tl->hwsp_offset) / CACHELINE_BYTES; in hwsp_cacheline()
    55  struct intel_timeline *tl) in __mock_hwsp_record() argument
    57  tl = xchg(&state->history[idx], tl); in __mock_hwsp_record()
    58  if (tl) { in __mock_hwsp_record()
    59  radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); in __mock_hwsp_record()
    60  intel_timeline_put(tl); in __mock_hwsp_record()
    [all …]
|
| intel_timeline.c |
    330  void __intel_timeline_pin(struct intel_timeline *tl) in __intel_timeline_pin() argument
    332  GEM_BUG_ON(!atomic_read(&tl->pin_count)); in __intel_timeline_pin()
    333  atomic_inc(&tl->pin_count); in __intel_timeline_pin()
    336  int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww) in intel_timeline_pin() argument
    340  if (atomic_add_unless(&tl->pin_count, 1, 0)) in intel_timeline_pin()
    343  err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH); in intel_timeline_pin()
    347  tl->hwsp_offset = in intel_timeline_pin()
    348  i915_ggtt_offset(tl->hwsp_ggtt) + in intel_timeline_pin()
    349  offset_in_page(tl->hwsp_offset); in intel_timeline_pin()
    350  GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", in intel_timeline_pin()
    [all …]
|
| intel_gt_requests.c |
    17  static bool retire_requests(struct intel_timeline *tl) in retire_requests() argument
    21  list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
    26  return !i915_active_fence_isset(&tl->last_request); in retire_requests()
    64  struct intel_timeline *tl = xchg(&engine->retire, NULL); in engine_retire() local
    67  struct intel_timeline *next = xchg(&tl->retire, NULL); in engine_retire()
    77  if (mutex_trylock(&tl->mutex)) { in engine_retire()
    78  retire_requests(tl); in engine_retire()
    79  mutex_unlock(&tl->mutex); in engine_retire()
    81  intel_timeline_put(tl); in engine_retire()
    84  tl = ptr_mask_bits(next, 1); in engine_retire()
    [all …]
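A note on the pattern in the retire_requests() hits above: the request list is walked with list_for_each_entry_safe() precisely because retiring a request unlinks and may free it, so the next pointer must be captured before the current node goes away. Below is a minimal user-space sketch of that idea, assuming nothing from the kernel; struct request, the completed flag and the seqno values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        int seqno;
        bool completed;
        struct request *next;
};

/* Free every completed request; return true if the list drained completely. */
static bool retire_requests(struct request **head)
{
        struct request **link = head;
        struct request *rq, *rn;                /* current node and saved next, like rq/rn above */

        for (rq = *head; rq; rq = rn) {
                rn = rq->next;                  /* capture next before rq can be freed */
                if (rq->completed) {
                        *link = rn;             /* unlink ... */
                        free(rq);               /* ... then free; rn keeps the walk alive */
                } else {
                        link = &rq->next;
                }
        }
        return *head == NULL;
}

int main(void)
{
        struct request *head = NULL, **tail = &head;

        for (int i = 0; i < 4; i++) {
                struct request *rq = calloc(1, sizeof(*rq));
                rq->seqno = i;
                rq->completed = (i != 2);       /* leave one request outstanding */
                *tail = rq;
                tail = &rq->next;
        }

        printf("drained: %s\n", retire_requests(&head) ? "yes" : "no");  /* "no": seqno 2 pending */
        free(head);                             /* the single request left over */
        return 0;
}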
|
| intel_timeline.h |
    67  static inline int __intel_timeline_sync_set(struct intel_timeline *tl, in __intel_timeline_sync_set() argument
    70  return i915_syncmap_set(&tl->sync, context, seqno); in __intel_timeline_sync_set()
    73  static inline int intel_timeline_sync_set(struct intel_timeline *tl, in intel_timeline_sync_set() argument
    76  return __intel_timeline_sync_set(tl, fence->context, fence->seqno); in intel_timeline_sync_set()
    79  static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl, in __intel_timeline_sync_is_later() argument
    82  return i915_syncmap_is_later(&tl->sync, context, seqno); in __intel_timeline_sync_is_later()
    85  static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, in intel_timeline_sync_is_later() argument
    88  return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); in intel_timeline_sync_is_later()
    91  void __intel_timeline_pin(struct intel_timeline *tl);
    92  int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww);
    [all …]
|
| selftest_context.c |
    18  struct intel_timeline *tl = i915_request_timeline(rq); in request_sync() local
    22  intel_timeline_get(tl); in request_sync()
    36  lockdep_unpin_lock(&tl->mutex, rq->cookie); in request_sync()
    37  mutex_unlock(&tl->mutex); in request_sync()
    40  intel_timeline_put(tl); in request_sync()
    47  struct intel_timeline *tl = ce->timeline; in context_sync() local
    50  mutex_lock(&tl->mutex); in context_sync()
    55  if (list_empty(&tl->requests)) in context_sync()
    58  rq = list_last_entry(&tl->requests, typeof(*rq), link); in context_sync()
    69  mutex_unlock(&tl->mutex); in context_sync()
|
| intel_context.h |
    158  struct intel_timeline *tl = ce->timeline; in intel_context_timeline_lock() local
    161  err = mutex_lock_interruptible(&tl->mutex); in intel_context_timeline_lock()
    165  return tl; in intel_context_timeline_lock()
    168  static inline void intel_context_timeline_unlock(struct intel_timeline *tl) in intel_context_timeline_unlock() argument
    169  __releases(&tl->mutex) in intel_context_timeline_unlock()
    171  mutex_unlock(&tl->mutex); in intel_context_timeline_unlock()
|
| /OK3568_Linux_fs/kernel/fs/cifs/ |
| dfs_cache.h |
    56  dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl, in dfs_cache_get_next_tgt() argument
    59  if (!tl || list_empty(&tl->tl_list) || !it || in dfs_cache_get_next_tgt()
    60  list_is_last(&it->it_list, &tl->tl_list)) in dfs_cache_get_next_tgt()
    66  dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl) in dfs_cache_get_tgt_iterator() argument
    68  if (!tl) in dfs_cache_get_tgt_iterator()
    70  return list_first_entry_or_null(&tl->tl_list, in dfs_cache_get_tgt_iterator()
    75  static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl) in dfs_cache_free_tgts() argument
    79  if (!tl || list_empty(&tl->tl_list)) in dfs_cache_free_tgts()
    81  list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) { in dfs_cache_free_tgts()
    86  tl->tl_numtgts = 0; in dfs_cache_free_tgts()
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/net/ethernet/netronome/nfp/ |
| nfp_net_debugdump.c |
    58  struct nfp_dump_tl tl; member
    64  struct nfp_dump_tl tl; member
    70  struct nfp_dump_tl tl; member
    78  struct nfp_dump_tl tl; member
    87  struct nfp_dump_tl tl; member
    92  struct nfp_dump_tl tl; member
    112  typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
    120  struct nfp_dump_tl *tl; in nfp_traverse_tlvs() local
    125  while (remaining >= sizeof(*tl)) { in nfp_traverse_tlvs()
    126  tl = p; in nfp_traverse_tlvs()
    [all …]
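The nfp_traverse_tlvs() fragment above is a classic type/length walk over a dump buffer, dispatching each record to an nfp_tlv_visit callback. Below is a minimal, self-contained sketch of that traversal shape; the 4-byte little-endian header, the tl_hdr/traverse_tlvs/print_tlv names and the sample buffer are assumptions for illustration, not the nfp_dump_tl wire format or the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tl_hdr {
        uint16_t type;
        uint16_t length;        /* payload bytes that follow the header */
};

typedef int (*tlv_visit)(const struct tl_hdr *tl, const uint8_t *payload);

static int traverse_tlvs(const uint8_t *buf, size_t remaining, tlv_visit visit)
{
        struct tl_hdr tl;

        while (remaining >= sizeof(tl)) {
                memcpy(&tl, buf, sizeof(tl));            /* copy out: avoids unaligned access */
                if (sizeof(tl) + tl.length > remaining)
                        return -1;                       /* truncated TLV, stop */
                if (visit(&tl, buf + sizeof(tl)))
                        return -1;
                buf += sizeof(tl) + tl.length;           /* step to the next record */
                remaining -= sizeof(tl) + tl.length;
        }
        return 0;
}

static int print_tlv(const struct tl_hdr *tl, const uint8_t *payload)
{
        (void)payload;
        printf("type=%u len=%u\n", (unsigned)tl->type, (unsigned)tl->length);
        return 0;
}

int main(void)
{
        /* two records, little-endian host assumed: {type 1, len 2} then {type 2, len 0} */
        const uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb,
                                2, 0, 0, 0 };

        return traverse_tlvs(buf, sizeof(buf), print_tlv);
}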
|
| /OK3568_Linux_fs/kernel/fs/ext4/ |
| fast_commit.c |
    594  struct ext4_fc_tl *tl; in ext4_fc_reserve_space() local
    623  tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off); in ext4_fc_reserve_space()
    624  tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); in ext4_fc_reserve_space()
    626  tl->fc_len = cpu_to_le16(pad_len); in ext4_fc_reserve_space()
    628  *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl)); in ext4_fc_reserve_space()
    630  ext4_fc_memzero(sb, tl + 1, pad_len, crc); in ext4_fc_reserve_space()
    661  struct ext4_fc_tl tl; in ext4_fc_write_tail() local
    670  dst = ext4_fc_reserve_space(sb, sizeof(tl) + sizeof(tail), &crc); in ext4_fc_write_tail()
    676  tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL); in ext4_fc_write_tail()
    677  tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail)); in ext4_fc_write_tail()
    [all …]
|
| /OK3568_Linux_fs/kernel/crypto/ |
| vmac.c |
    151  int i; u64 th, tl; \
    154  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    156  ADD128(rh, rl, th, tl); \
    162  int i; u64 th, tl; \
    165  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    167  ADD128(rh, rl, th, tl); \
    168  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
    170  ADD128(rh1, rl1, th, tl); \
    177  int i; u64 th, tl; \
    180  MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
    [all …]
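For context on the vmac.c macros above: MUL64() forms the 128-bit product of two 64-bit words, split into a high/low pair (th/tl), and ADD128() folds that pair into a 128-bit accumulator (rh/rl) with carry propagation. A small sketch of the same arithmetic, assuming a compiler with unsigned __int128 (GCC/Clang); the mul64/add128 helper names are mine, not the kernel macros.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void mul64(uint64_t *th, uint64_t *tl, uint64_t a, uint64_t b)
{
        unsigned __int128 p = (unsigned __int128)a * b;

        *th = (uint64_t)(p >> 64);      /* high 64 bits of the product */
        *tl = (uint64_t)p;              /* low 64 bits */
}

static void add128(uint64_t *rh, uint64_t *rl, uint64_t th, uint64_t tl)
{
        uint64_t lo = *rl + tl;

        *rh += th + (lo < *rl);         /* carry out of the low word */
        *rl = lo;
}

int main(void)
{
        uint64_t rh = 0, rl = 0, th, tl;

        mul64(&th, &tl, 0xfedcba9876543210ULL, 0x0123456789abcdefULL);
        add128(&rh, &rl, th, tl);
        printf("0x%016" PRIx64 "%016" PRIx64 "\n", rh, rl);
        return 0;
}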
|
| camellia_generic.c |
    367  u32 dw, tl, tr; in camellia_setup_tail() local
    466  tl = subL[10] ^ (subR[10] & ~subR[8]); in camellia_setup_tail()
    467  dw = tl & subL[8]; /* FL(kl1) */ in camellia_setup_tail()
    469  SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ in camellia_setup_tail()
    475  tl = subL[7] ^ (subR[7] & ~subR[9]); in camellia_setup_tail()
    476  dw = tl & subL[9]; /* FLinv(kl2) */ in camellia_setup_tail()
    478  SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ in camellia_setup_tail()
    488  tl = subL[18] ^ (subR[18] & ~subR[16]); in camellia_setup_tail()
    489  dw = tl & subL[16]; /* FL(kl3) */ in camellia_setup_tail()
    491  SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ in camellia_setup_tail()
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/ |
| mali_kbase_sync_android.c |
    143  struct sync_timeline *tl; in mali_sync_timeline_alloc() local
    146  tl = sync_timeline_create(&mali_timeline_ops, in mali_sync_timeline_alloc()
    148  if (!tl) in mali_sync_timeline_alloc()
    152  mtl = to_mali_sync_timeline(tl); in mali_sync_timeline_alloc()
    156  return tl; in mali_sync_timeline_alloc()
    161  struct sync_timeline *tl; in kbase_stream_close() local
    163  tl = (struct sync_timeline *)file->private_data; in kbase_stream_close()
    164  sync_timeline_destroy(tl); in kbase_stream_close()
    175  struct sync_timeline *tl; in kbase_sync_fence_stream_create() local
    180  tl = mali_sync_timeline_alloc(name); in kbase_sync_fence_stream_create()
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/isdn/mISDN/ |
| fsm.c |
    98  struct FsmTimer *ft = from_timer(ft, t, tl); in FsmExpireTimer()
    114  timer_setup(&ft->tl, FsmExpireTimer, 0); in mISDN_FsmInitTimer()
    126  del_timer(&ft->tl); in mISDN_FsmDelTimer()
    141  if (timer_pending(&ft->tl)) { in mISDN_FsmAddTimer()
    152  ft->tl.expires = jiffies + (millisec * HZ) / 1000; in mISDN_FsmAddTimer()
    153  add_timer(&ft->tl); in mISDN_FsmAddTimer()
    169  if (timer_pending(&ft->tl)) in mISDN_FsmRestartTimer()
    170  del_timer(&ft->tl); in mISDN_FsmRestartTimer()
    173  ft->tl.expires = jiffies + (millisec * HZ) / 1000; in mISDN_FsmRestartTimer()
    174  add_timer(&ft->tl); in mISDN_FsmRestartTimer()
|
| /OK3568_Linux_fs/buildroot/dl/qt5location/git/src/3rdparty/mapbox-gl-native/src/mbgl/text/ |
| quads.cpp |
    31  Point<float> tl; in getIconQuad() local
    58  tl = {left, top}; in getIconQuad()
    63  tl = {left, top}; in getIconQuad()
    77  tl = util::matrixMultiply(matrix, tl); in getIconQuad()
    91  return SymbolQuad { tl, tr, bl, br, textureRect, shapedText.writingMode, { 0.0f, 0.0f } }; in getIconQuad()
    136  Point<float> tl{x1, y1}; in getGlyphQuads() local
    153  tl = util::rotate(tl - center, verticalRotation) + center + xOffsetCorrection; in getGlyphQuads()
    165  tl = util::matrixMultiply(matrix, tl); in getGlyphQuads()
    171  quads.emplace_back(tl, tr, bl, br, rect, shapedText.writingMode, glyphOffset); in getGlyphQuads()
|
| /OK3568_Linux_fs/kernel/kernel/sched/ |
| topology.c |
    1315  sd_init(struct sched_domain_topology_level *tl, in sd_init() argument
    1319  struct sd_data *sdd = &tl->data; in sd_init()
    1327  sched_domains_curr_level = tl->numa_level; in sd_init()
    1330  sd_weight = cpumask_weight(tl->mask(cpu)); in sd_init()
    1332  if (tl->sd_flags) in sd_init()
    1333  sd_flags = (*tl->sd_flags)(); in sd_init()
    1368  .name = tl->name, in sd_init()
    1372  cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); in sd_init()
    1396  if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) { in sd_init()
    1439  #define for_each_sd_topology(tl) \ argument
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/s390/net/ |
| fsm.c |
    135  fsm_timer *this = from_timer(this, t, tl); in fsm_expire_timer()
    151  timer_setup(&this->tl, fsm_expire_timer, 0); in fsm_settimer()
    161  del_timer(&this->tl); in fsm_deltimer()
    173  timer_setup(&this->tl, fsm_expire_timer, 0); in fsm_addtimer()
    176  this->tl.expires = jiffies + (millisec * HZ) / 1000; in fsm_addtimer()
    177  add_timer(&this->tl); in fsm_addtimer()
    191  del_timer(&this->tl); in fsm_modtimer()
    192  timer_setup(&this->tl, fsm_expire_timer, 0); in fsm_modtimer()
    195  this->tl.expires = jiffies + (millisec * HZ) / 1000; in fsm_modtimer()
    196  add_timer(&this->tl); in fsm_modtimer()
|
| /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/ |
| i915_request.c |
    341  struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_retire_upto() local
    349  tmp = list_first_entry(&tl->requests, typeof(*tmp), link); in i915_request_retire_upto()
    722  static void retire_requests(struct intel_timeline *tl) in retire_requests() argument
    726  list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
    732  request_alloc_slow(struct intel_timeline *tl, in request_alloc_slow() argument
    747  if (list_empty(&tl->requests)) in request_alloc_slow()
    751  rq = list_first_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
    760  rq = list_last_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
    764  retire_requests(tl); in request_alloc_slow()
    787  struct intel_timeline *tl = ce->timeline; in __i915_request_create() local
    [all …]
|
| /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/tl/ |
| Kbuild |
    22  tl/mali_kbase_timeline.o \
    23  tl/mali_kbase_timeline_io.o \
    24  tl/mali_kbase_tlstream.o \
    25  tl/mali_kbase_tracepoints.o
    29  bifrost_kbase-y += tl/backend/mali_kbase_timeline_csf.o
    31  bifrost_kbase-y += tl/backend/mali_kbase_timeline_jm.o
|
| /OK3568_Linux_fs/kernel/arch/x86/crypto/ |
| camellia_glue.c |
    799  u32 dw, tl, tr; in camellia_setup_tail() local
    910  tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]); in camellia_setup_tail()
    911  dw = tl & (subRL[8] >> 32); /* FL(kl1) */ in camellia_setup_tail()
    913  tt = (tr | ((u64)tl << 32)); in camellia_setup_tail()
    919  tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]); in camellia_setup_tail()
    920  dw = tl & (subRL[9] >> 32); /* FLinv(kl2) */ in camellia_setup_tail()
    922  tt = (tr | ((u64)tl << 32)); in camellia_setup_tail()
    930  tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]); in camellia_setup_tail()
    931  dw = tl & (subRL[16] >> 32); /* FL(kl3) */ in camellia_setup_tail()
    933  tt = (tr | ((u64)tl << 32)); in camellia_setup_tail()
    [all …]
|
| /OK3568_Linux_fs/buildroot/boot/grub2/ |
| 0117-io-gzio-Zero-gzio-tl-td-in-init_dynamic_block-if-huf.patch |
    4  Subject: [PATCH] io/gzio: Zero gzio->tl/td in init_dynamic_block() if
    7  If huft_build() fails, gzio->tl or gzio->td could contain pointers that
    26  if (huft_build (ll, nl, 257, cplens, cplext, &gzio->tl, &gzio->bl) != 0)
    28  + gzio->tl = 0;
    34  huft_free (gzio->tl);
    35  gzio->tl = 0;
|
| 0114-io-gzio-Bail-if-gzio-tl-td-is-NULL.patch |
    4  Subject: [PATCH] io/gzio: Bail if gzio->tl/td is NULL
    6  This is an ugly fix that doesn't address why gzio->tl comes to be NULL.
    29  + if (gzio->tl == NULL)
    31  + grub_error (GRUB_ERR_BAD_COMPRESSED_DATA, "NULL gzio->tl");
    36  if ((e = (t = gzio->tl + ((unsigned) b & ml))->e) > 16)
    56  + if (gzio->tl == NULL)
    58  + grub_error (GRUB_ERR_BAD_COMPRESSED_DATA, "NULL gzio->tl");
|
| 0115-io-gzio-Add-init_dynamic_block-clean-up-if-unpacking.patch |
    7  init_dynamic_block() didn't clean up gzio->tl and td in some error
    8  paths. This left td pointing to part of tl. Then in grub_gzio_close(),
    9  when tl was freed the storage for td would also be freed. The code then
    12  Explicitly clean up tl and td in the error paths.
    59  + huft_free (gzio->tl);
    61  + gzio->tl = NULL;
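The three GRUB patches above circle the same lifetime bug: after a failed or partial huft_build(), gzio->tl/td could be left dangling (td even pointing into tl's storage), so a later path dereferenced or double-freed them. A standalone sketch of the defensive pattern the patches converge on (free then NULL the pointer, and bail before using a table that was never built), in plain C; struct gzio_state, init_tables() and the other names here are invented for illustration and are not GRUB's code.

#include <stdio.h>
#include <stdlib.h>

struct huft { int dummy; };

struct gzio_state {
        struct huft *tl;        /* literal/length table */
        struct huft *td;        /* distance table */
};

static void huft_free(struct huft *t)
{
        free(t);                /* free(NULL) is a no-op, so a NULLed table is always safe */
}

static int init_tables(struct gzio_state *g, int fail)
{
        g->tl = calloc(1, sizeof(*g->tl));
        if (fail) {                     /* simulate huft_build() failing part way through */
                huft_free(g->tl);
                g->tl = NULL;           /* the fix: never leave a stale pointer behind */
                g->td = NULL;
                return -1;
        }
        g->td = calloc(1, sizeof(*g->td));
        return 0;
}

static int decode(struct gzio_state *g)
{
        if (!g->tl || !g->td) {         /* the other fix: refuse to use a missing table */
                fprintf(stderr, "tables not built\n");
                return -1;
        }
        return 0;
}

static void gzio_close(struct gzio_state *g)
{
        huft_free(g->tl);               /* safe even after a failed init */
        huft_free(g->td);
        g->tl = g->td = NULL;
}

int main(void)
{
        struct gzio_state g = { 0 };

        init_tables(&g, 1);             /* take the failure path */
        decode(&g);                     /* bails out instead of dereferencing NULL */
        gzio_close(&g);                 /* no double free, thanks to the NULLed pointers */
        return 0;
}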
|
| /OK3568_Linux_fs/kernel/lib/ |
| inflate.c |
    590  struct huft *tl, /* literal/length decoder tables */ in inflate_codes() argument
    618  if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) in inflate_codes()
    771  struct huft *tl; /* literal/length code table */ in inflate_fixed() local
    793  if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) { in inflate_fixed()
    804  huft_free(tl); in inflate_fixed()
    813  if (inflate_codes(tl, td, bl, bd)) { in inflate_fixed()
    820  huft_free(tl); in inflate_fixed()
    837  struct huft *tl; /* literal/length code table */ in inflate_dynamic() local
    901  if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) in inflate_dynamic()
    904  huft_free(tl); in inflate_dynamic()
    [all …]
|
| /OK3568_Linux_fs/buildroot/dl/qt5location/git/tests/auto/declarative_geoshape/ |
| tst_locationsingleton.qml |
    83  property variant tl: QtPositioning.coordinate(1, 0)
    92  property variant box: QtPositioning.rectangle(tl, br)
    94  property variant coordinates: [bl, tl, tr, br]
    95  property variant coordinates2: [bl, tl, tr, br, ntr]
    139  … { tag: "box equal", shape1: box, shape2: QtPositioning.rectangle(tl, br), result: true },
    168  circle = QtPositioning.shapeToCircle(QtPositioning.circle(tl, 10000))
    170  compare(circle.center, tl)
    174  circle = QtPositioning.shapeToCircle(QtPositioning.rectangle(tl, br))
    185  rectangle = QtPositioning.shapeToRectangle(QtPositioning.circle(tl, 10000))
    189  rectangle = QtPositioning.shapeToRectangle(QtPositioning.rectangle(tl, br))
    [all …]
|
| /OK3568_Linux_fs/u-boot/drivers/spi/ |
| mxs_spi.c |
    200  int tl; in mxs_spi_xfer_dma() local
    258  tl = 0x10000; in mxs_spi_xfer_dma()
    260  tl = min(length, xfer_max_sz); in mxs_spi_xfer_dma()
    263  ((tl & 0xffff) << MXS_DMA_DESC_BYTES_OFFSET) | in mxs_spi_xfer_dma()
    268  data += tl; in mxs_spi_xfer_dma()
    269  length -= tl; in mxs_spi_xfer_dma()
    292  dp->cmd.pio_words[3] = tl; in mxs_spi_xfer_dma()
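The mxs_spi_xfer_dma() hits above show the usual chunking loop: each pass transfers at most xfer_max_sz bytes (tl), then advances the data pointer and shrinks the remaining length. A trivial user-space sketch of that loop follows; XFER_MAX_SZ, xfer_chunked() and the commented-out submit_dma() hand-off are all hypothetical stand-ins, not the U-Boot driver.

#include <stddef.h>
#include <stdio.h>

#define XFER_MAX_SZ 0x10000UL   /* illustrative 64 KiB ceiling, mirroring the cap above */

static size_t min_sz(size_t a, size_t b)
{
        return a < b ? a : b;
}

static void xfer_chunked(size_t length)
{
        size_t offset = 0;

        while (length) {
                size_t tl = min_sz(length, XFER_MAX_SZ);

                /* submit_dma(buf + offset, tl);  -- hypothetical hardware hand-off */
                printf("chunk of %zu bytes at offset %zu\n", tl, offset);
                offset += tl;
                length -= tl;
        }
}

int main(void)
{
        xfer_chunked(150000);   /* prints chunks of 65536, 65536 and 18928 bytes */
        return 0;
}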
|