Lines Matching refs:rb_mgr
26 struct ring_buf_manager *rb_mgr = &tee->rb_mgr; in tee_alloc_ring() local
40 rb_mgr->ring_start = start_addr; in tee_alloc_ring()
41 rb_mgr->ring_size = ring_size; in tee_alloc_ring()
42 rb_mgr->ring_pa = __psp_pa(start_addr); in tee_alloc_ring()
43 mutex_init(&rb_mgr->mutex); in tee_alloc_ring()
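The tee_alloc_ring() references above fill in every field of the ring buffer manager except the write pointer: the ring's kernel virtual address, its size, a physical address derived with __psp_pa() (presumably because the PSP firmware addresses the ring physically rather than through a kernel mapping), and the mutex that later serializes tee_submit_cmd(). Together with the wptr reference at line 124, they imply roughly the following state; the field ordering and exact types here are assumptions, not the verbatim driver header:

struct ring_buf_manager {
	struct mutex mutex;	/* serializes producers in tee_submit_cmd() */
	void *ring_start;	/* kernel virtual address of the ring pages */
	u32 ring_size;		/* ring size in bytes */
	phys_addr_t ring_pa;	/* physical address handed to the PSP firmware */
	u32 wptr;		/* byte offset of the next free command slot */
};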
50 struct ring_buf_manager *rb_mgr = &tee->rb_mgr; in tee_free_ring() local
52 if (!rb_mgr->ring_start) in tee_free_ring()
55 free_pages((unsigned long)rb_mgr->ring_start, in tee_free_ring()
56 get_order(rb_mgr->ring_size)); in tee_free_ring()
58 rb_mgr->ring_start = NULL; in tee_free_ring()
59 rb_mgr->ring_size = 0; in tee_free_ring()
60 rb_mgr->ring_pa = 0; in tee_free_ring()
61 mutex_destroy(&rb_mgr->mutex); in tee_free_ring()
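tee_free_ring() is the inverse of the allocation: it bails out early if no ring was ever set up, returns the pages, and clears the manager state before destroying the mutex. The free_pages()/get_order() pair implies the ring was obtained with __get_free_pages(); a hedged reconstruction of that allocation follows (tee_alloc_ring_sketch is a hypothetical name and the GFP flags are assumptions):

static int tee_alloc_ring_sketch(struct psp_tee_device *tee, int ring_size)
{
	struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
	void *start_addr;

	/* Allocator inferred from the free_pages()/get_order() pair above;
	 * the flags are an assumption.
	 */
	start_addr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(ring_size));
	if (!start_addr)
		return -ENOMEM;

	rb_mgr->ring_start = start_addr;
	rb_mgr->ring_size = ring_size;
	rb_mgr->ring_pa = __psp_pa(start_addr);
	mutex_init(&rb_mgr->mutex);

	return 0;
}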
93 cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa); in tee_alloc_cmd_buffer()
94 cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa); in tee_alloc_cmd_buffer()
95 cmd->size = tee->rb_mgr.ring_size; in tee_alloc_cmd_buffer()
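In tee_alloc_cmd_buffer() the ring's physical address and size are packed into the ring-registration command sent to the firmware: the 64-bit ring_pa is split with upper_32_bits()/lower_32_bits() into two 32-bit fields. The three assignments imply a payload shaped roughly like the sketch below (the struct name and field layout are assumptions, not the driver's real definition):

struct tee_init_ring_cmd_sketch {
	u32 low_addr;	/* lower_32_bits(tee->rb_mgr.ring_pa) */
	u32 hi_addr;	/* upper_32_bits(tee->rb_mgr.ring_pa) */
	u32 size;	/* tee->rb_mgr.ring_size, in bytes */
};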
124 tee->rb_mgr.wptr = 0; in tee_init_ring()
170 if (!tee->rb_mgr.ring_start) in tee_destroy_ring()
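The two single references above mark the lifecycle boundaries of the ring: tee_init_ring() resets the write pointer before the ring is handed to the firmware, and tee_destroy_ring() uses ring_start as a was-it-ever-allocated guard. Two micro-sketches follow, with the firmware command plumbing elided and hypothetical _sketch names:

static int tee_init_ring_sketch(struct psp_tee_device *tee)
{
	tee->rb_mgr.wptr = 0;		/* first command goes in slot 0 */
	/* ... then register ring_pa/ring_size with the firmware ... */
	return 0;
}

static void tee_destroy_ring_sketch(struct psp_tee_device *tee)
{
	if (!tee->rb_mgr.ring_start)
		return;			/* nothing was allocated */
	/* ... ask the firmware to release the ring, then free it ... */
}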
253 mutex_lock(&tee->rb_mgr.mutex); in tee_submit_cmd()
259 (tee->rb_mgr.ring_start + tee->rb_mgr.wptr); in tee_submit_cmd()
266 if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || in tee_submit_cmd()
271 rptr, tee->rb_mgr.wptr); in tee_submit_cmd()
274 mutex_unlock(&tee->rb_mgr.mutex); in tee_submit_cmd()
276 mutex_lock(&tee->rb_mgr.mutex); in tee_submit_cmd()
281 (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || in tee_submit_cmd()
284 rptr, tee->rb_mgr.wptr, cmd->flag); in tee_submit_cmd()
307 tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd); in tee_submit_cmd()
308 if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size) in tee_submit_cmd()
309 tee->rb_mgr.wptr = 0; in tee_submit_cmd()
312 iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg); in tee_submit_cmd()
320 mutex_unlock(&tee->rb_mgr.mutex); in tee_submit_cmd()
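tee_submit_cmd() is the producer side of the ring. Holding rb_mgr.mutex, it points at the slot ring_start + wptr, reads the firmware's read pointer, and treats the ring as busy when the next write offset would land on rptr or when that slot's previous command is still awaiting a response (the checks at lines 266 and 281 bracket a drop-the-lock, wait, retry sequence). Once the command is written it advances wptr by one command, wraps at ring_size, and publishes the new value through the ring write-pointer register. A condensed sketch of that bookkeeping is below; tee_submit_cmd_sketch is a hypothetical name, ring_rptr_reg is assumed by analogy with ring_wptr_reg, and the retry loop and flag handling are elided:

static int tee_submit_cmd_sketch(struct psp_tee_device *tee,
				 struct tee_ring_cmd *cmd)
{
	struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
	struct tee_ring_cmd *slot;
	u32 rptr;

	mutex_lock(&rb_mgr->mutex);

	slot = (struct tee_ring_cmd *)(rb_mgr->ring_start + rb_mgr->wptr);
	rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);

	/* Full when the next write offset would land on the read pointer. */
	if (rb_mgr->wptr + sizeof(struct tee_ring_cmd) == rptr) {
		mutex_unlock(&rb_mgr->mutex);
		return -EBUSY;	/* the real path waits and retries instead */
	}

	memcpy(slot, cmd, sizeof(*slot));

	/* Advance the producer offset, wrapping at the end of the ring. */
	rb_mgr->wptr += sizeof(struct tee_ring_cmd);
	if (rb_mgr->wptr >= rb_mgr->ring_size)
		rb_mgr->wptr = 0;

	/* Doorbell: publish the new write pointer to the firmware. */
	iowrite32(rb_mgr->wptr, tee->io_regs + tee->vdata->ring_wptr_reg);

	mutex_unlock(&rb_mgr->mutex);

	return 0;
}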