Lines Matching refs:ec
98 return &(sp->ec[get_ec_index(sp)]); in spmc_get_sp_ec()
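
Every hit in this listing touches the per-CPU execution context (struct sp_exec_ctx) that the SPMC keeps in each partition descriptor's ec[] array. The first hit, spmc_get_sp_ec() at line 98, is the lookup helper. A minimal sketch of that lookup follows; the PLATFORM_CORE_COUNT value, the my_core_pos() hook and the body of get_ec_index() are assumptions made for the sketch, standing in for the real per-core index mapping.

#include <assert.h>

/* Assumed core count, only to size the sketch. */
#define PLATFORM_CORE_COUNT 4U

/* Trimmed-down execution context: only fields that appear in this listing. */
struct sp_exec_ctx {
        unsigned int rt_state;
        unsigned int rt_model;
};

/* Trimmed-down partition descriptor: one EC per physical core. */
struct secure_partition_desc {
        struct sp_exec_ctx ec[PLATFORM_CORE_COUNT];
};

/* Hypothetical stand-in for the platform's "which core am I" query. */
static unsigned int my_core_pos(void)
{
        return 0U;
}

/* Assumed shape of get_ec_index(): one EC per core, indexed by core id. */
static unsigned int get_ec_index(struct secure_partition_desc *sp)
{
        (void)sp;
        return my_core_pos();
}

/* Same pattern as the hit at line 98: return this core's execution context. */
struct sp_exec_ctx *spmc_get_sp_ec_sketch(struct secure_partition_desc *sp)
{
        unsigned int idx = get_ec_index(sp);

        assert(idx < PLATFORM_CORE_COUNT);
        return &sp->ec[idx];
}
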
140 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc) in spmc_sp_synchronous_exit() argument
147 spm_secure_partition_exit(ec->c_rt_ctx, rc); in spmc_sp_synchronous_exit()
472 if (sp->ec[idx].rt_state != RT_STATE_WAITING) { in direct_req_smc_handler()
474 idx, sp->ec[idx].rt_model); in direct_req_smc_handler()
487 sp->ec[idx].rt_state = RT_STATE_RUNNING; in direct_req_smc_handler()
488 sp->ec[idx].rt_model = RT_MODEL_DIR_REQ; in direct_req_smc_handler()
489 sp->ec[idx].dir_req_origin_id = src_id; in direct_req_smc_handler()
490 sp->ec[idx].dir_req_funcid = dir_req_funcid; in direct_req_smc_handler()
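
Lines 472-490 are the receive side of a direct request: the target EC has to be idle (RT_STATE_WAITING), and on acceptance it moves to RT_STATE_RUNNING under RT_MODEL_DIR_REQ while the sender id and the request's function id are latched for later validation. A minimal sketch of that transition, with the enum and struct definitions trimmed to what is visible in this listing and the rejection path collapsed to a boolean (the real handler returns an FFA error to the caller):

#include <stdbool.h>
#include <stdint.h>

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
        uint16_t dir_req_origin_id;
        uint16_t dir_req_funcid;
};

/*
 * Accept a direct request on the target execution context, mirroring the
 * checks around lines 472-490: the EC must be idle, and the request's
 * origin and function id are latched so the matching direct response can
 * be validated later.
 */
bool accept_direct_req(struct sp_exec_ctx *ec, uint16_t src_id,
                       uint16_t dir_req_funcid)
{
        if (ec->rt_state != RT_STATE_WAITING) {
                /* EC is busy; the real handler returns an FFA error here. */
                return false;
        }

        ec->rt_state = RT_STATE_RUNNING;
        ec->rt_model = RT_MODEL_DIR_REQ;
        ec->dir_req_origin_id = src_id;
        ec->dir_req_funcid = dir_req_funcid;
        return true;
}
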
560 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING); in direct_resp_smc_handler()
563 if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) { in direct_resp_smc_handler()
565 idx, sp->ec[idx].rt_model); in direct_resp_smc_handler()
572 if (dir_req_funcid != sp->ec[idx].dir_req_funcid) { in direct_resp_smc_handler()
574 sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx); in direct_resp_smc_handler()
581 if (sp->ec[idx].dir_req_origin_id != dst_id) { in direct_resp_smc_handler()
583 dst_id, sp->ec[idx].dir_req_origin_id, idx); in direct_resp_smc_handler()
591 sp->ec[idx].rt_state = RT_STATE_WAITING; in direct_resp_smc_handler()
594 sp->ec[idx].dir_req_origin_id = INV_SP_ID; in direct_resp_smc_handler()
597 sp->ec[idx].dir_req_funcid = 0U; in direct_resp_smc_handler()
608 spmc_sp_synchronous_exit(&sp->ec[idx], x4); in direct_resp_smc_handler()
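
Lines 560-608 are the matching direct-response path: the EC must currently be serving a direct request, the response's function id must pair with the one recorded at request time, and the destination must be the original sender; only then does the EC drop back to RT_STATE_WAITING with the latched fields cleared (origin to INV_SP_ID, function id to 0). Line 608 also shows that a response ending an exchange driven synchronously by the SPMC itself unwinds through spmc_sp_synchronous_exit(). A minimal sketch of the validation and reset, with a placeholder INV_SP_ID value:

#include <stdbool.h>
#include <stdint.h>

#define INV_SP_ID 0x7FFFU        /* placeholder value, not the real constant */

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
        uint16_t dir_req_origin_id;
        uint16_t dir_req_funcid;
};

/*
 * Validate a direct response against the request latched on this EC,
 * mirroring the checks around lines 560-597: a direct request must be in
 * flight, the response's function id must pair with the recorded request,
 * and the response must target the original sender. On success the EC
 * returns to the waiting state and the bookkeeping fields are cleared.
 */
bool complete_direct_resp(struct sp_exec_ctx *ec, uint16_t dst_id,
                          uint16_t dir_req_funcid)
{
        if (ec->rt_model != RT_MODEL_DIR_REQ) {
                return false;        /* no direct request outstanding */
        }
        if (dir_req_funcid != ec->dir_req_funcid) {
                return false;        /* request/response function ids differ */
        }
        if (ec->dir_req_origin_id != dst_id) {
                return false;        /* response aimed at the wrong endpoint */
        }

        ec->rt_state = RT_STATE_WAITING;
        ec->dir_req_origin_id = INV_SP_ID;
        ec->dir_req_funcid = 0U;
        return true;
}
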
659 if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) { in msg_wait_handler()
667 assert(sp->ec[idx].rt_state == RT_STATE_RUNNING); in msg_wait_handler()
673 if (sp->ec[idx].rt_model == RT_MODEL_INIT) { in msg_wait_handler()
677 spmc_sp_synchronous_exit(&sp->ec[idx], x4); in msg_wait_handler()
683 sp->ec[idx].rt_state = RT_STATE_WAITING; in msg_wait_handler()
686 if (sp->ec[idx].rt_model == RT_MODEL_INTR) { in msg_wait_handler()
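
Lines 659-686 cover FFA_MSG_WAIT: it is refused while a direct request is outstanding, it completes partition initialisation through spmc_sp_synchronous_exit() when the EC is still in RT_MODEL_INIT, and otherwise it parks the EC back in RT_STATE_WAITING, with extra resume handling for the interrupt model at line 686. A sketch of that decision, reduced to an outcome code where the real handler either exits synchronously or returns to its caller:

#include <assert.h>

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
};

/* Sketch outcome; the real handler either returns an error, performs a
 * synchronous exit, or resumes the scheduler. */
enum msg_wait_outcome {
        MSG_WAIT_DENIED,        /* a direct request is still outstanding */
        MSG_WAIT_INIT_DONE,     /* real code: spmc_sp_synchronous_exit(ec, x4) */
        MSG_WAIT_PARKED,        /* EC moved back to the waiting state */
};

/* Decision logic mirroring lines 659-686. */
enum msg_wait_outcome handle_msg_wait(struct sp_exec_ctx *ec)
{
        if (ec->rt_model == RT_MODEL_DIR_REQ) {
                return MSG_WAIT_DENIED;
        }

        assert(ec->rt_state == RT_STATE_RUNNING);

        if (ec->rt_model == RT_MODEL_INIT) {
                return MSG_WAIT_INIT_DONE;
        }

        ec->rt_state = RT_STATE_WAITING;
        /* Line 686: the interrupt model gets additional resume handling. */
        return MSG_WAIT_PARKED;
}
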
741 if (sp->ec[idx].rt_model == RT_MODEL_INIT) { in ffa_error_handler()
743 spmc_sp_synchronous_exit(&sp->ec[idx], x2); in ffa_error_handler()
752 if (sp->ec[idx].rt_state == RT_STATE_RUNNING && in ffa_error_handler()
753 sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) { in ffa_error_handler()
754 sp->ec[idx].rt_state = RT_STATE_WAITING; in ffa_error_handler()
755 sp->ec[idx].dir_req_origin_id = INV_SP_ID; in ffa_error_handler()
756 sp->ec[idx].dir_req_funcid = 0x00; in ffa_error_handler()
766 spmc_sp_synchronous_exit(&sp->ec[idx], x4); in ffa_error_handler()
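
Lines 741-766 show FFA_ERROR arriving from a partition: in RT_MODEL_INIT it aborts the boot-time entry with the error code (x2), and while a direct request is being served it tears the request down, returning the EC to RT_STATE_WAITING and clearing the latched fields; line 766 is a further synchronous-exit path. A sketch of those two transitions, again with a placeholder INV_SP_ID:

#include <stdint.h>

#define INV_SP_ID 0x7FFFU        /* placeholder value, not the real constant */

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
        uint16_t dir_req_origin_id;
        uint16_t dir_req_funcid;
};

enum ffa_error_outcome {
        FFA_ERR_ABORT_INIT,      /* real code: spmc_sp_synchronous_exit(ec, x2) */
        FFA_ERR_REQ_ABORTED,     /* outstanding direct request torn down */
        FFA_ERR_OTHER,
};

/*
 * Sketch of the FFA_ERROR transitions at lines 741-756: an error during
 * initialisation aborts the boot-time entry; an error while serving a
 * direct request rewinds the EC as if the request had never arrived.
 */
enum ffa_error_outcome handle_ffa_error(struct sp_exec_ctx *ec)
{
        if (ec->rt_model == RT_MODEL_INIT) {
                return FFA_ERR_ABORT_INIT;
        }

        if ((ec->rt_state == RT_STATE_RUNNING) &&
            (ec->rt_model == RT_MODEL_DIR_REQ)) {
                ec->rt_state = RT_STATE_WAITING;
                ec->dir_req_origin_id = INV_SP_ID;
                ec->dir_req_funcid = 0U;
                return FFA_ERR_REQ_ABORTED;
        }

        return FFA_ERR_OTHER;
}
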
1540 rt_state = &((sp->ec[idx]).rt_state); in ffa_run_handler()
1541 rt_model = &((sp->ec[idx]).rt_model); in ffa_run_handler()
1852 if (sp->ec[idx].rt_model != RT_MODEL_INIT) { in ffa_mem_perm_set_handler()
1959 if (sp->ec[idx].rt_model != RT_MODEL_INIT) { in ffa_mem_perm_get_handler()
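
The hits at lines 1852 and 1959 are the same guard in the FFA_MEM_PERM_SET and FFA_MEM_PERM_GET handlers: both calls are only honoured while the EC is still in RT_MODEL_INIT, i.e. before the partition has finished booting. A one-function sketch of that guard:

#include <stdbool.h>

enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_model rt_model;
};

/* Mirror of the guard at lines 1852 and 1959: memory permission queries
 * and updates are only honoured during the partition's init phase. */
bool mem_perm_allowed(const struct sp_exec_ctx *ec)
{
        return ec->rt_model == RT_MODEL_INIT;
}
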
2377 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec) in spmc_sp_synchronous_entry() argument
2381 assert(ec != NULL); in spmc_sp_synchronous_entry()
2384 cm_set_context(&(ec->cpu_ctx), SECURE); in spmc_sp_synchronous_entry()
2395 rc = spm_secure_partition_enter(&ec->c_rt_ctx); in spmc_sp_synchronous_entry()
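
Lines 140-147 and 2377-2395 are the two halves of the synchronous world switch. spmc_sp_synchronous_entry() points cm_set_context() at the EC's CPU context and calls spm_secure_partition_enter(), which saves the SPMC's C runtime state in ec->c_rt_ctx before dropping into the partition; spmc_sp_synchronous_exit() later hands that saved state to spm_secure_partition_exit(), making the original entry call appear to return with the supplied rc. The sketch below models only that save-then-unwind contract using setjmp/longjmp; the substitution is mine, since the real helpers are assembly routines and c_rt_ctx is not a jmp_buf.

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the one field the entry/exit pair needs here. */
struct sp_exec_ctx {
        jmp_buf c_rt_ctx;        /* plays the role of the saved C runtime context */
};

static struct sp_exec_ctx boot_ec;

/* Analogue of spmc_sp_synchronous_exit(): unwind to the matching entry. */
static void sp_synchronous_exit(struct sp_exec_ctx *ec, int rc)
{
        longjmp(ec->c_rt_ctx, rc);
}

/* Stand-in for "run the partition until it yields", e.g. via FFA_MSG_WAIT. */
static void run_partition(struct sp_exec_ctx *ec)
{
        printf("partition initialising...\n");
        sp_synchronous_exit(ec, 7);        /* arbitrary non-zero yield value */
}

/* Analogue of spmc_sp_synchronous_entry(): returns only once the partition
 * performs a synchronous exit. */
static uint64_t sp_synchronous_entry(struct sp_exec_ctx *ec)
{
        int rc = setjmp(ec->c_rt_ctx);

        if (rc == 0) {
                run_partition(ec);        /* the real code ERETs into the SP here */
        }
        return (uint64_t)rc;
}

int main(void)
{
        printf("entry returned %llu\n",
               (unsigned long long)sp_synchronous_entry(&boot_ec));
        return 0;
}

sp_init() below depends on exactly this contract: it regains control only once the partition performs a synchronous exit, for example through the FFA_MSG_WAIT path at line 677.
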
2410 struct sp_exec_ctx *ec; in sp_init() local
2413 ec = spmc_get_sp_ec(sp); in sp_init()
2414 ec->rt_model = RT_MODEL_INIT; in sp_init()
2415 ec->rt_state = RT_STATE_RUNNING; in sp_init()
2419 rc = spmc_sp_synchronous_entry(ec); in sp_init()
2427 ec->rt_state = RT_STATE_WAITING; in sp_init()
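
Lines 2410-2427 tie the boot flow together: sp_init() fetches this core's EC, marks it RUNNING under RT_MODEL_INIT, enters the partition synchronously, and only after a clean return moves the EC to RT_STATE_WAITING so it can start receiving messages. A sketch of that sequence, with the synchronous entry reduced to a hypothetical enter_partition() hook:

#include <stdbool.h>
#include <stdint.h>

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
};

/* Hypothetical hook standing in for spmc_sp_synchronous_entry(): runs the
 * partition's init code and reports whether it reached FFA_MSG_WAIT. */
static uint64_t enter_partition(struct sp_exec_ctx *ec)
{
        (void)ec;
        return 0ULL;        /* 0 == initialised successfully in this sketch */
}

/*
 * Sketch of the sp_init() flow at lines 2410-2427: select the runtime
 * model and state, run the partition through its init sequence, then
 * leave the EC ready to receive messages.
 */
bool sp_init_sketch(struct sp_exec_ctx *ec)
{
        ec->rt_model = RT_MODEL_INIT;
        ec->rt_state = RT_STATE_RUNNING;

        if (enter_partition(ec) != 0ULL) {
                return false;        /* the real code treats this as a boot failure */
        }

        ec->rt_state = RT_STATE_WAITING;
        return true;
}
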
2685 struct sp_exec_ctx *ec; in spmc_sp_interrupt_handler() local
2702 ec = spmc_get_sp_ec(sp); in spmc_sp_interrupt_handler()
2705 if (ec->rt_state != RT_STATE_WAITING) { in spmc_sp_interrupt_handler()
2707 linear_id, RT_STATE_WAITING, ec->rt_state); in spmc_sp_interrupt_handler()
2712 ec->rt_model = RT_MODEL_INTR; in spmc_sp_interrupt_handler()
2713 ec->rt_state = RT_STATE_RUNNING; in spmc_sp_interrupt_handler()
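
Finally, lines 2685-2713 show the secure interrupt path: the handler looks up this core's EC, injects the interrupt only if the EC is currently RT_STATE_WAITING, and then switches it to RT_STATE_RUNNING under RT_MODEL_INTR before entering the partition. A sketch of that guard and transition:

#include <stdbool.h>

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING };
enum runtime_model { RT_MODEL_DIR_REQ, RT_MODEL_INIT, RT_MODEL_INTR };

struct sp_exec_ctx {
        enum runtime_states rt_state;
        enum runtime_model rt_model;
};

/*
 * Sketch of the check at lines 2705-2713: refuse to inject a secure
 * interrupt into an EC that is already busy; otherwise mark it as running
 * under the interrupt handling model before entering the partition.
 */
bool begin_interrupt_handling(struct sp_exec_ctx *ec)
{
        if (ec->rt_state != RT_STATE_WAITING) {
                return false;        /* real code logs the mismatch and bails out */
        }

        ec->rt_model = RT_MODEL_INTR;
        ec->rt_state = RT_STATE_RUNNING;
        return true;
}
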