/OK3568_Linux_fs/kernel/drivers/net/ipa/
ipa_main.c
     83  static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
     89          if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
     90                  pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
     93          ipa_interrupt_suspend_clear_all(ipa->interrupt);
    107  int ipa_setup(struct ipa *ipa)
    111          struct device *dev = &ipa->pdev->dev;
    115          ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
    119          ipa->interrupt = ipa_interrupt_setup(ipa);
    120          if (IS_ERR(ipa->interrupt)) {
    121                  ret = PTR_ERR(ipa->interrupt);
    [all …]
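The ipa_suspend_handler() hit above is a one-shot latch: test_and_set_bit() returns the bit's previous value, so only the caller that flips IPA_FLAG_RESUMED from 0 to 1 triggers the wakeup event; the flag is presumably cleared again elsewhere (e.g., on system resume). A minimal runnable sketch of the same idiom using C11 atomic_fetch_or in place of the kernel's test_and_set_bit (all names here are illustrative, not the driver's):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_RESUMED (1u << 0)

static atomic_uint flags;

/* Returns the bit's OLD value, mirroring test_and_set_bit(). */
static bool test_and_set_flag(unsigned int bit)
{
	return atomic_fetch_or(&flags, bit) & bit;
}

static void suspend_handler(void)
{
	if (!test_and_set_flag(FLAG_RESUMED))
		printf("first suspend IRQ: trigger wakeup event\n");
	else
		printf("repeat suspend IRQ: wakeup already signaled\n");
}

int main(void)
{
	suspend_handler();	/* transitions 0 -> 1: fires the wakeup */
	suspend_handler();	/* latched: no second wakeup */
	return 0;
}
```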
|
ipa_table.c
    151  ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
    153          struct device *dev = &ipa->pdev->dev;
    159                  mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
    160                               : &ipa->mem[IPA_MEM_V6_ROUTE];
    162                  mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
    163                               : &ipa->mem[IPA_MEM_V4_ROUTE];
    167                  mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
    168                               : &ipa->mem[IPA_MEM_V6_FILTER];
    170                  mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
    171                               : &ipa->mem[IPA_MEM_V4_FILTER];
    [all …]
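ipa_table_valid_one() selects one of eight memory regions from three booleans (route/filter, IPv6/IPv4, hashed/plain) via branches and ternaries. The same selection can be written as a table indexed by the packed flags; a hedged sketch, not the driver's actual code or region numbering:

```c
#include <stdio.h>

enum mem_id {
	MEM_V4_FILTER, MEM_V4_FILTER_HASHED,
	MEM_V6_FILTER, MEM_V6_FILTER_HASHED,
	MEM_V4_ROUTE,  MEM_V4_ROUTE_HASHED,
	MEM_V6_ROUTE,  MEM_V6_ROUTE_HASHED,
};

/* Pack the three selectors into a 3-bit index:
 * bit 2 = route, bit 1 = ipv6, bit 0 = hashed. */
static enum mem_id table_mem_id(int route, int ipv6, int hashed)
{
	static const enum mem_id map[8] = {
		[0] = MEM_V4_FILTER, [1] = MEM_V4_FILTER_HASHED,
		[2] = MEM_V6_FILTER, [3] = MEM_V6_FILTER_HASHED,
		[4] = MEM_V4_ROUTE,  [5] = MEM_V4_ROUTE_HASHED,
		[6] = MEM_V6_ROUTE,  [7] = MEM_V6_ROUTE_HASHED,
	};

	return map[!!route << 2 | !!ipv6 << 1 | !!hashed];
}

int main(void)
{
	printf("%d\n", table_mem_id(1, 1, 0));	/* MEM_V6_ROUTE == 6 */
	return 0;
}
```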
|
ipa_mem.c
     33          struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  in ipa_mem_zero_region_add()
     34          dma_addr_t addr = ipa->zero_addr;  in ipa_mem_zero_region_add()
     58  int ipa_mem_setup(struct ipa *ipa)
     60          dma_addr_t addr = ipa->zero_addr;
     68          trans = ipa_cmd_trans_alloc(ipa, 4);
     70                  dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
     77          offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
     78          size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
     79          size += ipa->mem[IPA_MEM_AP_HEADER].size;
     83          ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
    [all …]
|
ipa_modem.c
     36          struct ipa *ipa;
     43          struct ipa *ipa = priv->ipa;  in ipa_open()
     46          ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);  in ipa_open()
     49          ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);  in ipa_open()
     58          ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);  in ipa_open()
     67          struct ipa *ipa = priv->ipa;  in ipa_stop()
     71          ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);  in ipa_stop()
     72          ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);  in ipa_stop()
     90          struct ipa *ipa = priv->ipa;  in ipa_start_xmit()
     97          endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];  in ipa_start_xmit()
    [all …]
|
ipa_endpoint.c
    102  static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
    107          struct device *dev = &ipa->pdev->dev;
    183  static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
    187          struct device *dev = &ipa->pdev->dev;
    217                  if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
    225  static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
    237          struct gsi *gsi = &endpoint->ipa->gsi;  in ipa_endpoint_trans_alloc()
    253          struct ipa *ipa = endpoint->ipa;  in ipa_endpoint_init_ctrl()
    268          val = ioread32(ipa->reg_virt + offset);  in ipa_endpoint_init_ctrl()
    273          iowrite32(val, ipa->reg_virt + offset);  in ipa_endpoint_init_ctrl()
    [all …]
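The ipa_endpoint_init_ctrl() hits show the classic MMIO read-modify-write: ioread32() the control register, flip one field, iowrite32() the result back so unrelated bits survive. A self-contained sketch of the idiom against a plain variable standing in for the mapped register (the field mask is invented for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define ENDP_SUSPEND_FMASK	(1u << 0)	/* illustrative field mask */

static uint32_t ctrl_reg;	/* stand-in for ipa->reg_virt + offset */

static uint32_t ioread32_sim(const uint32_t *reg) { return *reg; }
static void iowrite32_sim(uint32_t val, uint32_t *reg) { *reg = val; }

/* Set or clear one field without disturbing the rest of the register. */
static void endpoint_init_ctrl(int enable)
{
	uint32_t val = ioread32_sim(&ctrl_reg);

	if (enable)
		val |= ENDP_SUSPEND_FMASK;
	else
		val &= ~ENDP_SUSPEND_FMASK;

	iowrite32_sim(val, &ctrl_reg);
}

int main(void)
{
	ctrl_reg = 0xff00;	/* unrelated bits must survive */
	endpoint_init_ctrl(1);
	printf("0x%08x\n", (unsigned)ctrl_reg);	/* 0x0000ff01 */
	endpoint_init_ctrl(0);
	printf("0x%08x\n", (unsigned)ctrl_reg);	/* 0x0000ff00 */
	return 0;
}
```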
|
ipa_interrupt.c
     39          struct ipa *ipa;
     55          struct ipa *ipa = interrupt->ipa;  in ipa_interrupt_process()
     62          iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);  in ipa_interrupt_process()
     65          interrupt->handler[irq_id](interrupt->ipa, irq_id);  in ipa_interrupt_process()
     72          iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);  in ipa_interrupt_process()
     78          struct ipa *ipa = interrupt->ipa;  in ipa_interrupt_process_all()
     86          mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);  in ipa_interrupt_process_all()
     95          mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);  in ipa_interrupt_process_all()
    104          ipa_clock_get(interrupt->ipa);  in ipa_isr_thread()
    108          ipa_clock_put(interrupt->ipa);  in ipa_isr_thread()
    [all …]
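ipa_interrupt_process_all() reads the status register, dispatches the registered handler for each enabled set bit, and re-reads the status until nothing is pending; each serviced condition is acknowledged through the CLR register (the excerpt shows the CLR write both before and after dispatch, which in the driver depends on the interrupt type; this sketch acks once before dispatch for simplicity). A runnable sketch of that dispatch loop with stand-in register variables and an invented handler table:

```c
#include <stdint.h>
#include <stdio.h>

#define IRQ_COUNT 32

static uint32_t irq_stts;	/* stand-in for IPA_REG_IRQ_STTS_OFFSET */
static uint32_t irq_clr;	/* stand-in for IPA_REG_IRQ_CLR_OFFSET */

typedef void (*irq_handler_t)(unsigned int irq_id);

static irq_handler_t handler[IRQ_COUNT];

static void uc_handler(unsigned int irq_id)
{
	printf("handled irq %u\n", irq_id);
}

static void interrupt_process_all(uint32_t enabled)
{
	uint32_t mask = irq_stts & enabled;

	while (mask) {
		/* Lowest set bit; GCC/Clang builtin, kernel uses __ffs(). */
		unsigned int irq_id = __builtin_ctz(mask);
		uint32_t bit = 1u << irq_id;

		irq_clr = bit;		/* ack this condition */
		irq_stts &= ~bit;
		if (handler[irq_id])
			handler[irq_id](irq_id);

		mask = irq_stts & enabled;	/* re-read: new IRQs may arrive */
	}
}

int main(void)
{
	handler[2] = uc_handler;
	irq_stts = 1u << 2 | 1u << 5;	/* irq 5 is not enabled below */
	interrupt_process_all(1u << 2);
	return 0;
}
```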
|
ipa_endpoint.h
     19  struct ipa;
     61          struct ipa *ipa;
     83  void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
     85  void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
     87  int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
     99  void ipa_endpoint_suspend(struct ipa *ipa);
    100  void ipa_endpoint_resume(struct ipa *ipa);
    102  void ipa_endpoint_setup(struct ipa *ipa);
    103  void ipa_endpoint_teardown(struct ipa *ipa);
    105  int ipa_endpoint_config(struct ipa *ipa);
    [all …]
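The bare `struct ipa;` at line 19 is a forward declaration: the header only passes pointers, so it never needs the full definition, which cuts include dependencies and keeps the type opaque to consumers. A minimal sketch of the idiom, with the header and implementation collapsed into one translation unit for demonstration (names hypothetical):

```c
/* --- endpoint.h: consumers see only an opaque pointer --- */
struct ipa;				/* forward declaration, no #include */
void endpoint_suspend(struct ipa *ipa);

/* --- ipa.c: only the implementation sees the full layout --- */
struct ipa {
	int suspended;
};

void endpoint_suspend(struct ipa *ipa)
{
	ipa->suspended = 1;
}

int main(void)
{
	struct ipa ipa = { 0 };

	endpoint_suspend(&ipa);
	return ipa.suspended ? 0 : 1;
}
```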
|
ipa_uc.c
    117  static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
    119          u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
    121          return ipa->mem_virt + offset;
    125  static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
    127          struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
    128          struct device *dev = &ipa->pdev->dev;
    138  static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
    140          struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
    152                  ipa->uc_loaded = true;
    153                  ipa_clock_put(ipa);
    [all …]
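ipa_uc_shared() computes a CPU pointer into IPA-local memory as base virtual address plus a global offset plus the region's own offset, then treats the bytes there as a struct. A hedged userspace sketch of the same address arithmetic over an ordinary buffer; the struct layout and offsets here are invented, not the microcontroller interface's real layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for struct ipa_uc_mem_area's layout. */
struct uc_mem_area {
	uint8_t command;
	uint8_t response;
};

static uint8_t mem_virt[1024];			/* ipa->mem_virt */
static const uint32_t mem_offset = 0x80;	/* ipa->mem_offset */
static const uint32_t uc_shared_offset = 0x40;	/* region offset */

static struct uc_mem_area *uc_shared(void)
{
	uint32_t offset = mem_offset + uc_shared_offset;

	return (struct uc_mem_area *)(mem_virt + offset);
}

int main(void)
{
	uc_shared()->command = 1;
	printf("command byte at 0x%x: %u\n",
	       (unsigned)(mem_offset + uc_shared_offset),
	       (unsigned)mem_virt[0xc0]);	/* same byte, two views */
	return 0;
}
```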
|
ipa_table.h
     11  struct ipa;
     30  bool ipa_table_valid(struct ipa *ipa);
     38  bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
     42  static inline bool ipa_table_valid(struct ipa *ipa)
     47  static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
     59  void ipa_table_reset(struct ipa *ipa, bool modem);
     65  int ipa_table_hash_flush(struct ipa *ipa);
     71  int ipa_table_setup(struct ipa *ipa);
     77  void ipa_table_teardown(struct ipa *ipa);
     83  void ipa_table_config(struct ipa *ipa);
    [all …]
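Lines 30–47 show the declare-or-stub pattern: the real validators are declared when validation is compiled in, and otherwise the header supplies static inline stubs that return success, so call sites compile unchanged either way. A condensed runnable sketch; the config macro name is invented, the driver keys this off its own validation option:

```c
#include <stdbool.h>
#include <stdio.h>

struct ipa { int dummy; };

#ifdef VALIDATE_TABLES
bool ipa_table_valid(struct ipa *ipa);	/* real checker lives in a .c file */
#else
/* Validation compiled out: the stub keeps call sites identical. */
static inline bool ipa_table_valid(struct ipa *ipa)
{
	(void)ipa;
	return true;
}
#endif

int main(void)
{
	struct ipa ipa = { 0 };

	if (!ipa_table_valid(&ipa))
		return 1;
	puts("tables ok");
	return 0;
}
```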
|
ipa_qmi.c
     86          struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);  in ipa_server_init_complete()
     99                  dev_err(&ipa->pdev->dev,  in ipa_server_init_complete()
    128          struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);  in ipa_qmi_ready()
    148          ipa = container_of(ipa_qmi, struct ipa, qmi);  in ipa_qmi_ready()
    149          ret = ipa_modem_start(ipa);  in ipa_qmi_ready()
    151                  dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);  in ipa_qmi_ready()
    186          struct ipa *ipa;  in ipa_server_indication_register()
    190          ipa = container_of(ipa_qmi, struct ipa, qmi);  in ipa_server_indication_register()
    202                  dev_err(&ipa->pdev->dev,  in ipa_server_indication_register()
    215          struct ipa *ipa;  in ipa_server_driver_init_complete()
    [all …]
|
ipa_cmd.c
    168  bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
    171          struct device *dev = &ipa->pdev->dev;
    177              ipa->mem_offset > offset_max - mem->offset) {
    182                          ipa->mem_offset, mem->offset, offset_max);
    187          if (mem->offset > ipa->mem_size ||
    188              mem->size > ipa->mem_size - mem->offset) {
    193                          mem->offset, mem->size, ipa->mem_size);
    202  static bool ipa_cmd_header_valid(struct ipa *ipa)
    204          const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
    205          struct device *dev = &ipa->pdev->dev;
    [all …]
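The range checks at lines 187–188 are written to avoid integer overflow: instead of testing `offset + size > mem_size`, where the addition can wrap, they test `size > mem_size - offset` after first confirming `offset <= mem_size`. A runnable sketch of the idiom, showing a case where the naive form would wrongly pass:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [offset, offset + size) fits inside mem_size bytes.
 * Subtraction-first, so the arithmetic can never wrap. */
static bool region_fits(uint32_t offset, uint32_t size, uint32_t mem_size)
{
	if (offset > mem_size)
		return false;
	return size <= mem_size - offset;
}

int main(void)
{
	/* 0xfffffff0 + 0x20 wraps to 0x10, so a naive
	 * "offset + size <= mem_size" check would accept it. */
	printf("%d\n", region_fits(0xfffffff0u, 0x20u, 0x1000u));	/* 0 */
	printf("%d\n", region_fits(0x100u, 0x200u, 0x1000u));		/* 1 */
	return 0;
}
```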
|
ipa_clock.c
    114  static int ipa_interconnect_enable(struct ipa *ipa)
    116          struct ipa_clock *clock = ipa->clock;
    142  static int ipa_interconnect_disable(struct ipa *ipa)
    144          struct ipa_clock *clock = ipa->clock;
    170  static int ipa_clock_enable(struct ipa *ipa)
    174          ret = ipa_interconnect_enable(ipa);
    178          ret = clk_prepare_enable(ipa->clock->core);
    180                  ipa_interconnect_disable(ipa);
    186  static void ipa_clock_disable(struct ipa *ipa)
    188          clk_disable_unprepare(ipa->clock->core);
    [all …]
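ipa_clock_enable() layers two resources and unwinds on failure: enable the interconnects, then the core clock, and if the clock fails, disable the interconnects before returning the error. A sketch of that acquire-in-order, roll-back-on-failure pattern with stubbed resource calls (all names and the forced failure are illustrative):

```c
#include <stdio.h>

static int interconnect_enable(void)   { puts("icc on");   return 0; }
static void interconnect_disable(void) { puts("icc off"); }
static int clk_enable_stub(void)       { puts("clk fail"); return -5; }

/* Acquire in order; on failure, release what was already acquired. */
static int clock_enable(void)
{
	int ret;

	ret = interconnect_enable();
	if (ret)
		return ret;

	ret = clk_enable_stub();
	if (ret)
		interconnect_disable();	/* roll back the first step */

	return ret;
}

int main(void)
{
	return clock_enable() ? 1 : 0;	/* prints icc on, clk fail, icc off */
}
```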
|
ipa_modem.h
      9  struct ipa;
     14  int ipa_modem_start(struct ipa *ipa);
     15  int ipa_modem_stop(struct ipa *ipa);
     22  int ipa_modem_init(struct ipa *ipa, bool modem_init);
     23  void ipa_modem_exit(struct ipa *ipa);
     25  int ipa_modem_config(struct ipa *ipa);
     26  void ipa_modem_deconfig(struct ipa *ipa);
     28  int ipa_modem_setup(struct ipa *ipa);
     29  void ipa_modem_teardown(struct ipa *ipa);
|
ipa_smp2p.c
     60          struct ipa *ipa;
     92          smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);  in ipa_smp2p_notify()
    127          ipa_uc_panic_notifier(smp2p->ipa);  in ipa_smp2p_panic_notifier()
    158          ret = ipa_setup(smp2p->ipa);  in ipa_smp2p_modem_setup_ready_isr()
    160                  dev_err(&smp2p->ipa->pdev->dev,  in ipa_smp2p_modem_setup_ready_isr()
    174          struct device *dev = &smp2p->ipa->pdev->dev;  in ipa_smp2p_irq_init()
    178          ret = platform_get_irq_byname(smp2p->ipa->pdev, name);  in ipa_smp2p_irq_init()
    201  static void ipa_smp2p_clock_release(struct ipa *ipa)
    203          if (!ipa->smp2p->clock_on)
    206          ipa_clock_put(ipa);
    [all …]
|
ipa_mem.h
      9  struct ipa;
     80  int ipa_mem_config(struct ipa *ipa);
     81  void ipa_mem_deconfig(struct ipa *ipa);
     83  int ipa_mem_setup(struct ipa *ipa);
     84  void ipa_mem_teardown(struct ipa *ipa);
     86  int ipa_mem_zero_modem(struct ipa *ipa);
     88  int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
     89  void ipa_mem_exit(struct ipa *ipa);
|
ipa_gsi.c
     17          struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  in ipa_gsi_trans_complete()
     19          ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);  in ipa_gsi_trans_complete()
     24          struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  in ipa_gsi_trans_release()
     26          ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);  in ipa_gsi_trans_release()
     32          struct ipa *ipa = container_of(gsi, struct ipa, gsi);  in ipa_gsi_channel_tx_queued()
     35          endpoint = ipa->channel_map[channel_id];  in ipa_gsi_channel_tx_queued()
     43          struct ipa *ipa = container_of(gsi, struct ipa, gsi);  in ipa_gsi_channel_tx_completed()
     46          endpoint = ipa->channel_map[channel_id];  in ipa_gsi_channel_tx_completed()
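Every hit in ipa_gsi.c recovers the outer struct ipa from an embedded struct gsi with container_of(). That macro is just pointer arithmetic over offsetof(); a runnable userspace version with simplified struct definitions (the real structs have many more fields):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gsi { int channel_count; };

struct ipa {
	int version;
	struct gsi gsi;		/* embedded by value, not a pointer */
};

static void on_trans_complete(struct gsi *gsi)
{
	/* Walk back from the member to its containing object. */
	struct ipa *ipa = container_of(gsi, struct ipa, gsi);

	printf("ipa version %d\n", ipa->version);
}

int main(void)
{
	struct ipa ipa = { .version = 45, .gsi = { .channel_count = 23 } };

	on_trans_complete(&ipa.gsi);	/* prints "ipa version 45" */
	return 0;
}
```

This works because the callback receives a pointer to the embedded member, and the member's offset within the container is a compile-time constant, so no back-pointer field is needed.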
|
ipa_cmd.h
     15  struct ipa;
     66  bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
     75  bool ipa_cmd_data_valid(struct ipa *ipa);
     79  static inline bool ipa_cmd_table_valid(struct ipa *ipa,
     86  static inline bool ipa_cmd_data_valid(struct ipa *ipa)
    180  void ipa_cmd_tag_process(struct ipa *ipa);
    190  struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
|
ipa_reg.c
     12  int ipa_reg_init(struct ipa *ipa)
     14          struct device *dev = &ipa->pdev->dev;
     18          res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
     25          ipa->reg_virt = ioremap(res->start, resource_size(res));
     26          if (!ipa->reg_virt) {
     30          ipa->reg_addr = res->start;
     35  void ipa_reg_exit(struct ipa *ipa)
     37          iounmap(ipa->reg_virt);
|
ipa_smp2p.h
     11  struct ipa;
     21  int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
     27  void ipa_smp2p_exit(struct ipa *ipa);
     36  void ipa_smp2p_disable(struct ipa *ipa);
     46  void ipa_smp2p_notify_reset(struct ipa *ipa);
|
ipa_clock.h
     11  struct ipa;
     19  u32 ipa_clock_rate(struct ipa *ipa);
     41  void ipa_clock_get(struct ipa *ipa);
     49  bool ipa_clock_get_additional(struct ipa *ipa);
     59  void ipa_clock_put(struct ipa *ipa);
|
ipa_uc.h
      9  struct ipa;
     15  void ipa_uc_setup(struct ipa *ipa);
     21  void ipa_uc_teardown(struct ipa *ipa);
     30  void ipa_uc_panic_notifier(struct ipa *ipa);
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/ipa/
mali_kbase_ipa.c
     48          lockdep_assert_held(&model->kbdev->ipa.lock);  in kbase_ipa_model_recalculate()
    213          lockdep_assert_held(&model->kbdev->ipa.lock);  in kbase_ipa_term_model()
    230          lockdep_assert_held(&kbdev->ipa.lock);  in kbase_ipa_init_model()
    264          lockdep_assert_held(&kbdev->ipa.lock);  in kbase_ipa_term_locked()
    267          if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)  in kbase_ipa_term_locked()
    268                  kbase_ipa_term_model(kbdev->ipa.configured_model);  in kbase_ipa_term_locked()
    269          kbase_ipa_term_model(kbdev->ipa.fallback_model);  in kbase_ipa_term_locked()
    271          kbdev->ipa.configured_model = NULL;  in kbase_ipa_term_locked()
    272          kbdev->ipa.fallback_model = NULL;  in kbase_ipa_term_locked()
    283          mutex_init(&kbdev->ipa.lock);  in kbase_ipa_init()
    [all …]
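kbase_ipa_term_locked() (lines 264–272) guards against tearing down the same object twice: the configured power model may alias the fallback model, so it is terminated only when the two pointers differ, and both fields are NULLed afterwards. A small runnable sketch of that aliasing-safe cleanup; the types and names are invented stand-ins:

```c
#include <stdio.h>
#include <stdlib.h>

struct model { const char *name; };

static void term_model(struct model *m)
{
	printf("term %s\n", m->name);
	free(m);
}

struct dev {
	struct model *configured_model;
	struct model *fallback_model;
};

/* Terminate each distinct model exactly once, even when both
 * fields point at the same object. */
static void term_locked(struct dev *d)
{
	if (d->configured_model != d->fallback_model)
		term_model(d->configured_model);
	term_model(d->fallback_model);

	d->configured_model = NULL;
	d->fallback_model = NULL;
}

int main(void)
{
	struct dev d;

	d.fallback_model = malloc(sizeof(*d.fallback_model));
	d.fallback_model->name = "fallback";
	d.configured_model = d.fallback_model;	/* aliased: freed once */
	term_locked(&d);
	return 0;
}
```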
|
mali_kbase_ipa_debugfs.c
     47          mutex_lock(&param->model->kbdev->ipa.lock);  in param_int_get()
     49          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_int_get()
     65          mutex_lock(&param->model->kbdev->ipa.lock);  in param_int_set()
     71          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_int_set()
     85          mutex_lock(&param->model->kbdev->ipa.lock);  in param_string_get()
     89          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_string_get()
    104          mutex_lock(&model->kbdev->ipa.lock);  in param_string_set()
    139          mutex_unlock(&model->kbdev->ipa.lock);  in param_string_set()
    198          mutex_lock(&kbdev->ipa.lock);  in force_fallback_model_get()
    199          *val = kbdev->ipa.force_fallback_model;  in force_fallback_model_get()
    [all …]
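Every debugfs accessor above takes kbdev->ipa.lock around the read or write, so parameter updates and model recalculation never interleave; the same pattern repeats in the midgard copy below. A minimal pthread version of the lock-around-accessor pattern (struct layout and names invented for illustration):

```c
#include <pthread.h>
#include <stdio.h>

struct ipa_state {
	pthread_mutex_t lock;
	int force_fallback_model;
};

static struct ipa_state ipa = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Mirror of param_int_get(): snapshot the value under the lock. */
static int param_int_get(int *val)
{
	pthread_mutex_lock(&ipa.lock);
	*val = ipa.force_fallback_model;
	pthread_mutex_unlock(&ipa.lock);
	return 0;
}

/* Mirror of param_int_set(): update under the same lock. */
static int param_int_set(int val)
{
	pthread_mutex_lock(&ipa.lock);
	ipa.force_fallback_model = val;
	pthread_mutex_unlock(&ipa.lock);
	return 0;
}

int main(void)
{
	int v;

	param_int_set(1);
	param_int_get(&v);
	printf("force_fallback_model = %d\n", v);
	return 0;
}
```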
|
/OK3568_Linux_fs/kernel/drivers/gpu/arm/midgard/ipa/
mali_kbase_ipa.c
     44          lockdep_assert_held(&model->kbdev->ipa.lock);  in kbase_ipa_model_recalculate()
    208          lockdep_assert_held(&model->kbdev->ipa.lock);  in kbase_ipa_term_model()
    225          lockdep_assert_held(&kbdev->ipa.lock);  in kbase_ipa_init_model()
    260          lockdep_assert_held(&kbdev->ipa.lock);  in kbase_ipa_term_locked()
    263          if (kbdev->ipa.configured_model != kbdev->ipa.fallback_model)  in kbase_ipa_term_locked()
    264                  kbase_ipa_term_model(kbdev->ipa.configured_model);  in kbase_ipa_term_locked()
    265          kbase_ipa_term_model(kbdev->ipa.fallback_model);  in kbase_ipa_term_locked()
    267          kbdev->ipa.configured_model = NULL;  in kbase_ipa_term_locked()
    268          kbdev->ipa.fallback_model = NULL;  in kbase_ipa_term_locked()
    279          mutex_init(&kbdev->ipa.lock);  in kbase_ipa_init()
    [all …]
|
mali_kbase_ipa_debugfs.c
     47          mutex_lock(&param->model->kbdev->ipa.lock);  in param_int_get()
     49          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_int_get()
     64          mutex_lock(&param->model->kbdev->ipa.lock);  in param_int_set()
     67          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_int_set()
     81          mutex_lock(&param->model->kbdev->ipa.lock);  in param_string_get()
     85          mutex_unlock(&param->model->kbdev->ipa.lock);  in param_string_get()
     99          mutex_lock(&model->kbdev->ipa.lock);  in param_string_set()
    119          mutex_unlock(&model->kbdev->ipa.lock);  in param_string_set()
    171          lockdep_assert_held(&model->kbdev->ipa.lock);  in kbase_ipa_model_debugfs_init()
    212          mutex_lock(&kbdev->ipa.lock);  in kbase_ipa_debugfs_init()
    [all …]
|