Lines matching full:scheduler

Identifier references to "scheduler" across the Rockchip RGA memory-management code (the rga_mm_* functions). Each match lists the source line number, the code excerpt, and the containing function; "argument" and "local" mark how the identifier is declared at that site. Interleaved sketches reconstruct the surrounding code where the matches only show fragments.

312 static inline bool rga_mm_check_memory_limit(struct rga_scheduler_t *scheduler, int mm_flag)  in rga_mm_check_memory_limit()  argument
314 if (!scheduler) in rga_mm_check_memory_limit()
317 if (scheduler->data->mmu == RGA_MMU && in rga_mm_check_memory_limit()
320 rga_get_mmu_type_str(scheduler->data->mmu)); in rga_mm_check_memory_limit()
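The four matches above outline the whole helper. A minimal reconstruction, assuming a driver flag bit named RGA_MEM_UNDER_4G and message text beyond what the matches show:

```c
static inline bool rga_mm_check_memory_limit(struct rga_scheduler_t *scheduler,
					     int mm_flag)
{
	if (!scheduler)
		return false;

	/* The legacy RGA MMU can only walk pages below 4G, so reject
	 * buffers that were not allocated under that limit. */
	if (scheduler->data->mmu == RGA_MMU &&
	    !(mm_flag & RGA_MEM_UNDER_4G)) {	/* flag name assumed */
		pr_err("%s unsupported memory larger than 4G!\n",
		       rga_get_mmu_type_str(scheduler->data->mmu));
		return false;
	}

	return true;
}
```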
381 struct rga_scheduler_t *scheduler; in rga_mm_map_dma_buffer() local
383 scheduler = job ? job->scheduler : in rga_mm_map_dma_buffer()
384 rga_drvdata->scheduler[rga_drvdata->map_scheduler_index]; in rga_mm_map_dma_buffer()
385 if (scheduler == NULL) { in rga_mm_map_dma_buffer()
386 pr_err("Invalid scheduler device!\n"); in rga_mm_map_dma_buffer()
407 map_dev = scheduler->iommu_info ? scheduler->iommu_info->default_dev : scheduler->dev; in rga_mm_map_dma_buffer()
432 __func__, scheduler->core); in rga_mm_map_dma_buffer()
445 buffer->scheduler = scheduler; in rga_mm_map_dma_buffer()
464 if (!rga_mm_check_memory_limit(scheduler, mm_flag)) { in rga_mm_map_dma_buffer()
465 pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n", in rga_mm_map_dma_buffer()
466 scheduler->core, mm_flag); in rga_mm_map_dma_buffer()
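Lines 381-466 show the setup sequence that recurs in every map path: pick a scheduler, pick the device to map against, enforce the memory limit, and record ownership. A sketch of that sequence, with the error return values assumed:

```c
struct rga_scheduler_t *scheduler;
struct device *map_dev;

/* Prefer the scheduler already bound to the job; before a job exists,
 * fall back to the driver-wide default mapping core. */
scheduler = job ? job->scheduler :
	    rga_drvdata->scheduler[rga_drvdata->map_scheduler_index];
if (scheduler == NULL) {
	pr_err("Invalid scheduler device!\n");
	return -EINVAL;	/* error code assumed */
}

/* Map through the IOMMU's default device when one is attached,
 * otherwise against the RGA core's own device. */
map_dev = scheduler->iommu_info ? scheduler->iommu_info->default_dev :
	  scheduler->dev;

/* Reject buffers the selected core cannot address. */
if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
	pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
	       scheduler->core, mm_flag);
	return -EINVAL;	/* error code assumed */
}

/* Record which core the mapping belongs to for later unmap/sync. */
buffer->scheduler = scheduler;
```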
493 switch (internal_buffer->dma_buffer->scheduler->data->mmu) { in rga_mm_unmap_virt_addr()
498 dma_unmap_sg(internal_buffer->dma_buffer->scheduler->dev, in rga_mm_unmap_virt_addr()
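The two matches in rga_mm_unmap_virt_addr() imply a per-MMU teardown switch. A sketch, where the RGA_IOMMU branch, the unmap helper name, and the sg_table field are assumptions:

```c
switch (internal_buffer->dma_buffer->scheduler->data->mmu) {
case RGA_IOMMU:
	/* Assumed branch: IOMMU mappings have their own unmap helper. */
	rga_iommu_unmap(internal_buffer->dma_buffer);
	break;
case RGA_MMU:
	/* Undo the dma_map_sg() performed in rga_mm_map_virt_addr(). */
	dma_unmap_sg(internal_buffer->dma_buffer->scheduler->dev,
		     internal_buffer->dma_buffer->sgt->sgl,	/* field name assumed */
		     internal_buffer->dma_buffer->sgt->orig_nents,
		     DMA_BIDIRECTIONAL);
	break;
default:
	break;
}
```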
533 struct rga_scheduler_t *scheduler; in rga_mm_map_virt_addr() local
535 scheduler = job ? job->scheduler : in rga_mm_map_virt_addr()
536 rga_drvdata->scheduler[rga_drvdata->map_scheduler_index]; in rga_mm_map_virt_addr()
537 if (scheduler == NULL) { in rga_mm_map_virt_addr()
538 pr_err("Invalid scheduler device!\n"); in rga_mm_map_virt_addr()
587 if (!rga_mm_check_memory_limit(scheduler, mm_flag)) { in rga_mm_map_virt_addr()
588 pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n", in rga_mm_map_virt_addr()
589 scheduler->core, mm_flag); in rga_mm_map_virt_addr()
601 switch (scheduler->data->mmu) { in rga_mm_map_virt_addr()
603 ret = rga_iommu_map_sgt(sgt, virt_addr->size, buffer, scheduler->dev); in rga_mm_map_virt_addr()
606 __func__, scheduler->core); in rga_mm_map_virt_addr()
611 ret = dma_map_sg(scheduler->dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL); in rga_mm_map_virt_addr()
614 __func__, scheduler->core, in rga_mm_map_virt_addr()
625 rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu); in rga_mm_map_virt_addr()
632 buffer->scheduler = scheduler; in rga_mm_map_virt_addr()
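Lines 601-632 give the core of the virtual-address path almost verbatim; only the error labels, error codes, and parts of the message text are filled in here as assumptions. Note the asymmetric failure conventions of the two map calls: rga_iommu_map_sgt() is treated as failing on a negative return, whereas dma_map_sg() returns the number of mapped entries and signals failure by returning 0.

```c
switch (scheduler->data->mmu) {
case RGA_IOMMU:
	ret = rga_iommu_map_sgt(sgt, virt_addr->size, buffer, scheduler->dev);
	if (ret < 0) {
		pr_err("%s core[%d] failed to map sgt through the IOMMU!\n",
		       __func__, scheduler->core);
		goto free_sgt;	/* error label assumed */
	}
	break;
case RGA_MMU:
	ret = dma_map_sg(scheduler->dev, sgt->sgl, sgt->orig_nents,
			 DMA_BIDIRECTIONAL);
	if (ret == 0) {
		pr_err("%s core[%d] dma_map_sg error!\n",
		       __func__, scheduler->core);
		ret = -EINVAL;	/* error code assumed */
		goto free_sgt;
	}
	break;
default:
	pr_err("Current %s[%d] cannot support virtual address!\n",
	       rga_get_mmu_type_str(scheduler->data->mmu),
	       scheduler->data->mmu);
	ret = -EOPNOTSUPP;	/* error code assumed */
	goto free_sgt;
}

buffer->scheduler = scheduler;
```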
662 if (internal_buffer->dma_buffer->scheduler->data->mmu == RGA_IOMMU) in rga_mm_unmap_phys_addr()
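The single match in rga_mm_unmap_phys_addr() mirrors the map side: only an IOMMU mapping needs explicit teardown, because a non-IOMMU core was handed the physical address directly. A two-line sketch (helper name assumed):

```c
if (internal_buffer->dma_buffer->scheduler->data->mmu == RGA_IOMMU)
	rga_iommu_unmap(internal_buffer->dma_buffer);	/* helper name assumed */
```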
680 struct rga_scheduler_t *scheduler; in rga_mm_map_phys_addr() local
682 scheduler = job ? job->scheduler : in rga_mm_map_phys_addr()
683 rga_drvdata->scheduler[rga_drvdata->map_scheduler_index]; in rga_mm_map_phys_addr()
684 if (scheduler == NULL) { in rga_mm_map_phys_addr()
685 pr_err("Invalid scheduler device!\n"); in rga_mm_map_phys_addr()
707 if (!rga_mm_check_memory_limit(scheduler, mm_flag)) { in rga_mm_map_phys_addr()
708 pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n", in rga_mm_map_phys_addr()
709 scheduler->core, mm_flag); in rga_mm_map_phys_addr()
719 if (scheduler->data->mmu == RGA_IOMMU) { in rga_mm_map_phys_addr()
720 ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev); in rga_mm_map_phys_addr()
722 pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core); in rga_mm_map_phys_addr()
727 buffer->scheduler = scheduler; in rga_mm_map_phys_addr()
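Putting the rga_mm_map_phys_addr() matches together: a physical address only needs an explicit mapping when the core sits behind a full IOMMU; otherwise it is usable as-is. A sketch with the error codes assumed:

```c
if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
	pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
	       scheduler->core, mm_flag);
	return -EINVAL;	/* error code assumed */
}

if (scheduler->data->mmu == RGA_IOMMU) {
	/* Install an IOVA for the contiguous physical range. */
	ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
	if (ret < 0) {
		pr_err("%s core[%d] map phys_addr error!\n",
		       __func__, scheduler->core);
		return ret;
	}
}

buffer->scheduler = scheduler;
```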
973 dump_buffer->dma_buffer->scheduler->core); in rga_mm_dump_buffer()
997 dump_buffer->dma_buffer->scheduler->core); in rga_mm_dump_buffer()
1033 if (buffer == NULL || job == NULL || job->scheduler == NULL) in rga_mm_is_need_mmu()
1037 if (job->scheduler->data->mmu == RGA_IOMMU) in rga_mm_is_need_mmu()
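The two matches in rga_mm_is_need_mmu() read as guard clauses. A sketch of the visible shape; the return values and the rest of the function are assumptions, since the listing does not show them:

```c
static bool rga_mm_is_need_mmu(struct rga_job *job,
			       struct rga_internal_buffer *buffer)
{
	if (buffer == NULL || job == NULL || job->scheduler == NULL)
		return false;

	/* A core behind a real IOMMU never needs the RGA MMU page table. */
	if (job->scheduler->data->mmu == RGA_IOMMU)
		return false;

	/* Further per-buffer checks (not shown in the matches) decide the
	 * remaining cases; this default is an assumption. */
	return true;
}
```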
1303 struct rga_scheduler_t *scheduler; in rga_mm_sync_dma_sg_for_device() local
1312 scheduler = buffer->dma_buffer->scheduler; in rga_mm_sync_dma_sg_for_device()
1313 if (scheduler == NULL) { in rga_mm_sync_dma_sg_for_device()
1314 pr_err("%s(%d), failed to get scheduler, core = 0x%x\n", in rga_mm_sync_dma_sg_for_device()
1319 dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); in rga_mm_sync_dma_sg_for_device()
1329 struct rga_scheduler_t *scheduler; in rga_mm_sync_dma_sg_for_cpu() local
1338 scheduler = buffer->dma_buffer->scheduler; in rga_mm_sync_dma_sg_for_cpu()
1339 if (scheduler == NULL) { in rga_mm_sync_dma_sg_for_cpu()
1340 pr_err("%s(%d), failed to get scheduler, core = 0x%x\n", in rga_mm_sync_dma_sg_for_cpu()
1345 dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir); in rga_mm_sync_dma_sg_for_cpu()
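rga_mm_sync_dma_sg_for_device() and rga_mm_sync_dma_sg_for_cpu() are mirror images: each looks up the scheduler that owns the mapping and transfers cache ownership, to the device before a job runs and back to the CPU afterwards. A sketch of the device-direction half (struct-field names, the core-id source, and the error code are assumptions); the CPU half only swaps in dma_sync_sg_for_cpu():

```c
static int rga_mm_sync_dma_sg_for_device(struct rga_internal_buffer *buffer,
					 struct rga_job *job,
					 enum dma_data_direction dir)
{
	struct sg_table *sgt = buffer->dma_buffer->sgt;	/* field name assumed */
	struct rga_scheduler_t *scheduler = buffer->dma_buffer->scheduler;

	if (scheduler == NULL) {
		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
		       __func__, __LINE__, job->core);	/* core source assumed */
		return -EFAULT;	/* error code assumed */
	}

	/* Flush CPU caches so the device sees the buffer's latest contents. */
	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);

	return 0;
}
```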
1356 switch (job->scheduler->data->mmu) { in rga_mm_get_buffer_info()
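The lone match in rga_mm_get_buffer_info() opens another per-MMU dispatch, this time choosing which address the hardware is programmed with. A sketch under the assumption that an IOMMU core consumes an IOVA while the other cores consume a physical address; the helper names and the non-IOMMU enumerators beyond RGA_MMU are hypothetical:

```c
uint64_t addr;

switch (job->scheduler->data->mmu) {
case RGA_IOMMU:
	/* IOMMU core: program the IOVA established at map time. */
	addr = rga_mm_get_buffer_iova(internal_buffer);	/* hypothetical helper */
	break;
case RGA_MMU:
case RGA_NONE_MMU:	/* enumerator name assumed */
	/* RGA MMU or no MMU: program the physical address. */
	addr = rga_mm_get_buffer_phys_addr(internal_buffer); /* hypothetical helper */
	break;
default:
	pr_err("Current RGA mmu[%d] cannot support!\n",
	       job->scheduler->data->mmu);
	return -EOPNOTSUPP;	/* error code assumed */
}
```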
1561 if (job->scheduler->data->mmu == RGA_MMU && in rga_mm_get_channel_handle_info()
1824 if (job->scheduler->data->mmu == RGA_MMU && in rga_mm_map_channel_job_buffer()
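The last two matches, from rga_mm_get_channel_handle_info() and rga_mm_map_channel_job_buffer(), open the same guard: extra per-channel work happens only when the job runs on a legacy RGA_MMU core. A speculative sketch of the continuation, combining the guard with rga_mm_is_need_mmu() from line 1033 above; the second condition, the helper, and its arguments are all assumptions:

```c
if (job->scheduler->data->mmu == RGA_MMU &&
    rga_mm_is_need_mmu(job, job_buf->addr)) {	/* second condition assumed */
	/* Legacy MMU core: build the RGA page-table (MMU base) for this
	 * channel before the job is committed to hardware. */
	ret = rga_mm_set_mmu_base(job, img, job_buf);	/* hypothetical helper */
	if (ret < 0) {
		pr_err("%s failed to set RGA MMU base from handle!\n",
		       __func__);
		return ret;
	}
}
```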