Lines Matching refs:crq (all references to the symbol crq in drivers/scsi/ibmvscsi/ibmvfc.c; each hit shows the file line number, the source line, and the enclosing function)

157 entry->fmt = evt->crq.format; in ibmvfc_trc_start()
190 entry->fmt = evt->crq.format; in ibmvfc_trc_end()
704 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_release_crq_queue() local
717 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); in ibmvfc_release_crq_queue()
718 free_page((unsigned long)crq->msgs); in ibmvfc_release_crq_queue()
758 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_reset_crq() local
772 memset(crq->msgs, 0, PAGE_SIZE); in ibmvfc_reset_crq()
773 crq->cur = 0; in ibmvfc_reset_crq()
777 crq->msg_token, PAGE_SIZE); in ibmvfc_reset_crq()
1276 evt->crq.valid = 0x80; in ibmvfc_init_event_pool()
1277 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); in ibmvfc_init_event_pool()
1344 evt->crq.format = format; in ibmvfc_init_event()
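The three hits above show how each pooled event pre-builds its CRQ descriptor: ibmvfc_init_event_pool sets valid to 0x80 (the bit the receive side later tests) and points ioba at that event's transfer IU inside the DMA-mapped pool, and ibmvfc_init_event stamps the format when the event is prepared for sending. A minimal sketch of the pattern, with the descriptor layout assumed from the listing rather than copied from ibmvfc.h:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only: demo_crq mirrors the fields referenced above; the exact
 * layout and padding of the driver's struct ibmvfc_crq are assumptions. */
struct demo_crq {
	u8	valid;		/* 0x80 = valid entry */
	u8	format;		/* IBMVFC_CMD_FORMAT or IBMVFC_MAD_FORMAT */
	u8	pad[6];
	__be64	ioba;		/* bus address of this event's transfer IU */
};

static void demo_init_pool_entry(struct demo_crq *crq, dma_addr_t iu_token,
				 size_t iu_size, int i)
{
	crq->valid = 0x80;
	crq->ioba = cpu_to_be64(iu_token + (u64)iu_size * i);
}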
1460 __be64 *crq_as_u64 = (__be64 *) &evt->crq; in ibmvfc_send_event()
1465 if (evt->crq.format == IBMVFC_CMD_FORMAT) in ibmvfc_send_event()
1467 else if (evt->crq.format == IBMVFC_MAD_FORMAT) in ibmvfc_send_event()
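The cast at line 1460 is the interesting part of the send path: the 16-byte CRQ descriptor is viewed as two 64-bit words, which is the shape the H_SEND_CRQ hypercall expects. A hedged sketch of the send side under that assumption (demo_send_crq is an illustrative name, not the driver's helper):

#include <asm/hvcall.h>		/* plpar_hcall_norets(), H_SEND_CRQ */
#include <asm/vio.h>		/* struct vio_dev */
#include <asm/byteorder.h>

/* Sketch: post one CRQ entry to the partner as two 64-bit hcall args. */
static long demo_send_crq(struct vio_dev *vdev, __be64 *crq_as_u64)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address,
				  be64_to_cpu(crq_as_u64[0]),
				  be64_to_cpu(crq_as_u64[1]));
}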
1685 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_queuecommand_lck()
1957 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + in ibmvfc_bsg_request()
2036 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_reset_device()
2301 if (evt->crq.format == IBMVFC_CMD_FORMAT && in ibmvfc_match_key()
2366 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); in ibmvfc_abort_task_set()
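Lines 1685, 1957, 2036 and 2366 all repeat one address-fixup idiom: since evt->crq.ioba is the bus address of the event's transfer IU, the bus address of an embedded member (the response buffer, or a MAD payload) is that base plus offsetof() of the member; the value is stored big-endian, so the arithmetic round-trips through CPU byte order. A hypothetical helper capturing the idiom:

#include <linux/stddef.h>	/* offsetof() */
#include <linux/types.h>
#include <asm/byteorder.h>

/* demo_member_ioba() is illustrative, not part of the driver. */
static __be64 demo_member_ioba(__be64 iu_ioba, size_t member_offset)
{
	return cpu_to_be64(be64_to_cpu(iu_ioba) + member_offset);
}

/* e.g.: vfc_cmd->resp.va = demo_member_ioba(evt->crq.ioba,
 *				offsetof(struct ibmvfc_cmd, rsp)); */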
2707 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, in ibmvfc_handle_async() argument
2710 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); in ibmvfc_handle_async()
2714 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), in ibmvfc_handle_async()
2715 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), in ibmvfc_handle_async()
2716 ibmvfc_get_link_state(crq->link_state)); in ibmvfc_handle_async()
2718 switch (be64_to_cpu(crq->event)) { in ibmvfc_handle_async()
2720 switch (crq->link_state) { in ibmvfc_handle_async()
2759 if (!crq->scsi_id && !crq->wwpn && !crq->node_name) in ibmvfc_handle_async()
2761 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) in ibmvfc_handle_async()
2763 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) in ibmvfc_handle_async()
2765 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) in ibmvfc_handle_async()
2767 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) in ibmvfc_handle_async()
2769 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { in ibmvfc_handle_async()
2786 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); in ibmvfc_handle_async()
2797 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) in ibmvfc_handle_crq() argument
2800 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_crq()
2802 switch (crq->valid) { in ibmvfc_handle_crq()
2804 switch (crq->format) { in ibmvfc_handle_crq()
2819 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); in ibmvfc_handle_crq()
2826 if (crq->format == IBMVFC_PARTITION_MIGRATED) { in ibmvfc_handle_crq()
2836 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { in ibmvfc_handle_crq()
2837 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
2842 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
2848 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_crq()
2852 if (crq->format == IBMVFC_ASYNC_EVENT) in ibmvfc_handle_crq()
2861 crq->ioba); in ibmvfc_handle_crq()
2867 crq->ioba); in ibmvfc_handle_crq()
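Line 2800 shows that, for command responses, the ioba the partner returns is not a buffer address at all but the kernel address of the originating ibmvfc_event, used as a correlation token; the error paths at lines 2861 and 2867 fire when that token cannot be trusted. A hedged sketch of the kind of bounds check this implies, with illustrative demo_ names (the driver's own validation helper may differ):

#include <linux/types.h>

/* Sketch: accept a correlation token only if it points at a properly
 * aligned slot inside the event pool. */
static bool demo_valid_token(void *pool, size_t nr_events, size_t event_size,
			     void *token)
{
	ptrdiff_t off = (char *)token - (char *)pool;

	return off >= 0 && off < (ptrdiff_t)(nr_events * event_size) &&
	       off % event_size == 0;
}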
3194 struct ibmvfc_async_crq *crq; in ibmvfc_next_async_crq() local
3196 crq = &async_crq->msgs[async_crq->cur]; in ibmvfc_next_async_crq()
3197 if (crq->valid & 0x80) { in ibmvfc_next_async_crq()
3202 crq = NULL; in ibmvfc_next_async_crq()
3204 return crq; in ibmvfc_next_async_crq()
3216 struct ibmvfc_crq_queue *queue = &vhost->crq; in ibmvfc_next_crq()
3217 struct ibmvfc_crq *crq; in ibmvfc_next_crq() local
3219 crq = &queue->msgs[queue->cur]; in ibmvfc_next_crq()
3220 if (crq->valid & 0x80) { in ibmvfc_next_crq()
3225 crq = NULL; in ibmvfc_next_crq()
3227 return crq; in ibmvfc_next_crq()
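ibmvfc_next_async_crq and ibmvfc_next_crq share the same consumer-side ring walk: a slot is live only while its valid bit (0x80) is set, and the cursor wraps at the end of the page-sized ring. A sketch of that walk using the queue fields named in the listing (struct ibmvfc_crq and struct ibmvfc_crq_queue come from ibmvfc.h; the barrier placement here is an assumption):

#include <asm/barrier.h>	/* rmb() */

/* Sketch: pop the next valid entry off the CRQ ring, or NULL if empty. */
static struct ibmvfc_crq *demo_next_crq(struct ibmvfc_crq_queue *queue)
{
	struct ibmvfc_crq *crq = &queue->msgs[queue->cur];

	if (!(crq->valid & 0x80))
		return NULL;			/* nothing new posted */

	if (++queue->cur == queue->size)	/* wrap the cursor */
		queue->cur = 0;
	rmb();					/* read payload after valid bit */
	return crq;
}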
3261 struct ibmvfc_crq *crq; in ibmvfc_tasklet() local
3276 while ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3277 ibmvfc_handle_crq(crq, vhost); in ibmvfc_tasklet()
3278 crq->valid = 0; in ibmvfc_tasklet()
3288 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3290 ibmvfc_handle_crq(crq, vhost); in ibmvfc_tasklet()
3291 crq->valid = 0; in ibmvfc_tasklet()
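The two ibmvfc_next_crq hits inside ibmvfc_tasklet (lines 3276 and 3288) are a drain-then-recheck loop: the ring is drained with interrupts masked, interrupts are re-enabled, and the ring is polled once more so an entry that raced the enable is not stranded until the next interrupt. A condensed sketch of the main-queue half (the async-queue handling the real loop interleaves is elided; vio_enable_interrupts()/vio_disable_interrupts() are the standard pseries VIO helpers):

#include <asm/vio.h>
#include <asm/barrier.h>

static void demo_drain_crq(struct ibmvfc_host *vhost, struct vio_dev *vdev)
{
	struct ibmvfc_crq *crq;
	int done = 0;

	while (!done) {
		/* Drain everything already posted, interrupts masked. */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;		/* return the slot */
			wmb();
		}

		vio_enable_interrupts(vdev);
		/* Anything posted while we were re-enabling? */
		if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}
}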
3889 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
3894 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
3898 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4877 struct ibmvfc_crq_queue *crq = &vhost->crq; in ibmvfc_init_crq() local
4880 crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL); in ibmvfc_init_crq()
4882 if (!crq->msgs) in ibmvfc_init_crq()
4885 crq->size = PAGE_SIZE / sizeof(*crq->msgs); in ibmvfc_init_crq()
4886 crq->msg_token = dma_map_single(dev, crq->msgs, in ibmvfc_init_crq()
4889 if (dma_mapping_error(dev, crq->msg_token)) in ibmvfc_init_crq()
4893 crq->msg_token, PAGE_SIZE); in ibmvfc_init_crq()
4920 crq->cur = 0; in ibmvfc_init_crq()
4930 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); in ibmvfc_init_crq()
4932 free_page((unsigned long)crq->msgs); in ibmvfc_init_crq()
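The setup in ibmvfc_init_crq pairs line for line with the teardown in ibmvfc_release_crq_queue and with its own error path above: one zeroed page becomes the ring (size = PAGE_SIZE / sizeof(entry)), the page is DMA-mapped, and the resulting token plus PAGE_SIZE (line 4893) are what register the queue with firmware. A simplified sketch of that sequence, assuming the usual pseries H_REG_CRQ flow (the busy/H_RESOURCE retries the real function performs are elided):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/hvcall.h>		/* plpar_hcall_norets(), H_REG_CRQ */
#include <asm/vio.h>

static int demo_init_crq(struct device *dev, struct vio_dev *vdev,
			 struct ibmvfc_crq_queue *crq)
{
	long rc;

	crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	/* Hand the page to the hypervisor so the partner can post entries. */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);
	if (rc)
		goto reg_failed;

	crq->cur = 0;
	return 0;

reg_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return -ENOMEM;
}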