// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Sagi Grimberg.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <rdma/ib_verbs.h>

/**
 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
 * @map: CPU to hardware queue map.
 * @dev: rdma device to provide a mapping for.
 * @first_vec: first interrupt vector to use for queues (usually 0)
 *
 * This function assumes the rdma device @dev has at least as many available
 * interrupt vectors as @map has queues. It will then query its affinity mask
 * and build a queue mapping that maps a queue to the CPUs that have irq
 * affinity for the corresponding vector.
 *
 * In case either the driver passed a @dev with fewer vectors than
 * @map->nr_queues, or @dev does not provide an affinity mask for a
 * vector, we fall back to the naive mapping.
 */
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
		struct ib_device *dev, int first_vec)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < map->nr_queues; queue++) {
		/* Ask the rdma device which CPUs this vector's irq is affine to. */
		mask = ib_get_vector_affinity(dev, first_vec + queue);
		if (!mask)
			goto fallback;

		/* Steer every CPU in the affinity mask to this hardware queue. */
		for_each_cpu(cpu, mask)
			map->mq_map[cpu] = map->queue_offset + queue;
	}

	return 0;

fallback:
	return blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
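
/*
 * Usage sketch, not part of the original file: a hypothetical RDMA block
 * driver wiring blk_mq_rdma_map_queues() into its ->map_queues callback.
 * struct my_ctrl, its ibdev member, and my_driver_map_queues() are assumed
 * names for illustration only; blk_mq_tag_set, HCTX_TYPE_DEFAULT and
 * blk_mq_rdma_map_queues() itself come from the headers included above.
 */
struct my_ctrl {
	struct ib_device *ibdev;	/* assumed driver-private RDMA device handle */
};

static int __maybe_unused my_driver_map_queues(struct blk_mq_tag_set *set)
{
	struct my_ctrl *ctrl = set->driver_data;

	/*
	 * Map the default hardware context group according to the device's
	 * irq affinity, starting at completion vector 0.
	 */
	return blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				      ctrl->ibdev, 0);
}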