Lines Matching refs:nbd

66 struct nbd_device *nbd; member
127 struct nbd_device *nbd; member
141 #define nbd_name(nbd) ((nbd)->disk->disk_name) argument
151 static int nbd_dev_dbg_init(struct nbd_device *nbd);
152 static void nbd_dev_dbg_close(struct nbd_device *nbd);
153 static void nbd_config_put(struct nbd_device *nbd);
157 static void nbd_disconnect_and_put(struct nbd_device *nbd);
159 static inline struct device *nbd_to_dev(struct nbd_device *nbd) in nbd_to_dev() argument
161 return disk_to_dev(nbd->disk); in nbd_to_dev()
209 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; in pid_show() local
211 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); in pid_show()
219 static void nbd_dev_remove(struct nbd_device *nbd) in nbd_dev_remove() argument
221 struct gendisk *disk = nbd->disk; in nbd_dev_remove()
228 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_remove()
239 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete) in nbd_dev_remove()
240 complete(nbd->destroy_complete); in nbd_dev_remove()
242 kfree(nbd); in nbd_dev_remove()
245 static void nbd_put(struct nbd_device *nbd) in nbd_put() argument
247 if (refcount_dec_and_mutex_lock(&nbd->refs, in nbd_put()
249 idr_remove(&nbd_index_idr, nbd->index); in nbd_put()
250 nbd_dev_remove(nbd); in nbd_put()
261 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, in nbd_mark_nsock_dead() argument
264 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { in nbd_mark_nsock_dead()
269 args->index = nbd->index; in nbd_mark_nsock_dead()
275 if (atomic_dec_return(&nbd->config->live_connections) == 0) { in nbd_mark_nsock_dead()
277 &nbd->config->runtime_flags)) { in nbd_mark_nsock_dead()
279 &nbd->config->runtime_flags); in nbd_mark_nsock_dead()
280 dev_info(nbd_to_dev(nbd), in nbd_mark_nsock_dead()
290 static void nbd_size_clear(struct nbd_device *nbd) in nbd_size_clear() argument
292 if (nbd->config->bytesize) { in nbd_size_clear()
293 set_capacity(nbd->disk, 0); in nbd_size_clear()
294 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_size_clear()
298 static void nbd_size_update(struct nbd_device *nbd, bool start) in nbd_size_update() argument
300 struct nbd_config *config = nbd->config; in nbd_size_update()
301 struct block_device *bdev = bdget_disk(nbd->disk, 0); in nbd_size_update()
305 nbd->disk->queue->limits.discard_granularity = config->blksize; in nbd_size_update()
306 nbd->disk->queue->limits.discard_alignment = config->blksize; in nbd_size_update()
307 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_size_update()
309 blk_queue_logical_block_size(nbd->disk->queue, config->blksize); in nbd_size_update()
310 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); in nbd_size_update()
311 set_capacity(nbd->disk, nr_sectors); in nbd_size_update()
318 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_size_update()
321 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); in nbd_size_update()
324 static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, in nbd_size_set() argument
327 struct nbd_config *config = nbd->config; in nbd_size_set()
330 if (nbd->task_recv != NULL) in nbd_size_set()
331 nbd_size_update(nbd, false); in nbd_size_set()
338 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, in nbd_complete_rq()
347 static void sock_shutdown(struct nbd_device *nbd) in sock_shutdown() argument
349 struct nbd_config *config = nbd->config; in sock_shutdown()
360 nbd_mark_nsock_dead(nbd, nsock, 0); in sock_shutdown()
363 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); in sock_shutdown()
386 struct nbd_device *nbd = cmd->nbd; in nbd_xmit_timeout() local
392 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_xmit_timeout()
397 config = nbd->config; in nbd_xmit_timeout()
400 (config->num_connections == 1 && nbd->tag_set.timeout)) { in nbd_xmit_timeout()
401 dev_err_ratelimited(nbd_to_dev(nbd), in nbd_xmit_timeout()
423 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_xmit_timeout()
428 nbd_config_put(nbd); in nbd_xmit_timeout()
433 if (!nbd->tag_set.timeout) { in nbd_xmit_timeout()
440 …dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", in nbd_xmit_timeout()
450 nbd_config_put(nbd); in nbd_xmit_timeout()
455 nbd_config_put(nbd); in nbd_xmit_timeout()
459 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); in nbd_xmit_timeout()
463 sock_shutdown(nbd); in nbd_xmit_timeout()
464 nbd_config_put(nbd); in nbd_xmit_timeout()
473 static int sock_xmit(struct nbd_device *nbd, int index, int send, in sock_xmit() argument
476 struct nbd_config *config = nbd->config; in sock_xmit()
483 dev_err_ratelimited(disk_to_dev(nbd->disk), in sock_xmit()
529 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) in nbd_send_cmd() argument
532 struct nbd_config *config = nbd->config; in nbd_send_cmd()
553 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
589 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); in nbd_send_cmd()
591 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", in nbd_send_cmd()
594 result = sock_xmit(nbd, index, 1, &from, in nbd_send_cmd()
611 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_send_cmd()
629 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", in nbd_send_cmd()
640 result = sock_xmit(nbd, index, 1, &from, flags, &sent); in nbd_send_cmd()
652 dev_err(disk_to_dev(nbd->disk), in nbd_send_cmd()
676 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) in nbd_read_stat() argument
678 struct nbd_config *config = nbd->config; in nbd_read_stat()
692 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
695 dev_err(disk_to_dev(nbd->disk), in nbd_read_stat()
701 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", in nbd_read_stat()
709 if (hwq < nbd->tag_set.nr_hw_queues) in nbd_read_stat()
710 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], in nbd_read_stat()
713 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", in nbd_read_stat()
722 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", in nbd_read_stat()
728 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", in nbd_read_stat()
734 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", in nbd_read_stat()
740 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", in nbd_read_stat()
746 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); in nbd_read_stat()
753 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); in nbd_read_stat()
755 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", in nbd_read_stat()
770 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", in nbd_read_stat()
785 struct nbd_device *nbd = args->nbd; in recv_work() local
786 struct nbd_config *config = nbd->config; in recv_work()
791 cmd = nbd_read_stat(nbd, args->index); in recv_work()
796 nbd_mark_nsock_dead(nbd, nsock, 1); in recv_work()
805 nbd_config_put(nbd); in recv_work()
827 static void nbd_clear_que(struct nbd_device *nbd) in nbd_clear_que() argument
829 blk_mq_quiesce_queue(nbd->disk->queue); in nbd_clear_que()
830 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); in nbd_clear_que()
831 blk_mq_unquiesce_queue(nbd->disk->queue); in nbd_clear_que()
832 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); in nbd_clear_que()
835 static int find_fallback(struct nbd_device *nbd, int index) in find_fallback() argument
837 struct nbd_config *config = nbd->config; in find_fallback()
846 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
869 dev_err_ratelimited(disk_to_dev(nbd->disk), in find_fallback()
878 static int wait_for_reconnect(struct nbd_device *nbd) in wait_for_reconnect() argument
880 struct nbd_config *config = nbd->config; in wait_for_reconnect()
897 struct nbd_device *nbd = cmd->nbd; in nbd_handle_cmd() local
902 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_handle_cmd()
903 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
908 config = nbd->config; in nbd_handle_cmd()
911 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
913 nbd_config_put(nbd); in nbd_handle_cmd()
923 index = find_fallback(nbd, index); in nbd_handle_cmd()
926 if (wait_for_reconnect(nbd)) { in nbd_handle_cmd()
936 sock_shutdown(nbd); in nbd_handle_cmd()
937 nbd_config_put(nbd); in nbd_handle_cmd()
959 ret = nbd_send_cmd(nbd, cmd, index); in nbd_handle_cmd()
961 dev_err_ratelimited(disk_to_dev(nbd->disk), in nbd_handle_cmd()
963 nbd_mark_nsock_dead(nbd, nsock, 1); in nbd_handle_cmd()
969 nbd_config_put(nbd); in nbd_handle_cmd()
1006 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, in nbd_get_socket() argument
1017 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); in nbd_get_socket()
1026 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, in nbd_add_socket() argument
1029 struct nbd_config *config = nbd->config; in nbd_add_socket()
1035 sock = nbd_get_socket(nbd, arg, &err); in nbd_add_socket()
1043 blk_mq_freeze_queue(nbd->disk->queue); in nbd_add_socket()
1045 if (!netlink && !nbd->task_setup && in nbd_add_socket()
1047 nbd->task_setup = current; in nbd_add_socket()
1050 (nbd->task_setup != current || in nbd_add_socket()
1052 dev_err(disk_to_dev(nbd->disk), in nbd_add_socket()
1083 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1088 blk_mq_unfreeze_queue(nbd->disk->queue); in nbd_add_socket()
1093 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) in nbd_reconnect_socket() argument
1095 struct nbd_config *config = nbd->config; in nbd_reconnect_socket()
1101 sock = nbd_get_socket(nbd, arg, &err); in nbd_reconnect_socket()
1123 if (nbd->tag_set.timeout) in nbd_reconnect_socket()
1124 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; in nbd_reconnect_socket()
1126 refcount_inc(&nbd->config_refs); in nbd_reconnect_socket()
1133 args->nbd = nbd; in nbd_reconnect_socket()
1143 queue_work(nbd->recv_workq, &args->work); in nbd_reconnect_socket()
1161 static void nbd_parse_flags(struct nbd_device *nbd) in nbd_parse_flags() argument
1163 struct nbd_config *config = nbd->config; in nbd_parse_flags()
1165 set_disk_ro(nbd->disk, true); in nbd_parse_flags()
1167 set_disk_ro(nbd->disk, false); in nbd_parse_flags()
1169 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_parse_flags()
1172 blk_queue_write_cache(nbd->disk->queue, true, true); in nbd_parse_flags()
1174 blk_queue_write_cache(nbd->disk->queue, true, false); in nbd_parse_flags()
1177 blk_queue_write_cache(nbd->disk->queue, false, false); in nbd_parse_flags()
1180 static void send_disconnects(struct nbd_device *nbd) in send_disconnects() argument
1182 struct nbd_config *config = nbd->config; in send_disconnects()
1196 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); in send_disconnects()
1198 dev_err(disk_to_dev(nbd->disk), in send_disconnects()
1204 static int nbd_disconnect(struct nbd_device *nbd) in nbd_disconnect() argument
1206 struct nbd_config *config = nbd->config; in nbd_disconnect()
1208 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); in nbd_disconnect()
1210 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); in nbd_disconnect()
1211 send_disconnects(nbd); in nbd_disconnect()
1215 static void nbd_clear_sock(struct nbd_device *nbd) in nbd_clear_sock() argument
1217 sock_shutdown(nbd); in nbd_clear_sock()
1218 nbd_clear_que(nbd); in nbd_clear_sock()
1219 nbd->task_setup = NULL; in nbd_clear_sock()
1222 static void nbd_config_put(struct nbd_device *nbd) in nbd_config_put() argument
1224 if (refcount_dec_and_mutex_lock(&nbd->config_refs, in nbd_config_put()
1225 &nbd->config_lock)) { in nbd_config_put()
1226 struct nbd_config *config = nbd->config; in nbd_config_put()
1227 nbd_dev_dbg_close(nbd); in nbd_config_put()
1228 nbd_size_clear(nbd); in nbd_config_put()
1231 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_config_put()
1232 nbd->task_recv = NULL; in nbd_config_put()
1233 nbd_clear_sock(nbd); in nbd_config_put()
1242 kfree(nbd->config); in nbd_config_put()
1243 nbd->config = NULL; in nbd_config_put()
1245 if (nbd->recv_workq) in nbd_config_put()
1246 destroy_workqueue(nbd->recv_workq); in nbd_config_put()
1247 nbd->recv_workq = NULL; in nbd_config_put()
1249 nbd->tag_set.timeout = 0; in nbd_config_put()
1250 nbd->disk->queue->limits.discard_granularity = 0; in nbd_config_put()
1251 nbd->disk->queue->limits.discard_alignment = 0; in nbd_config_put()
1252 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); in nbd_config_put()
1253 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); in nbd_config_put()
1255 mutex_unlock(&nbd->config_lock); in nbd_config_put()
1256 nbd_put(nbd); in nbd_config_put()
1261 static int nbd_start_device(struct nbd_device *nbd) in nbd_start_device() argument
1263 struct nbd_config *config = nbd->config; in nbd_start_device()
1267 if (nbd->task_recv) in nbd_start_device()
1273 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); in nbd_start_device()
1277 nbd->recv_workq = alloc_workqueue("knbd%d-recv", in nbd_start_device()
1279 WQ_UNBOUND, 0, nbd->index); in nbd_start_device()
1280 if (!nbd->recv_workq) { in nbd_start_device()
1281 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); in nbd_start_device()
1285 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); in nbd_start_device()
1286 nbd->task_recv = current; in nbd_start_device()
1288 nbd_parse_flags(nbd); in nbd_start_device()
1290 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); in nbd_start_device()
1292 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); in nbd_start_device()
1297 nbd_dev_dbg_init(nbd); in nbd_start_device()
1303 sock_shutdown(nbd); in nbd_start_device()
1313 flush_workqueue(nbd->recv_workq); in nbd_start_device()
1317 if (nbd->tag_set.timeout) in nbd_start_device()
1319 nbd->tag_set.timeout; in nbd_start_device()
1321 refcount_inc(&nbd->config_refs); in nbd_start_device()
1323 args->nbd = nbd; in nbd_start_device()
1325 queue_work(nbd->recv_workq, &args->work); in nbd_start_device()
1327 nbd_size_update(nbd, true); in nbd_start_device()
1331 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) in nbd_start_device_ioctl() argument
1333 struct nbd_config *config = nbd->config; in nbd_start_device_ioctl()
1336 ret = nbd_start_device(nbd); in nbd_start_device_ioctl()
1341 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); in nbd_start_device_ioctl()
1342 mutex_unlock(&nbd->config_lock); in nbd_start_device_ioctl()
1346 sock_shutdown(nbd); in nbd_start_device_ioctl()
1347 nbd_clear_que(nbd); in nbd_start_device_ioctl()
1350 flush_workqueue(nbd->recv_workq); in nbd_start_device_ioctl()
1351 mutex_lock(&nbd->config_lock); in nbd_start_device_ioctl()
1361 static void nbd_clear_sock_ioctl(struct nbd_device *nbd, in nbd_clear_sock_ioctl() argument
1364 nbd_clear_sock(nbd); in nbd_clear_sock_ioctl()
1368 &nbd->config->runtime_flags)) in nbd_clear_sock_ioctl()
1369 nbd_config_put(nbd); in nbd_clear_sock_ioctl()
1380 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) in nbd_set_cmd_timeout() argument
1382 nbd->tag_set.timeout = timeout * HZ; in nbd_set_cmd_timeout()
1384 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); in nbd_set_cmd_timeout()
1386 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); in nbd_set_cmd_timeout()
1390 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, in __nbd_ioctl() argument
1393 struct nbd_config *config = nbd->config; in __nbd_ioctl()
1397 return nbd_disconnect(nbd); in __nbd_ioctl()
1399 nbd_clear_sock_ioctl(nbd, bdev); in __nbd_ioctl()
1402 return nbd_add_socket(nbd, arg, false); in __nbd_ioctl()
1408 nbd_size_set(nbd, arg, in __nbd_ioctl()
1412 nbd_size_set(nbd, config->blksize, in __nbd_ioctl()
1416 nbd_size_set(nbd, config->blksize, arg); in __nbd_ioctl()
1419 nbd_set_cmd_timeout(nbd, arg); in __nbd_ioctl()
1426 return nbd_start_device_ioctl(nbd, bdev); in __nbd_ioctl()
1446 struct nbd_device *nbd = bdev->bd_disk->private_data; in nbd_ioctl() local
1447 struct nbd_config *config = nbd->config; in nbd_ioctl()
1459 mutex_lock(&nbd->config_lock); in nbd_ioctl()
1466 error = __nbd_ioctl(bdev, nbd, cmd, arg); in nbd_ioctl()
1468 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); in nbd_ioctl()
1469 mutex_unlock(&nbd->config_lock); in nbd_ioctl()
1496 struct nbd_device *nbd; in nbd_open() local
1500 nbd = bdev->bd_disk->private_data; in nbd_open()
1501 if (!nbd) { in nbd_open()
1505 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_open()
1509 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1512 mutex_lock(&nbd->config_lock); in nbd_open()
1513 if (refcount_inc_not_zero(&nbd->config_refs)) { in nbd_open()
1514 mutex_unlock(&nbd->config_lock); in nbd_open()
1520 mutex_unlock(&nbd->config_lock); in nbd_open()
1523 nbd->config = config; in nbd_open()
1524 refcount_set(&nbd->config_refs, 1); in nbd_open()
1525 refcount_inc(&nbd->refs); in nbd_open()
1526 mutex_unlock(&nbd->config_lock); in nbd_open()
1528 } else if (nbd_disconnected(nbd->config)) { in nbd_open()
1538 struct nbd_device *nbd = disk->private_data; in nbd_release() local
1541 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && in nbd_release()
1543 nbd_disconnect_and_put(nbd); in nbd_release()
1546 nbd_config_put(nbd); in nbd_release()
1547 nbd_put(nbd); in nbd_release()
1563 struct nbd_device *nbd = s->private; in nbd_dbg_tasks_show() local
1565 if (nbd->task_recv) in nbd_dbg_tasks_show()
1566 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); in nbd_dbg_tasks_show()
1585 struct nbd_device *nbd = s->private; in nbd_dbg_flags_show() local
1586 u32 flags = nbd->config->flags; in nbd_dbg_flags_show()
1618 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1621 struct nbd_config *config = nbd->config; in nbd_dev_dbg_init()
1626 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); in nbd_dev_dbg_init()
1628 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", in nbd_dev_dbg_init()
1629 nbd_name(nbd)); in nbd_dev_dbg_init()
1634 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); in nbd_dev_dbg_init()
1636 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); in nbd_dev_dbg_init()
1638 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); in nbd_dev_dbg_init()
1643 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1645 debugfs_remove_recursive(nbd->config->dbg_dir); in nbd_dev_dbg_close()
1668 static int nbd_dev_dbg_init(struct nbd_device *nbd) in nbd_dev_dbg_init() argument
1673 static void nbd_dev_dbg_close(struct nbd_device *nbd) in nbd_dev_dbg_close() argument
1692 cmd->nbd = set->driver_data; in nbd_init_request()
1707 struct nbd_device *nbd; in nbd_dev_add() local
1712 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); in nbd_dev_add()
1713 if (!nbd) in nbd_dev_add()
1721 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, in nbd_dev_add()
1726 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); in nbd_dev_add()
1733 nbd->index = index; in nbd_dev_add()
1734 nbd->disk = disk; in nbd_dev_add()
1735 nbd->tag_set.ops = &nbd_mq_ops; in nbd_dev_add()
1736 nbd->tag_set.nr_hw_queues = 1; in nbd_dev_add()
1737 nbd->tag_set.queue_depth = 128; in nbd_dev_add()
1738 nbd->tag_set.numa_node = NUMA_NO_NODE; in nbd_dev_add()
1739 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); in nbd_dev_add()
1740 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | in nbd_dev_add()
1742 nbd->tag_set.driver_data = nbd; in nbd_dev_add()
1743 nbd->destroy_complete = NULL; in nbd_dev_add()
1745 err = blk_mq_alloc_tag_set(&nbd->tag_set); in nbd_dev_add()
1749 q = blk_mq_init_queue(&nbd->tag_set); in nbd_dev_add()
1769 mutex_init(&nbd->config_lock); in nbd_dev_add()
1770 refcount_set(&nbd->config_refs, 0); in nbd_dev_add()
1771 refcount_set(&nbd->refs, 1); in nbd_dev_add()
1772 INIT_LIST_HEAD(&nbd->list); in nbd_dev_add()
1776 disk->private_data = nbd; in nbd_dev_add()
1783 blk_mq_free_tag_set(&nbd->tag_set); in nbd_dev_add()
1789 kfree(nbd); in nbd_dev_add()
1796 struct nbd_device *nbd = ptr; in find_free_cb() local
1799 if (!refcount_read(&nbd->config_refs)) { in find_free_cb()
1800 *found = nbd; in find_free_cb()
1832 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) in nbd_genl_size_set() argument
1834 struct nbd_config *config = nbd->config; in nbd_genl_size_set()
1852 nbd_size_set(nbd, bsize, div64_u64(bytes, bsize)); in nbd_genl_size_set()
1859 struct nbd_device *nbd = NULL; in nbd_genl_connect() local
1881 ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd); in nbd_genl_connect()
1890 nbd = idr_find(&nbd_index_idr, new_index); in nbd_genl_connect()
1893 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1894 if (!nbd) { in nbd_genl_connect()
1901 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_connect()
1904 if (!nbd) { in nbd_genl_connect()
1911 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && in nbd_genl_connect()
1912 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) { in nbd_genl_connect()
1913 nbd->destroy_complete = &destroy_complete; in nbd_genl_connect()
1921 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_connect()
1931 mutex_lock(&nbd->config_lock); in nbd_genl_connect()
1932 if (refcount_read(&nbd->config_refs)) { in nbd_genl_connect()
1933 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1934 nbd_put(nbd); in nbd_genl_connect()
1940 if (WARN_ON(nbd->config)) { in nbd_genl_connect()
1941 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1942 nbd_put(nbd); in nbd_genl_connect()
1947 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
1948 nbd_put(nbd); in nbd_genl_connect()
1952 nbd->config = config; in nbd_genl_connect()
1953 refcount_set(&nbd->config_refs, 1); in nbd_genl_connect()
1956 ret = nbd_genl_size_set(info, nbd); in nbd_genl_connect()
1961 nbd_set_cmd_timeout(nbd, in nbd_genl_connect()
1983 &nbd->flags)) in nbd_genl_connect()
1987 &nbd->flags)) in nbd_genl_connect()
1988 refcount_inc(&nbd->refs); in nbd_genl_connect()
2021 ret = nbd_add_socket(nbd, fd, true); in nbd_genl_connect()
2026 ret = nbd_start_device(nbd); in nbd_genl_connect()
2028 mutex_unlock(&nbd->config_lock); in nbd_genl_connect()
2031 refcount_inc(&nbd->config_refs); in nbd_genl_connect()
2032 nbd_connect_reply(info, nbd->index); in nbd_genl_connect()
2034 nbd_config_put(nbd); in nbd_genl_connect()
2036 nbd_put(nbd); in nbd_genl_connect()
2040 static void nbd_disconnect_and_put(struct nbd_device *nbd) in nbd_disconnect_and_put() argument
2042 mutex_lock(&nbd->config_lock); in nbd_disconnect_and_put()
2043 nbd_disconnect(nbd); in nbd_disconnect_and_put()
2044 sock_shutdown(nbd); in nbd_disconnect_and_put()
2045 wake_up(&nbd->config->conn_wait); in nbd_disconnect_and_put()
2052 if (nbd->recv_workq) in nbd_disconnect_and_put()
2053 flush_workqueue(nbd->recv_workq); in nbd_disconnect_and_put()
2054 nbd_clear_que(nbd); in nbd_disconnect_and_put()
2055 nbd->task_setup = NULL; in nbd_disconnect_and_put()
2056 mutex_unlock(&nbd->config_lock); in nbd_disconnect_and_put()
2059 &nbd->config->runtime_flags)) in nbd_disconnect_and_put()
2060 nbd_config_put(nbd); in nbd_disconnect_and_put()
2065 struct nbd_device *nbd; in nbd_genl_disconnect() local
2077 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_disconnect()
2078 if (!nbd) { in nbd_genl_disconnect()
2084 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_disconnect()
2091 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_disconnect()
2092 nbd_put(nbd); in nbd_genl_disconnect()
2095 nbd_disconnect_and_put(nbd); in nbd_genl_disconnect()
2096 nbd_config_put(nbd); in nbd_genl_disconnect()
2097 nbd_put(nbd); in nbd_genl_disconnect()
2103 struct nbd_device *nbd = NULL; in nbd_genl_reconfigure() local
2118 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_reconfigure()
2119 if (!nbd) { in nbd_genl_reconfigure()
2125 if (!refcount_inc_not_zero(&nbd->refs)) { in nbd_genl_reconfigure()
2133 if (!refcount_inc_not_zero(&nbd->config_refs)) { in nbd_genl_reconfigure()
2134 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2136 nbd_put(nbd); in nbd_genl_reconfigure()
2140 mutex_lock(&nbd->config_lock); in nbd_genl_reconfigure()
2141 config = nbd->config; in nbd_genl_reconfigure()
2143 !nbd->task_recv) { in nbd_genl_reconfigure()
2144 dev_err(nbd_to_dev(nbd), in nbd_genl_reconfigure()
2150 ret = nbd_genl_size_set(info, nbd); in nbd_genl_reconfigure()
2155 nbd_set_cmd_timeout(nbd, in nbd_genl_reconfigure()
2166 &nbd->flags)) in nbd_genl_reconfigure()
2170 &nbd->flags)) in nbd_genl_reconfigure()
2171 refcount_inc(&nbd->refs); in nbd_genl_reconfigure()
2208 ret = nbd_reconnect_socket(nbd, fd); in nbd_genl_reconfigure()
2214 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); in nbd_genl_reconfigure()
2218 mutex_unlock(&nbd->config_lock); in nbd_genl_reconfigure()
2219 nbd_config_put(nbd); in nbd_genl_reconfigure()
2220 nbd_put(nbd); in nbd_genl_reconfigure()
2222 nbd_put(nbd); in nbd_genl_reconfigure()
2266 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) in populate_nbd_status() argument
2279 if (refcount_read(&nbd->config_refs)) in populate_nbd_status()
2284 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); in populate_nbd_status()
2297 struct nbd_device *nbd = ptr; in status_cb() local
2298 return populate_nbd_status(nbd, (struct sk_buff *)data); in status_cb()
2337 struct nbd_device *nbd; in nbd_genl_status() local
2338 nbd = idr_find(&nbd_index_idr, index); in nbd_genl_status()
2339 if (nbd) { in nbd_genl_status()
2340 ret = populate_nbd_status(nbd, reply); in nbd_genl_status()
2462 struct nbd_device *nbd = ptr; in nbd_exit_cb() local
2464 list_add_tail(&nbd->list, list); in nbd_exit_cb()
2470 struct nbd_device *nbd; in nbd_cleanup() local
2486 nbd = list_first_entry(&del_list, struct nbd_device, list); in nbd_cleanup()
2487 list_del_init(&nbd->list); in nbd_cleanup()
2488 if (refcount_read(&nbd->config_refs)) in nbd_cleanup()
2490 refcount_read(&nbd->config_refs)); in nbd_cleanup()
2491 if (refcount_read(&nbd->refs) != 1) in nbd_cleanup()
2493 nbd_put(nbd); in nbd_cleanup()
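
The lifetime pattern that recurs throughout this listing (nbd_put at 245-250, nbd_config_put at 1222-1256, paired with the refcount_inc_not_zero retries in nbd_open and the netlink handlers) is the kernel's refcount_dec_and_mutex_lock(): drop a reference lock-free on the fast path, and only when it might be the last one, take the lock before the final decrement so teardown is serialized against concurrent lookups. A minimal userspace sketch of that pattern, assuming C11 atomics and a pthread mutex rather than the kernel's refcount_t API; dec_and_mutex_lock, struct dev, and dev_put are hypothetical names chosen to mirror the shape of nbd_put() -> idr_remove() -> nbd_dev_remove() above, not kernel interfaces:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Decrement *ref; iff it reached zero, return true with *lock held
 * so the caller can tear the object down under the lock. */
static bool dec_and_mutex_lock(atomic_int *ref, pthread_mutex_t *lock)
{
	int old = atomic_load(ref);

	/* Fast path: clearly not the last reference, decrement lock-free. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return false;
		/* CAS failure reloaded 'old'; retry. */
	}

	/* Possibly the last reference: lock first, so anyone doing a
	 * locked lookup-and-revive (cf. the inc_not_zero retry loop in
	 * nbd_open at 1505-1513) is ordered against the final drop. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(ref, 1) != 1) {
		pthread_mutex_unlock(lock);
		return false;
	}
	return true; /* count hit zero; caller tears down, then unlocks */
}

struct dev {
	atomic_int refs;
	pthread_mutex_t index_lock; /* stands in for nbd_index_mutex */
};

static void dev_put(struct dev *d)
{
	if (dec_and_mutex_lock(&d->refs, &d->index_lock)) {
		/* Last reference: unpublish from the index while the
		 * lock is held, then free, the same ordering as
		 * idr_remove() followed by nbd_dev_remove() at 249-250. */
		pthread_mutex_unlock(&d->index_lock);
		free(d);
	}
}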