Lines matching refs: tb
References to the struct tb domain pointer in the Thunderbolt domain code (drivers/thunderbolt/domain.c), listed as <source line> <code> in <enclosing function>; a trailing "local" or "argument" tag marks the line on which tb is declared inside that function.
126 struct tb *tb = container_of(dev, struct tb, dev); in boot_acl_show() local
131 uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL); in boot_acl_show()
135 pm_runtime_get_sync(&tb->dev); in boot_acl_show()
137 if (mutex_lock_interruptible(&tb->lock)) { in boot_acl_show()
141 ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl); in boot_acl_show()
143 mutex_unlock(&tb->lock); in boot_acl_show()
146 mutex_unlock(&tb->lock); in boot_acl_show()
148 for (ret = 0, i = 0; i < tb->nboot_acl; i++) { in boot_acl_show()
154 i < tb->nboot_acl - 1 ? "," : "\n"); in boot_acl_show()
158 pm_runtime_mark_last_busy(&tb->dev); in boot_acl_show()
159 pm_runtime_put_autosuspend(&tb->dev); in boot_acl_show()
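Read together, the boot_acl_show() lines above follow the usual sysfs-show pattern for this attribute: allocate a UUID array, wake the domain via runtime PM, take tb->lock interruptibly, let the connection manager fill the ACL, then print one UUID per slot. A condensed sketch of that shape, with the error codes, labels and format calls filled in as assumptions:

        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;             /* assumed error code */
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        mutex_unlock(&tb->lock);
        if (ret)
                goto out;

        /* empty slots print as "", entries are comma separated */
        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%pUb", &uuids[i]);
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                 i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
        kfree(uuids);
        return ret;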
168 struct tb *tb = container_of(dev, struct tb, dev); in boot_acl_store() local
179 if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1) in boot_acl_store()
181 if (count < tb->nboot_acl - 1) in boot_acl_store()
188 acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL); in boot_acl_store()
195 while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) { in boot_acl_store()
211 if (s || i < tb->nboot_acl) { in boot_acl_store()
216 pm_runtime_get_sync(&tb->dev); in boot_acl_store()
218 if (mutex_lock_interruptible(&tb->lock)) { in boot_acl_store()
222 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); in boot_acl_store()
225 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); in boot_acl_store()
227 mutex_unlock(&tb->lock); in boot_acl_store()
230 pm_runtime_mark_last_busy(&tb->dev); in boot_acl_store()
231 pm_runtime_put_autosuspend(&tb->dev); in boot_acl_store()
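The boot_acl_store() lines show the reverse direction: the count checks bound the input to at most nboot_acl UUID strings, strsep() splits the comma-separated list, and the parsed array is handed to cm_ops->set_boot_acl() under tb->lock, followed by a KOBJ_CHANGE uevent so userspace notices the new ACL. A sketch of the parse loop, with the length check, error code and label names assumed:

        while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
                size_t len = strlen(s);

                if (len) {
                        if (len != UUID_STRING_LEN) {
                                ret = -EINVAL;          /* assumed */
                                goto err_free_acl;      /* label assumed */
                        }
                        ret = uuid_parse(s, &acl[i]);
                        if (ret)
                                goto err_free_acl;
                }
                i++;
        }

        /* reject input with leftover tokens or fewer than nboot_acl slots */
        if (s || i < tb->nboot_acl) {
                ret = -EINVAL;
                goto err_free_acl;
        }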
258 struct tb *tb = container_of(dev, struct tb, dev); in security_show() local
261 if (tb->security_level < ARRAY_SIZE(tb_security_names)) in security_show()
262 name = tb_security_names[tb->security_level]; in security_show()
279 struct tb *tb = container_of(dev, struct tb, dev); in domain_attr_is_visible() local
282 if (tb->nboot_acl && in domain_attr_is_visible()
283 tb->cm_ops->get_boot_acl && in domain_attr_is_visible()
284 tb->cm_ops->set_boot_acl) in domain_attr_is_visible()
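These three conditions gate the sysfs visibility of boot_acl: the attribute only appears when the domain advertises a non-zero ACL size and the connection manager implements both accessors. A minimal sketch of the is_visible callback, assuming the attribute is declared as dev_attr_boot_acl:

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;       /* hide boot_acl on this domain */
        }

        return attr->mode;
}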
312 struct tb *tb = container_of(dev, struct tb, dev); in tb_domain_release() local
314 tb_ctl_free(tb->ctl); in tb_domain_release()
315 destroy_workqueue(tb->wq); in tb_domain_release()
316 ida_simple_remove(&tb_domain_ida, tb->index); in tb_domain_release()
317 mutex_destroy(&tb->lock); in tb_domain_release()
318 kfree(tb); in tb_domain_release()
340 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize) in tb_domain_alloc()
342 struct tb *tb; in tb_domain_alloc() local
352 tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL); in tb_domain_alloc()
353 if (!tb) in tb_domain_alloc()
356 tb->nhi = nhi; in tb_domain_alloc()
357 mutex_init(&tb->lock); in tb_domain_alloc()
359 tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL); in tb_domain_alloc()
360 if (tb->index < 0) in tb_domain_alloc()
363 tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index); in tb_domain_alloc()
364 if (!tb->wq) in tb_domain_alloc()
367 tb->dev.parent = &nhi->pdev->dev; in tb_domain_alloc()
368 tb->dev.bus = &tb_bus_type; in tb_domain_alloc()
369 tb->dev.type = &tb_domain_type; in tb_domain_alloc()
370 tb->dev.groups = domain_attr_groups; in tb_domain_alloc()
371 dev_set_name(&tb->dev, "domain%d", tb->index); in tb_domain_alloc()
372 device_initialize(&tb->dev); in tb_domain_alloc()
374 return tb; in tb_domain_alloc()
377 ida_simple_remove(&tb_domain_ida, tb->index); in tb_domain_alloc()
379 kfree(tb); in tb_domain_alloc()
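tb_domain_alloc() allocates struct tb with privsize bytes of connection-manager private data appended to it, reserves an index from tb_domain_ida, creates the ordered "thunderbolt%d" workqueue and initializes (but does not yet register) the domain device; the two trailing lines are its error unwind. A hedged usage sketch from a connection manager's point of view:

/* Sketch of a connection-manager probe path. struct my_cm, my_cm_ops and
 * my_cm_probe() are placeholders; tb_priv() stands for an accessor that
 * returns the privsize bytes appended after struct tb. */
static struct tb *my_cm_probe(struct tb_nhi *nhi)
{
        struct my_cm *cm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(*cm));
        if (!tb)
                return NULL;

        tb->cm_ops = &my_cm_ops;        /* tb_domain_add() rejects a NULL cm_ops */

        cm = tb_priv(tb);               /* private area, already zeroed by kzalloc() */
        cm->nhi = nhi;                  /* placeholder field */

        return tb;
}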
387 struct tb *tb = data; in tb_domain_event_cb() local
389 if (!tb->cm_ops->handle_event) { in tb_domain_event_cb()
390 tb_warn(tb, "domain does not have event handler\n"); in tb_domain_event_cb()
397 return tb_xdomain_handle_request(tb, type, buf, size); in tb_domain_event_cb()
400 tb->cm_ops->handle_event(tb, type, buf, size); in tb_domain_event_cb()
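tb_domain_event_cb() is the callback handed to tb_ctl_alloc() below: without a handler it only warns, XDomain request/response packets are routed to tb_xdomain_handle_request(), and everything else goes to the connection manager. A sketch of the dispatch, where the exact signature and TB_CFG_PKG_* constants are assumptions based on the lines above:

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                /* XDomain protocol traffic is handled by the common code */
                return tb_xdomain_handle_request(tb, type, buf, size);
        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
                break;
        }

        return true;
}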
417 int tb_domain_add(struct tb *tb) in tb_domain_add() argument
421 if (WARN_ON(!tb->cm_ops)) in tb_domain_add()
424 mutex_lock(&tb->lock); in tb_domain_add()
426 tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb); in tb_domain_add()
427 if (!tb->ctl) { in tb_domain_add()
436 tb_ctl_start(tb->ctl); in tb_domain_add()
438 if (tb->cm_ops->driver_ready) { in tb_domain_add()
439 ret = tb->cm_ops->driver_ready(tb); in tb_domain_add()
444 ret = device_add(&tb->dev); in tb_domain_add()
449 if (tb->cm_ops->start) { in tb_domain_add()
450 ret = tb->cm_ops->start(tb); in tb_domain_add()
456 mutex_unlock(&tb->lock); in tb_domain_add()
458 device_init_wakeup(&tb->dev, true); in tb_domain_add()
460 pm_runtime_no_callbacks(&tb->dev); in tb_domain_add()
461 pm_runtime_set_active(&tb->dev); in tb_domain_add()
462 pm_runtime_enable(&tb->dev); in tb_domain_add()
463 pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY); in tb_domain_add()
464 pm_runtime_mark_last_busy(&tb->dev); in tb_domain_add()
465 pm_runtime_use_autosuspend(&tb->dev); in tb_domain_add()
470 device_del(&tb->dev); in tb_domain_add()
472 tb_ctl_stop(tb->ctl); in tb_domain_add()
474 mutex_unlock(&tb->lock); in tb_domain_add()
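tb_domain_add() brings the domain up in a fixed order under tb->lock: allocate and start the control channel, let the connection manager announce itself via driver_ready, register the device, then start the connection manager; only after that are wakeup and runtime PM enabled with the TB_AUTOSUSPEND_DELAY autosuspend delay. A caller-side sketch, reusing the hypothetical my_cm_probe() above and assuming tb_domain_put() drops the device reference taken in tb_domain_alloc():

        struct tb *tb;
        int ret;

        tb = my_cm_probe(nhi);          /* hypothetical helper from the sketch above */
        if (!tb)
                return -ENODEV;

        ret = tb_domain_add(tb);
        if (ret) {
                /*
                 * The domain is refcounted through its embedded struct
                 * device, so a failed add is undone by dropping that
                 * reference, not by kfree().
                 */
                tb_domain_put(tb);
                return ret;
        }

        return 0;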
486 void tb_domain_remove(struct tb *tb) in tb_domain_remove() argument
488 mutex_lock(&tb->lock); in tb_domain_remove()
489 if (tb->cm_ops->stop) in tb_domain_remove()
490 tb->cm_ops->stop(tb); in tb_domain_remove()
492 tb_ctl_stop(tb->ctl); in tb_domain_remove()
493 mutex_unlock(&tb->lock); in tb_domain_remove()
495 flush_workqueue(tb->wq); in tb_domain_remove()
496 device_unregister(&tb->dev); in tb_domain_remove()
505 int tb_domain_suspend_noirq(struct tb *tb) in tb_domain_suspend_noirq() argument
514 mutex_lock(&tb->lock); in tb_domain_suspend_noirq()
515 if (tb->cm_ops->suspend_noirq) in tb_domain_suspend_noirq()
516 ret = tb->cm_ops->suspend_noirq(tb); in tb_domain_suspend_noirq()
518 tb_ctl_stop(tb->ctl); in tb_domain_suspend_noirq()
519 mutex_unlock(&tb->lock); in tb_domain_suspend_noirq()
531 int tb_domain_resume_noirq(struct tb *tb) in tb_domain_resume_noirq() argument
535 mutex_lock(&tb->lock); in tb_domain_resume_noirq()
536 tb_ctl_start(tb->ctl); in tb_domain_resume_noirq()
537 if (tb->cm_ops->resume_noirq) in tb_domain_resume_noirq()
538 ret = tb->cm_ops->resume_noirq(tb); in tb_domain_resume_noirq()
539 mutex_unlock(&tb->lock); in tb_domain_resume_noirq()
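The noirq pair above is symmetric around the control channel: suspend_noirq lets the connection manager save state and then stops tb->ctl, while resume_noirq restarts tb->ctl before the connection manager callback runs. These helpers are meant to be called from the host controller driver's dev_pm_ops; a hedged sketch of that wiring, with the nhi_* wrapper names as placeholders and assuming the struct tb pointer is stored as the PCI driver data:

static int nhi_suspend_noirq(struct device *dev)
{
        struct tb *tb = pci_get_drvdata(to_pci_dev(dev));

        return tb_domain_suspend_noirq(tb);
}

static int nhi_resume_noirq(struct device *dev)
{
        struct tb *tb = pci_get_drvdata(to_pci_dev(dev));

        return tb_domain_resume_noirq(tb);
}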
544 int tb_domain_suspend(struct tb *tb) in tb_domain_suspend() argument
546 return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0; in tb_domain_suspend()
549 int tb_domain_freeze_noirq(struct tb *tb) in tb_domain_freeze_noirq() argument
553 mutex_lock(&tb->lock); in tb_domain_freeze_noirq()
554 if (tb->cm_ops->freeze_noirq) in tb_domain_freeze_noirq()
555 ret = tb->cm_ops->freeze_noirq(tb); in tb_domain_freeze_noirq()
557 tb_ctl_stop(tb->ctl); in tb_domain_freeze_noirq()
558 mutex_unlock(&tb->lock); in tb_domain_freeze_noirq()
563 int tb_domain_thaw_noirq(struct tb *tb) in tb_domain_thaw_noirq() argument
567 mutex_lock(&tb->lock); in tb_domain_thaw_noirq()
568 tb_ctl_start(tb->ctl); in tb_domain_thaw_noirq()
569 if (tb->cm_ops->thaw_noirq) in tb_domain_thaw_noirq()
570 ret = tb->cm_ops->thaw_noirq(tb); in tb_domain_thaw_noirq()
571 mutex_unlock(&tb->lock); in tb_domain_thaw_noirq()
576 void tb_domain_complete(struct tb *tb) in tb_domain_complete() argument
578 if (tb->cm_ops->complete) in tb_domain_complete()
579 tb->cm_ops->complete(tb); in tb_domain_complete()
582 int tb_domain_runtime_suspend(struct tb *tb) in tb_domain_runtime_suspend() argument
584 if (tb->cm_ops->runtime_suspend) { in tb_domain_runtime_suspend()
585 int ret = tb->cm_ops->runtime_suspend(tb); in tb_domain_runtime_suspend()
589 tb_ctl_stop(tb->ctl); in tb_domain_runtime_suspend()
593 int tb_domain_runtime_resume(struct tb *tb) in tb_domain_runtime_resume() argument
595 tb_ctl_start(tb->ctl); in tb_domain_runtime_resume()
596 if (tb->cm_ops->runtime_resume) { in tb_domain_runtime_resume()
597 int ret = tb->cm_ops->runtime_resume(tb); in tb_domain_runtime_resume()
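The runtime-PM pair mirrors the system-sleep one: runtime_suspend calls the connection manager first and stops the control channel only if that succeeded, runtime_resume restarts the control channel before the connection manager callback. A condensed sketch filling in the error checks and return values the listing elides:

int tb_domain_runtime_suspend(struct tb *tb)
{
        if (tb->cm_ops->runtime_suspend) {
                int ret = tb->cm_ops->runtime_suspend(tb);

                if (ret)
                        return ret;
        }
        tb_ctl_stop(tb->ctl);
        return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->runtime_resume) {
                int ret = tb->cm_ops->runtime_resume(tb);

                if (ret)
                        return ret;
        }
        return 0;
}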
613 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw) in tb_domain_approve_switch() argument
617 if (!tb->cm_ops->approve_switch) in tb_domain_approve_switch()
625 return tb->cm_ops->approve_switch(tb, sw); in tb_domain_approve_switch()
639 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw) in tb_domain_approve_switch_key() argument
644 if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key) in tb_domain_approve_switch_key()
652 ret = tb->cm_ops->add_switch_key(tb, sw); in tb_domain_approve_switch_key()
656 return tb->cm_ops->approve_switch(tb, sw); in tb_domain_approve_switch_key()
671 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw) in tb_domain_challenge_switch_key() argument
681 if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key) in tb_domain_challenge_switch_key()
690 ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response); in tb_domain_challenge_switch_key()
725 return tb->cm_ops->approve_switch(tb, sw); in tb_domain_challenge_switch_key()
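The three approval helpers cover the possible authorization paths: tb_domain_approve_switch() is the plain approval, tb_domain_approve_switch_key() stores the key with the connection manager before approving, and tb_domain_challenge_switch_key() runs a challenge/response against a previously stored key before approving. A hedged sketch of how a caller might pick between them from the value written to a switch's "authorized" attribute; the val/sw variables and the sw->key field are assumptions:

        /* sketch: 1 = approve (adding the key if one is set), 2 = challenge */
        switch (val) {
        case 1:
                if (sw->key)
                        ret = tb_domain_approve_switch_key(tb, sw);
                else
                        ret = tb_domain_approve_switch(tb, sw);
                break;
        case 2:
                if (sw->key)
                        ret = tb_domain_challenge_switch_key(tb, sw);
                else
                        ret = -EINVAL;
                break;
        default:
                ret = -EINVAL;
                break;
        }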
744 int tb_domain_disconnect_pcie_paths(struct tb *tb) in tb_domain_disconnect_pcie_paths() argument
746 if (!tb->cm_ops->disconnect_pcie_paths) in tb_domain_disconnect_pcie_paths()
749 return tb->cm_ops->disconnect_pcie_paths(tb); in tb_domain_disconnect_pcie_paths()
764 int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in tb_domain_approve_xdomain_paths() argument
766 if (!tb->cm_ops->approve_xdomain_paths) in tb_domain_approve_xdomain_paths()
769 return tb->cm_ops->approve_xdomain_paths(tb, xd); in tb_domain_approve_xdomain_paths()
784 int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in tb_domain_disconnect_xdomain_paths() argument
786 if (!tb->cm_ops->disconnect_xdomain_paths) in tb_domain_disconnect_xdomain_paths()
789 return tb->cm_ops->disconnect_xdomain_paths(tb, xd); in tb_domain_disconnect_xdomain_paths()
795 struct tb *tb = data; in disconnect_xdomain() local
799 if (xd && xd->tb == tb) in disconnect_xdomain()
815 int tb_domain_disconnect_all_paths(struct tb *tb) in tb_domain_disconnect_all_paths() argument
819 ret = tb_domain_disconnect_pcie_paths(tb); in tb_domain_disconnect_all_paths()
823 return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain); in tb_domain_disconnect_all_paths()
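tb_domain_disconnect_all_paths() first tears down the PCIe tunnels through the connection manager and then walks the Thunderbolt bus, disabling the paths of every XDomain connection that belongs to this domain. A sketch of the per-device callback, assuming tb_to_xdomain() returns NULL for non-XDomain devices and tb_xdomain_disable_paths() is the teardown helper:

static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;
        struct tb *tb = data;
        int ret = 0;

        xd = tb_to_xdomain(dev);        /* NULL unless dev is an XDomain */
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_paths(xd);

        return ret;
}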