// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
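
/*
 * Illustrative lifecycle of a request object (informational sketch, not
 * part of the driver; error handling abbreviated):
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	// fill in req, then hand it to tb_cfg_request() or
 *	// tb_cfg_request_sync() (see tb_cfg_reset() for a real user)
 *	tb_cfg_request_put(req);	// drop the initial reference
 *
 * Any other holder takes its own reference with tb_cfg_request_get()
 * and releases it with tb_cfg_request_put().
 */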

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)\n",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

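/* CRC-32C (Castagnoli) over the payload; control frames carry it big-endian */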
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
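
/*
 * Informational: the frame built above carries len bytes of payload
 * converted to big-endian dwords, followed by a 4-byte CRC-32C computed
 * over that converted payload, hence frame.size = len + 4. Both sof and
 * eof carry the packet type.
 */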

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
						* We ignore failures during stop.
						* All rx packets are referenced
						* from ctl->rx_packets, so we do
						* not lose them.
						*/
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after the timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
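
/*
 * Note on ownership: tb_cfg_request() takes its own reference to @req
 * (dropped in tb_cfg_request_work() once the callback has run), so the
 * request stays alive until it completes. A minimal waiting caller looks
 * like tb_cfg_request_sync() below:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	if (!tb_cfg_request(ctl, req, tb_cfg_request_complete, &done))
 *		wait_for_completion(&done);
 */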

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in milliseconds to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}
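
/*
 * Example use of tb_cfg_read() (illustrative sketch, not part of the
 * driver): read one dword at offset 2 of the router config space of the
 * switch at @route.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 2, 1);
 *	if (ret)
 *		return ret;
 */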

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}