// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

#define define_fw_printk_level(func, kern_level)                \
void func(const struct fw_card *card, const char *fmt, ...)     \
{                                                               \
        struct va_format vaf;                                   \
        va_list args;                                           \
                                                                \
        va_start(args, fmt);                                    \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
        printk(kern_level KBUILD_MODNAME " %s: %pV",            \
               dev_name(card->device), &vaf);                   \
        va_end(args);                                           \
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);

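/*
 * Compute the CRC-16 (ITU-T) of a config ROM block.  The block's header
 * quadlet carries the block length (in quadlets) in bits 16..23; the
 * computed CRC is OR'ed into the low 16 bits of the header, which the
 * caller must have initialized to zero.  Returns the block length.
 */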
int fw_compute_block_crc(__be32 *block)
{
        int length;
        u16 crc;

        length = (be32_to_cpu(block[0]) >> 16) & 0xff;
        crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
        *block |= cpu_to_be32(crc);

        return length;
}

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;

#define BIB_CRC(v)              ((v) <<  0)
#define BIB_CRC_LENGTH(v)       ((v) << 16)
#define BIB_INFO_LENGTH(v)      ((v) << 24)
#define BIB_BUS_NAME            0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)       ((v) <<  0)
#define BIB_GENERATION(v)       ((v) <<  4)
#define BIB_MAX_ROM(v)          ((v) <<  8)
#define BIB_MAX_RECEIVE(v)      ((v) << 12)
#define BIB_CYC_CLK_ACC(v)      ((v) << 16)
#define BIB_PMC                 ((1) << 27)
#define BIB_BMC                 ((1) << 28)
#define BIB_ISC                 ((1) << 29)
#define BIB_CMC                 ((1) << 30)
#define BIB_IRMC                ((1) << 31)
#define NODE_CAPABILITIES       0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT   (2 * 8000)

#define CANON_OUI               0x000085

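/*
 * Layout of the generated config ROM, in quadlets:
 *
 *   0      ROM header (info_length, crc_length, CRC)
 *   1..4   bus info block: "1394", capability bits, 64-bit GUID
 *   5      root directory header (length filled in below)
 *   6      node capabilities immediate entry
 *   7..    one root directory entry per registered descriptor,
 *          plus an extra immediate entry if desc->immediate is set
 *   ...    descriptor data, copied in registration order
 */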
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
        struct fw_descriptor *desc;
        int i, j, k, length;

        /*
         * Initialize the contents of the config ROM buffer.  On the OHCI
         * controller, block reads of the config ROM access host memory,
         * but quadlet reads access the hardware bus info block registers.
         * That's just crack, but it means we must make sure that the bus
         * info block in host memory matches the version stored in the
         * OHCI registers.
         */

        config_rom[0] = cpu_to_be32(
                BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
        config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
        config_rom[2] = cpu_to_be32(
                BIB_LINK_SPEED(card->link_speed) |
                BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
                BIB_MAX_ROM(2) |
                BIB_MAX_RECEIVE(card->max_receive) |
                BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
        config_rom[3] = cpu_to_be32(card->guid >> 32);
        config_rom[4] = cpu_to_be32(card->guid);

        /* Generate root directory. */
        config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
        i = 7;
        j = 7 + descriptor_count;

        /* Generate root directory entries for descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                if (desc->immediate > 0)
                        config_rom[i++] = cpu_to_be32(desc->immediate);
                config_rom[i] = cpu_to_be32(desc->key | (j - i));
                i++;
                j += desc->length;
        }

        /* Update root directory length. */
        config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

        /* End of root directory, now copy in descriptors. */
        list_for_each_entry (desc, &descriptor_list, link) {
                for (k = 0; k < desc->length; k++)
                        config_rom[i + k] = cpu_to_be32(desc->data[k]);
                i += desc->length;
        }

        /*
         * Calculate CRCs for all blocks in the config ROM.  This assumes
         * that CRC length and info length are identical for the bus info
         * block, which is always the case for this implementation.
         */
        for (i = 0; i < j; i += length + 1)
                length = fw_compute_block_crc(config_rom + i);

        WARN_ON(j != config_rom_length);
}

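/*
 * Regenerate the config ROM and hand it to every registered card.
 * Caller must hold card_mutex.
 */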
static void update_config_roms(void)
{
        struct fw_card *card;

        list_for_each_entry (card, &card_list, link) {
                generate_config_rom(card, tmp_config_rom);
                card->driver->set_config_rom(card, tmp_config_rom,
                                             config_rom_length);
        }
}

static size_t required_space(struct fw_descriptor *desc)
{
        /* descriptor + entry into root dir + optional immediate entry */
        return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}

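/*
 * Register a descriptor for inclusion in the root directory of every
 * card's config ROM.  Fails with -EINVAL if the descriptor's internal
 * block lengths do not add up to desc->length, and with -EBUSY if the
 * ROM would grow beyond 256 quadlets (1 kB).
 */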
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
        size_t i;
        int ret;

        /*
         * Check that the descriptor is valid: the lengths of all blocks
         * in the descriptor have to add up to exactly desc->length.
         */
        i = 0;
        while (i < desc->length)
                i += (desc->data[i] >> 16) + 1;

        if (i != desc->length)
                return -EINVAL;

        mutex_lock(&card_mutex);

        if (config_rom_length + required_space(desc) > 256) {
                ret = -EBUSY;
        } else {
                list_add_tail(&desc->link, &descriptor_list);
                config_rom_length += required_space(desc);
                descriptor_count++;
                if (desc->immediate > 0)
                        descriptor_count++;
                update_config_roms();
                ret = 0;
        }

        mutex_unlock(&card_mutex);

        return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);

void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
        mutex_lock(&card_mutex);

        list_del(&desc->link);
        config_rom_length -= required_space(desc);
        descriptor_count--;
        if (desc->immediate > 0)
                descriptor_count--;
        update_config_roms();

        mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);

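/*
 * Initiate a long bus reset through PHY register 1, or a short
 * (arbitrated) reset through the 1394a extended PHY register 5.
 */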
static int reset_bus(struct fw_card *card, bool short_reset)
{
        int reg = short_reset ? 5 : 1;
        int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

        return card->driver->update_phy_reg(card, reg, 0, bit);
}

void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
        /* We don't try hard to sort out requests of long vs. short resets. */
        card->br_short = short_reset;

        /* Use an arbitrary short delay to combine multiple reset requests. */
        fw_card_get(card);
        if (!queue_delayed_work(fw_workqueue, &card->br_work,
                                delayed ? DIV_ROUND_UP(HZ, 100) : 0))
                fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);

static void br_work(struct work_struct *work)
{
        struct fw_card *card = container_of(work, struct fw_card, br_work.work);

        /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
        if (card->reset_jiffies != 0 &&
            time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
                if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
                        fw_card_put(card);
                return;
        }

        fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
                           FW_PHY_CONFIG_CURRENT_GAP_COUNT);
        reset_bus(card, card->br_short);
        fw_card_put(card);
}

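/*
 * If not already done in this generation, claim isochronous channel 31
 * (the broadcast channel) from the IRM, then propagate the channel to
 * all child devices.  Called only when the local node is the IRM.
 */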
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
        int channel, bandwidth = 0;

        if (!card->broadcast_channel_allocated) {
                fw_iso_resource_manage(card, generation, 1ULL << 31,
                                       &channel, &bandwidth, true);
                if (channel != 31) {
                        fw_notice(card, "failed to allocate broadcast channel\n");
                        return;
                }
                card->broadcast_channel_allocated = true;
        }

        device_for_each_child(card->device, (void *)(long)generation,
                              fw_device_set_broadcast_channel);
}

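/* Gap count as a function of the maximum hop count, per 1394a table E-1. */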
static const char gap_count_table[] = {
        63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};

void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
        fw_card_get(card);
        if (!schedule_delayed_work(&card->bm_work, delay))
                fw_card_put(card);
}

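/*
 * Bus manager work:  try to become bus manager by a compare-swap lock
 * on the IRM's BUS_MANAGER_ID register, then make sure the root node is
 * cycle master capable and optimize the gap count, issuing another bus
 * reset if a better root or gap count was found.  At most five resets
 * are attempted for the same physical topology.
 */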
static void bm_work(struct work_struct *work)
{
        struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
        struct fw_device *root_device, *irm_device;
        struct fw_node *root_node;
        int root_id, new_root_id, irm_id, bm_id, local_id;
        int gap_count, generation, grace, rcode;
        bool do_reset = false;
        bool root_device_is_running;
        bool root_device_is_cmc;
        bool irm_is_1394_1995_only;
        bool keep_this_irm;
        __be32 transaction_data[2];

        spin_lock_irq(&card->lock);

        if (card->local_node == NULL) {
                spin_unlock_irq(&card->lock);
                goto out_put_card;
        }

        generation = card->generation;

        root_node = card->root_node;
        fw_node_get(root_node);
        root_device = root_node->data;
        root_device_is_running = root_device &&
                        atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
        root_device_is_cmc = root_device && root_device->cmc;

        irm_device = card->irm_node->data;
        irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
                        (irm_device->config_rom[2] & 0x000000f0) == 0;

        /* Canon MV5i works unreliably if it is not root node. */
        keep_this_irm = irm_device && irm_device->config_rom &&
                        irm_device->config_rom[3] >> 8 == CANON_OUI;

        root_id  = root_node->node_id;
        irm_id   = card->irm_node->node_id;
        local_id = card->local_node->node_id;

        grace = time_after64(get_jiffies_64(),
                             card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

        if ((is_next_generation(generation, card->bm_generation) &&
             !card->bm_abdicate) ||
            (card->bm_generation != generation && grace)) {
                /*
                 * The first step is to figure out who is IRM and then
                 * try to become bus manager.  If the IRM is not well
                 * defined (e.g. does not have an active link layer or
                 * does not respond to our lock request), we will have
                 * to do a little vigilante bus management.  In that
                 * case, we do a goto into the gap count logic so that
                 * when we do the reset, we still optimize the gap
                 * count.  That could well save a reset in the next
                 * generation.
                 */

                if (!card->irm_node->link_on) {
                        new_root_id = local_id;
                        fw_notice(card, "%s, making local node (%02x) root\n",
                                  "IRM has link off", new_root_id);
                        goto pick_me;
                }

                if (irm_is_1394_1995_only && !keep_this_irm) {
                        new_root_id = local_id;
                        fw_notice(card, "%s, making local node (%02x) root\n",
                                  "IRM is not 1394a compliant", new_root_id);
                        goto pick_me;
                }

                transaction_data[0] = cpu_to_be32(0x3f);
                transaction_data[1] = cpu_to_be32(local_id);

                spin_unlock_irq(&card->lock);

                rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
                                transaction_data, 8);

                if (rcode == RCODE_GENERATION)
                        /* Another bus reset, BM work has been rescheduled. */
                        goto out;

                bm_id = be32_to_cpu(transaction_data[0]);

                spin_lock_irq(&card->lock);
                if (rcode == RCODE_COMPLETE && generation == card->generation)
                        card->bm_node_id =
                            bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
                spin_unlock_irq(&card->lock);

                if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
                        /* Somebody else is BM.  Only act as IRM. */
                        if (local_id == irm_id)
                                allocate_broadcast_channel(card, generation);

                        goto out;
                }

                if (rcode == RCODE_SEND_ERROR) {
                        /*
                         * We have been unable to send the lock request due to
                         * some local problem.  Let's try again later and hope
                         * that the problem has gone away by then.
                         */
                        fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                        goto out;
                }

                spin_lock_irq(&card->lock);

                if (rcode != RCODE_COMPLETE && !keep_this_irm) {
                        /*
                         * The lock request failed, maybe the IRM
                         * isn't really IRM capable after all.  Let's
                         * do a bus reset and pick the local node as
                         * root, and thus, IRM.
                         */
                        new_root_id = local_id;
                        fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
                                  fw_rcode_string(rcode), new_root_id);
                        goto pick_me;
                }
        } else if (card->bm_generation != generation) {
                /*
                 * We weren't BM in the last generation, and the last
                 * bus reset is less than 125ms ago.  Reschedule this job.
                 */
                spin_unlock_irq(&card->lock);
                fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                goto out;
        }

        /*
         * We're bus manager for this generation, so next step is to
         * make sure we have an active cycle master and do gap count
         * optimization.
         */
        card->bm_generation = generation;

        if (root_device == NULL) {
                /*
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
                 */
                new_root_id = local_id;
        } else if (!root_device_is_running) {
                /*
                 * If we haven't probed this device yet, bail out now
                 * and let's try again once that's done.
                 */
                spin_unlock_irq(&card->lock);
                goto out;
        } else if (root_device_is_cmc) {
                /*
                 * We will send out a force root packet for this
                 * node as part of the gap count optimization.
                 */
                new_root_id = root_id;
        } else {
                /*
                 * Current root has an active link layer and we
                 * successfully read the config rom, but it's not
                 * cycle master capable.
                 */
                new_root_id = local_id;
        }

 pick_me:
        /*
         * Pick a gap count from 1394a table E-1.  The table doesn't cover
         * the typically much larger 1394b beta repeater delays though.
         */
        if (!card->beta_repeaters_present &&
            root_node->max_hops < ARRAY_SIZE(gap_count_table))
                gap_count = gap_count_table[root_node->max_hops];
        else
                gap_count = 63;

        /*
         * Finally, figure out if we should do a reset or not.  If we have
         * done less than 5 resets with the same physical topology and we
         * have either a new root or a new gap count setting, let's do it.
         */

        if (card->bm_retries++ < 5 &&
            (card->gap_count != gap_count || new_root_id != root_id))
                do_reset = true;

        spin_unlock_irq(&card->lock);

        if (do_reset) {
                fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
                          new_root_id, gap_count);
                fw_send_phy_config(card, new_root_id, generation, gap_count);
                reset_bus(card, true);
                /* Will allocate broadcast channel after the reset. */
                goto out;
        }

        if (root_device_is_cmc) {
                /*
                 * Make sure that the cycle master sends cycle start packets.
                 */
                transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
                rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
                                root_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_STATE_SET,
                                transaction_data, 4);
                if (rcode == RCODE_GENERATION)
                        goto out;
        }

        if (local_id == irm_id)
                allocate_broadcast_channel(card, generation);

 out:
        fw_node_put(root_node);
 out_put_card:
        fw_card_put(card);
}

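/*
 * Set up the software state of a card before it is enabled.  The split
 * timeout defaults to 2 seconds (16000 cycles of 125 us each), well
 * above the 100 ms the spec suggests, to accommodate slow device
 * firmware.
 */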
void fw_card_initialize(struct fw_card *card,
                        const struct fw_card_driver *driver,
                        struct device *device)
{
        static atomic_t index = ATOMIC_INIT(-1);

        card->index = atomic_inc_return(&index);
        card->driver = driver;
        card->device = device;
        card->current_tlabel = 0;
        card->tlabel_mask = 0;
        card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
        card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
        card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
        card->split_timeout_jiffies =
                        DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
        card->color = 0;
        card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

        kref_init(&card->kref);
        init_completion(&card->done);
        INIT_LIST_HEAD(&card->transaction_list);
        INIT_LIST_HEAD(&card->phy_receiver_list);
        spin_lock_init(&card->lock);

        card->local_node = NULL;

        INIT_DELAYED_WORK(&card->br_work, br_work);
        INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);

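/*
 * Enable the card with a freshly generated config ROM and add it to the
 * global card list so that later descriptor updates reach it.
 */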
int fw_card_add(struct fw_card *card,
                u32 max_receive, u32 link_speed, u64 guid)
{
        int ret;

        card->max_receive = max_receive;
        card->link_speed = link_speed;
        card->guid = guid;

        mutex_lock(&card_mutex);

        generate_config_rom(card, tmp_config_rom);
        ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
        if (ret == 0)
                list_add_tail(&card->link, &card_list);

        mutex_unlock(&card_mutex);

        return ret;
}
EXPORT_SYMBOL(fw_card_add);

/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all I/O to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called after the dummy driver is bound,
 * since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

static int dummy_read_phy_reg(struct fw_card *card, int address)
{
        return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
                                int clear_bits, int set_bits)
{
        return -ENODEV;
}

static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
        packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
                                 int node_id, int generation)
{
        return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
                                int type, int channel, size_t header_size)
{
        return ERR_PTR(-ENODEV);
}

static int dummy_start_iso(struct fw_iso_context *ctx,
                           s32 cycle, u32 sync, u32 tags)
{
        return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
        return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
                           struct fw_iso_buffer *buffer, unsigned long payload)
{
        return -ENODEV;
}

static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
        return -ENODEV;
}

static const struct fw_card_driver dummy_driver_template = {
        .read_phy_reg           = dummy_read_phy_reg,
        .update_phy_reg         = dummy_update_phy_reg,
        .send_request           = dummy_send_request,
        .send_response          = dummy_send_response,
        .cancel_packet          = dummy_cancel_packet,
        .enable_phys_dma        = dummy_enable_phys_dma,
        .allocate_iso_context   = dummy_allocate_iso_context,
        .start_iso              = dummy_start_iso,
        .set_iso_channels       = dummy_set_iso_channels,
        .queue_iso              = dummy_queue_iso,
        .flush_queue_iso        = dummy_flush_queue_iso,
        .flush_iso_completions  = dummy_flush_iso_completions,
};

void fw_card_release(struct kref *kref)
{
        struct fw_card *card = container_of(kref, struct fw_card, kref);

        complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);

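/*
 * Teardown:  clear the PHY's link-active and contender bits, schedule a
 * final bus reset so other nodes notice, swap in the dummy driver,
 * destroy the node tree, and wait until the last reference to the card
 * has been dropped.
 */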
void fw_core_remove_card(struct fw_card *card)
{
        struct fw_card_driver dummy_driver = dummy_driver_template;
        unsigned long flags;

        card->driver->update_phy_reg(card, 4,
                                     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
        fw_schedule_bus_reset(card, false, true);

        mutex_lock(&card_mutex);
        list_del_init(&card->link);
        mutex_unlock(&card_mutex);

        /* Switch off most of the card driver interface. */
        dummy_driver.free_iso_context = card->driver->free_iso_context;
        dummy_driver.stop_iso = card->driver->stop_iso;
        card->driver = &dummy_driver;

        spin_lock_irqsave(&card->lock, flags);
        fw_destroy_nodes(card);
        spin_unlock_irqrestore(&card->lock, flags);

        /* Wait for all users, especially device workqueue jobs, to finish. */
        fw_card_put(card);
        wait_for_completion(&card->done);

        WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);