Lines Matching +full:pd +full:- +full:revision

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
13 #include <linux/dma-mapping.h>
15 #include <linux/firewire-constants.h>
44 #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
45 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
46 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
106 * A buffer that contains a block of DMA-able coherent memory used for
126 * List of page-sized buffers for storing DMA descriptors.
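From the accessors matched further down (buffer_bus, buffer_size, used, and the buffer_list links), the descriptor buffer appears to be laid out roughly as follows; a sketch with the field set inferred from this excerpt, not authoritative:

    struct descriptor_buffer {
            struct list_head list;          /* links into the context's buffer_list */
            dma_addr_t buffer_bus;          /* bus address of buffer[] below */
            size_t buffer_size;             /* usable bytes in buffer[] */
            size_t used;                    /* bytes handed out so far */
            struct descriptor buffer[];     /* the DMA descriptors themselves */
    };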
284 unsigned short vendor, device, revision, flags; member
357 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
360 ", or a combination, or all = -1)");
406 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
408 static const char port[] = { '.', '-', 'p', 'c', };
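For reference, the two-bit self-ID port codes these characters index (per IEEE 1394; annotation added here, not from the source):

    /* 0 '.' = port not present, 1 '-' = not connected,
     * 2 'p' = connected to parent, 3 'c' = connected to child */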
423 self_id_count, generation, ohci->node_id); in log_selfids()
425 for (s = ohci->self_id_buffer; self_id_count--; ++s) in log_selfids()
442 [0x00] = "evt_no_status", [0x01] = "-reserved-",
448 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
450 [0x10] = "-reserved-", [0x11] = "ack_complete",
451 [0x12] = "ack_pending ", [0x13] = "-reserved-",
453 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
454 [0x18] = "-reserved-", [0x19] = "-reserved-",
455 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
456 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
457 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
462 [0x2] = "W resp", [0x3] = "-reserved-",
467 [0xc] = "-reserved-", [0xd] = "-reserved-",
468 [0xe] = "link internal", [0xf] = "-reserved-",
513 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", in log_ar_at_event()
520 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", in log_ar_at_event()
529 writel(data, ohci->registers + offset); in reg_write()
534 return readl(ohci->registers + offset); in reg_read()
545 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
558 return -ENODEV; /* Card was ejected. */ in read_phy_reg()
573 return -EBUSY; in read_phy_reg()
585 return -ENODEV; /* Card was ejected. */ in write_phy_reg()
596 return -EBUSY; in write_phy_reg()
632 mutex_lock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
634 mutex_unlock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
645 mutex_lock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
647 mutex_unlock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
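Assembled from the matched lines, the exported PHY accessors are thin locked wrappers around the unlocked helpers; a sketch of the pattern:

    static int ohci_read_phy_reg(struct fw_card *card, int addr)
    {
            struct fw_ohci *ohci = fw_ohci(card);
            int ret;

            mutex_lock(&ohci->phy_reg_mutex);
            ret = read_phy_reg(ohci, addr);         /* unlocked helper */
            mutex_unlock(&ohci->phy_reg_mutex);

            return ret;
    }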
654 return page_private(ctx->pages[i]); in ar_buffer_bus()
661 d = &ctx->descriptors[index]; in ar_context_link_page()
662 d->branch_address &= cpu_to_le32(~0xf); in ar_context_link_page()
663 d->res_count = cpu_to_le16(PAGE_SIZE); in ar_context_link_page()
664 d->transfer_status = 0; in ar_context_link_page()
667 d = &ctx->descriptors[ctx->last_buffer_index]; in ar_context_link_page()
668 d->branch_address |= cpu_to_le32(1); in ar_context_link_page()
670 ctx->last_buffer_index = index; in ar_context_link_page()
672 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ar_context_link_page()
677 struct device *dev = ctx->ohci->card.device; in ar_context_release()
680 vunmap(ctx->buffer); in ar_context_release()
683 if (ctx->pages[i]) in ar_context_release()
684 dma_free_pages(dev, PAGE_SIZE, ctx->pages[i], in ar_context_release()
691 struct fw_ohci *ohci = ctx->ohci; in ar_context_abort()
693 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { in ar_context_abort()
694 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in ar_context_abort()
709 return ar_next_buffer_index(ctx->last_buffer_index); in ar_first_buffer_index()
719 unsigned int i, next_i, last = ctx->last_buffer_index; in ar_search_last_active_buffer()
723 res_count = READ_ONCE(ctx->descriptors[i].res_count); in ar_search_last_active_buffer()
731 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
747 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
762 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); in ar_search_last_active_buffer()
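res_count counts down from req_count as the controller fills a page, so the bytes received are the difference, as the line above computes; a hypothetical helper spelling that out:

    /* bytes the controller has written into one PAGE_SIZE AR buffer */
    static unsigned int ar_bytes_filled(__le16 res_count)
    {
            return PAGE_SIZE - le16_to_cpu(res_count);
    }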
779 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
785 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
792 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
799 struct fw_ohci *ohci = ctx->ohci; in handle_ar_packet()
855 p.ack = evt - 16; in handle_ar_packet()
858 p.generation = ohci->request_generation; in handle_ar_packet()
884 if (!(ohci->quirks & QUIRK_RESET_PACKET)) in handle_ar_packet()
885 ohci->request_generation = (p.header[2] >> 16) & 0xff; in handle_ar_packet()
886 } else if (ctx == &ohci->ar_request_ctx) { in handle_ar_packet()
887 fw_core_handle_request(&ohci->card, &p); in handle_ar_packet()
889 fw_core_handle_response(&ohci->card, &p); in handle_ar_packet()
915 dma_sync_single_for_device(ctx->ohci->card.device, in ar_recycle_buffers()
929 p = ctx->pointer; in ar_context_tasklet()
936 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; in ar_context_tasklet()
945 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
950 p -= AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
960 ctx->pointer = p; in ar_context_tasklet()
966 ctx->pointer = NULL; in ar_context_tasklet()
972 struct device *dev = ohci->card.device; in ar_context_init()
978 ctx->regs = regs; in ar_context_init()
979 ctx->ohci = ohci; in ar_context_init()
980 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); in ar_context_init()
983 ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr, in ar_context_init()
985 if (!ctx->pages[i]) in ar_context_init()
987 set_page_private(ctx->pages[i], dma_addr); in ar_context_init()
993 pages[i] = ctx->pages[i]; in ar_context_init()
995 pages[AR_BUFFERS + i] = ctx->pages[i]; in ar_context_init()
996 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); in ar_context_init()
997 if (!ctx->buffer) in ar_context_init()
1000 ctx->descriptors = ohci->misc_buffer + descriptors_offset; in ar_context_init()
1001 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; in ar_context_init()
1004 d = &ctx->descriptors[i]; in ar_context_init()
1005 d->req_count = cpu_to_le16(PAGE_SIZE); in ar_context_init()
1006 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in ar_context_init()
1009 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); in ar_context_init()
1010 d->branch_address = cpu_to_le32(ctx->descriptors_bus + in ar_context_init()
1019 return -ENOMEM; in ar_context_init()
1029 ctx->pointer = ctx->buffer; in ar_context_run()
1031 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); in ar_context_run()
1032 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); in ar_context_run()
1039 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); in find_branch_descriptor()
1045 return d + z - 1; in find_branch_descriptor()
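find_branch_descriptor() returns the descriptor carrying the block's branchAddress; per OHCI, the low four bits of that field encode Z, the descriptor count of the next block. A sketch (helper name hypothetical):

    static inline int branch_z(const struct descriptor *d)
    {
            return le32_to_cpu(d->branch_address) & 0xf;    /* bits [3:0] = Z */
    }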
1056 desc = list_entry(ctx->buffer_list.next, in context_tasklet()
1058 last = ctx->last; in context_tasklet()
1059 while (last->branch_address != 0) { in context_tasklet()
1061 address = le32_to_cpu(last->branch_address); in context_tasklet()
1064 ctx->current_bus = address; in context_tasklet()
1068 if (address < desc->buffer_bus || in context_tasklet()
1069 address >= desc->buffer_bus + desc->used) in context_tasklet()
1070 desc = list_entry(desc->list.next, in context_tasklet()
1072 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); in context_tasklet()
1075 if (!ctx->callback(ctx, d, last)) in context_tasklet()
1082 old_desc->used = 0; in context_tasklet()
1083 spin_lock_irqsave(&ctx->ohci->lock, flags); in context_tasklet()
1084 list_move_tail(&old_desc->list, &ctx->buffer_list); in context_tasklet()
1085 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in context_tasklet()
1087 ctx->last = last; in context_tasklet()
1093 * context. Must be called with ohci->lock held.
1103 * program. This will catch run-away userspace or DoS attacks. in context_add_buffer()
1105 if (ctx->total_allocation >= 16*1024*1024) in context_add_buffer()
1106 return -ENOMEM; in context_add_buffer()
1108 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, in context_add_buffer()
1111 return -ENOMEM; in context_add_buffer()
1113 offset = (void *)&desc->buffer - (void *)desc; in context_add_buffer()
1115 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads in context_add_buffer()
1116 * for descriptors, even 0x10-byte ones. This can cause page faults when in context_add_buffer()
1120 desc->buffer_size = PAGE_SIZE - offset - 0x10; in context_add_buffer()
1121 desc->buffer_bus = bus_addr + offset; in context_add_buffer()
1122 desc->used = 0; in context_add_buffer()
1124 list_add_tail(&desc->list, &ctx->buffer_list); in context_add_buffer()
1125 ctx->total_allocation += PAGE_SIZE; in context_add_buffer()
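The 0x10-byte reserve in the buffer_size computation above is exactly enough: with 0x10-byte descriptors, the last one that fits starts at PAGE_SIZE - 0x20, so even a quirky 0x20-byte burst read of it ends on the page boundary instead of faulting into the next page. Illustrative bound:

    /* last descriptor start + oversized burst:
     * (PAGE_SIZE - 0x10 - 0x10) + 0x20 == PAGE_SIZE, never beyond */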
1133 ctx->ohci = ohci; in context_init()
1134 ctx->regs = regs; in context_init()
1135 ctx->total_allocation = 0; in context_init()
1137 INIT_LIST_HEAD(&ctx->buffer_list); in context_init()
1139 return -ENOMEM; in context_init()
1141 ctx->buffer_tail = list_entry(ctx->buffer_list.next, in context_init()
1144 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); in context_init()
1145 ctx->callback = callback; in context_init()
1152 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); in context_init()
1153 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); in context_init()
1154 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); in context_init()
1155 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); in context_init()
1156 ctx->last = ctx->buffer_tail->buffer; in context_init()
1157 ctx->prev = ctx->buffer_tail->buffer; in context_init()
1158 ctx->prev_z = 1; in context_init()
1165 struct fw_card *card = &ctx->ohci->card; in context_release()
1168 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) in context_release()
1169 dma_free_coherent(card->device, PAGE_SIZE, desc, in context_release()
1170 desc->buffer_bus - in context_release()
1171 ((void *)&desc->buffer - (void *)desc)); in context_release()
1174 /* Must be called with ohci->lock held */
1179 struct descriptor_buffer *desc = ctx->buffer_tail; in context_get_descriptors()
1181 if (z * sizeof(*d) > desc->buffer_size) in context_get_descriptors()
1184 if (z * sizeof(*d) > desc->buffer_size - desc->used) { in context_get_descriptors()
1188 if (desc->list.next == &ctx->buffer_list) { in context_get_descriptors()
1194 desc = list_entry(desc->list.next, in context_get_descriptors()
1196 ctx->buffer_tail = desc; in context_get_descriptors()
1199 d = desc->buffer + desc->used / sizeof(*d); in context_get_descriptors()
1201 *d_bus = desc->buffer_bus + desc->used; in context_get_descriptors()
1208 struct fw_ohci *ohci = ctx->ohci; in context_run()
1210 reg_write(ohci, COMMAND_PTR(ctx->regs), in context_run()
1211 le32_to_cpu(ctx->last->branch_address)); in context_run()
1212 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); in context_run()
1213 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); in context_run()
1214 ctx->running = true; in context_run()
1222 struct descriptor_buffer *desc = ctx->buffer_tail; in context_append()
1225 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); in context_append()
1227 desc->used += (z + extra) * sizeof(*d); in context_append()
1231 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); in context_append()
1232 d_branch->branch_address = cpu_to_le32(d_bus | z); in context_append()
1237 * multi-descriptor block starting with an INPUT_MORE, put a copy of in context_append()
1243 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && in context_append()
1244 d_branch != ctx->prev && in context_append()
1245 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == in context_append()
1247 ctx->prev->branch_address = cpu_to_le32(d_bus | z); in context_append()
1250 ctx->prev = d; in context_append()
1251 ctx->prev_z = z; in context_append()
1256 struct fw_ohci *ohci = ctx->ohci; in context_stop()
1260 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in context_stop()
1261 ctx->running = false; in context_stop()
1264 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); in context_stop()
1281 * Must always be called with the ohci->lock held to ensure proper
1287 struct fw_ohci *ohci = ctx->ohci; in at_context_queue_packet()
1296 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1297 return -1; in at_context_queue_packet()
1301 d[0].res_count = cpu_to_le16(packet->timestamp); in at_context_queue_packet()
1309 tcode = (packet->header[0] >> 4) & 0x0f; in at_context_queue_packet()
1321 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1322 (packet->speed << 16)); in at_context_queue_packet()
1323 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | in at_context_queue_packet()
1324 (packet->header[0] & 0xffff0000)); in at_context_queue_packet()
1325 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1328 header[3] = cpu_to_le32(packet->header[3]); in at_context_queue_packet()
1330 header[3] = (__force __le32) packet->header[3]; in at_context_queue_packet()
1332 d[0].req_count = cpu_to_le16(packet->header_length); in at_context_queue_packet()
1337 (packet->speed << 16)); in at_context_queue_packet()
1338 header[1] = cpu_to_le32(packet->header[1]); in at_context_queue_packet()
1339 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1342 if (is_ping_packet(&packet->header[1])) in at_context_queue_packet()
1347 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1348 (packet->speed << 16)); in at_context_queue_packet()
1349 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); in at_context_queue_packet()
1355 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1356 return -1; in at_context_queue_packet()
1361 driver_data->packet = packet; in at_context_queue_packet()
1362 packet->driver_data = driver_data; in at_context_queue_packet()
1364 if (packet->payload_length > 0) { in at_context_queue_packet()
1365 if (packet->payload_length > sizeof(driver_data->inline_data)) { in at_context_queue_packet()
1366 payload_bus = dma_map_single(ohci->card.device, in at_context_queue_packet()
1367 packet->payload, in at_context_queue_packet()
1368 packet->payload_length, in at_context_queue_packet()
1370 if (dma_mapping_error(ohci->card.device, payload_bus)) { in at_context_queue_packet()
1371 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1372 return -1; in at_context_queue_packet()
1374 packet->payload_bus = payload_bus; in at_context_queue_packet()
1375 packet->payload_mapped = true; in at_context_queue_packet()
1377 memcpy(driver_data->inline_data, packet->payload, in at_context_queue_packet()
1378 packet->payload_length); in at_context_queue_packet()
1382 d[2].req_count = cpu_to_le16(packet->payload_length); in at_context_queue_packet()
1391 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in at_context_queue_packet()
1396 if (ohci->generation != packet->generation) { in at_context_queue_packet()
1397 if (packet->payload_mapped) in at_context_queue_packet()
1398 dma_unmap_single(ohci->card.device, payload_bus, in at_context_queue_packet()
1399 packet->payload_length, DMA_TO_DEVICE); in at_context_queue_packet()
1400 packet->ack = RCODE_GENERATION; in at_context_queue_packet()
1401 return -1; in at_context_queue_packet()
1404 context_append(ctx, d, z, 4 - z); in at_context_queue_packet()
1406 if (ctx->running) in at_context_queue_packet()
1407 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in at_context_queue_packet()
1416 tasklet_disable(&ctx->tasklet); in at_context_flush()
1418 ctx->flushing = true; in at_context_flush()
1420 ctx->flushing = false; in at_context_flush()
1422 tasklet_enable(&ctx->tasklet); in at_context_flush()
1431 struct fw_ohci *ohci = context->ohci; in handle_at_packet()
1434 if (last->transfer_status == 0 && !context->flushing) in handle_at_packet()
1439 packet = driver_data->packet; in handle_at_packet()
1444 if (packet->payload_mapped) in handle_at_packet()
1445 dma_unmap_single(ohci->card.device, packet->payload_bus, in handle_at_packet()
1446 packet->payload_length, DMA_TO_DEVICE); in handle_at_packet()
1448 evt = le16_to_cpu(last->transfer_status) & 0x1f; in handle_at_packet()
1449 packet->timestamp = le16_to_cpu(last->res_count); in handle_at_packet()
1451 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); in handle_at_packet()
1456 packet->ack = RCODE_CANCELLED; in handle_at_packet()
1464 packet->ack = RCODE_GENERATION; in handle_at_packet()
1468 if (context->flushing) in handle_at_packet()
1469 packet->ack = RCODE_GENERATION; in handle_at_packet()
1475 packet->ack = RCODE_NO_ACK; in handle_at_packet()
1486 packet->ack = evt - 0x10; in handle_at_packet()
1490 if (context->flushing) { in handle_at_packet()
1491 packet->ack = RCODE_GENERATION; in handle_at_packet()
1497 packet->ack = RCODE_SEND_ERROR; in handle_at_packet()
1501 packet->callback(packet, &ohci->card, packet->ack); in handle_at_packet()
1518 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_rom()
1520 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_rom()
1524 i = csr - CSR_CONFIG_ROM; in handle_local_rom()
1526 fw_fill_response(&response, packet->header, in handle_local_rom()
1529 fw_fill_response(&response, packet->header, in handle_local_rom()
1532 fw_fill_response(&response, packet->header, RCODE_COMPLETE, in handle_local_rom()
1533 (void *) ohci->config_rom + i, length); in handle_local_rom()
1536 fw_core_handle_response(&ohci->card, &response); in handle_local_rom()
1547 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_lock()
1548 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_lock()
1549 payload = packet->payload; in handle_local_lock()
1550 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); in handle_local_lock()
1560 fw_fill_response(&response, packet->header, in handle_local_lock()
1565 sel = (csr - CSR_BUS_MANAGER_ID) / 4; in handle_local_lock()
1574 fw_fill_response(&response, packet->header, in handle_local_lock()
1581 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); in handle_local_lock()
1584 fw_core_handle_response(&ohci->card, &response); in handle_local_lock()
1591 if (ctx == &ctx->ohci->at_request_ctx) { in handle_local_request()
1592 packet->ack = ACK_PENDING; in handle_local_request()
1593 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1598 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | in handle_local_request()
1599 packet->header[2]; in handle_local_request()
1600 csr = offset - CSR_REGISTER_BASE; in handle_local_request()
1604 handle_local_rom(ctx->ohci, packet, csr); in handle_local_request()
1610 handle_local_lock(ctx->ohci, packet, csr); in handle_local_request()
1613 if (ctx == &ctx->ohci->at_request_ctx) in handle_local_request()
1614 fw_core_handle_request(&ctx->ohci->card, packet); in handle_local_request()
1616 fw_core_handle_response(&ctx->ohci->card, packet); in handle_local_request()
1620 if (ctx == &ctx->ohci->at_response_ctx) { in handle_local_request()
1621 packet->ack = ACK_COMPLETE; in handle_local_request()
1622 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1631 spin_lock_irqsave(&ctx->ohci->lock, flags); in at_context_transmit()
1633 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && in at_context_transmit()
1634 ctx->ohci->generation == packet->generation) { in at_context_transmit()
1635 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1641 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1644 packet->callback(packet, &ctx->ohci->card, packet->ack); in at_context_transmit()
1669 if (!(ohci->it_context_support & (1 << i))) in handle_dead_contexts()
1675 if (!(ohci->ir_context_support & (1 << i))) in handle_dead_contexts()
1697 * - When the lowest six bits are wrapping around to zero, a read that happens
1699 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1701 * - Occasionally, the entire register reads zero.
1718 if (ohci->quirks & QUIRK_CYCLE_TIMER) { in get_cycle_time()
1729 diff01 = t1 - t0; in get_cycle_time()
1730 diff12 = t2 - t1; in get_cycle_time()
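The workaround reads the timer several times and only accepts a sample once consecutive deltas are positive and of similar magnitude; a hedged sketch of the loop these two lines sit in (retry bound illustrative, cycle_timer_ticks() flattens the sec/count/offset fields into linear ticks):

    i = 0;
    c0 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
    do {
            c1 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
            c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
            t0 = cycle_timer_ticks(c0);
            t1 = cycle_timer_ticks(c1);
            t2 = cycle_timer_ticks(c2);
            diff01 = t1 - t0;
            diff12 = t2 - t1;
            c0 = c2;                /* reuse the newest sample on retry */
    } while ((diff01 <= 0 || diff12 <= 0 ||
              diff01 / diff12 >= 2 || diff12 / diff01 >= 2) && i++ < 20);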
1749 if (unlikely(!ohci->bus_time_running)) { in update_bus_time()
1751 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) | in update_bus_time()
1753 ohci->bus_time_running = true; in update_bus_time()
1756 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) in update_bus_time()
1757 ohci->bus_time += 0x40; in update_bus_time()
1759 return ohci->bus_time | cycle_time_seconds; in update_bus_time()
1766 mutex_lock(&ohci->phy_reg_mutex); in get_status_for_port()
1770 mutex_unlock(&ohci->phy_reg_mutex); in get_status_for_port()
1790 entry = ohci->self_id_buffer[i]; in get_self_id_pos()
1792 return -1; in get_self_id_pos()
1804 mutex_lock(&ohci->phy_reg_mutex); in initiated_reset()
1820 mutex_unlock(&ohci->phy_reg_mutex); in initiated_reset()
1839 return -EBUSY; in find_and_insert_self_id()
1843 reg = ohci_read_phy_reg(&ohci->card, 4); in find_and_insert_self_id()
1848 reg = ohci_read_phy_reg(&ohci->card, 1); in find_and_insert_self_id()
1857 self_id |= ((status & 0x3) << (6 - (i * 2))); in find_and_insert_self_id()
1864 memmove(&(ohci->self_id_buffer[pos+1]), in find_and_insert_self_id()
1865 &(ohci->self_id_buffer[pos]), in find_and_insert_self_id()
1866 (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); in find_and_insert_self_id()
1867 ohci->self_id_buffer[pos] = self_id; in find_and_insert_self_id()
1893 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | in bus_reset_work()
1897 if (!(ohci->is_root && is_new_root)) in bus_reset_work()
1900 ohci->is_root = is_new_root; in bus_reset_work()
1920 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff; in bus_reset_work()
1924 u32 id = cond_le32_to_cpu(ohci->self_id[i]); in bus_reset_work()
1925 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]); in bus_reset_work()
1945 ohci->self_id_buffer[j] = id; in bus_reset_work()
1948 if (ohci->quirks & QUIRK_TI_SLLZ059) { in bus_reset_work()
1984 spin_lock_irq(&ohci->lock); in bus_reset_work()
1986 ohci->generation = -1; /* prevent AT packet queueing */ in bus_reset_work()
1987 context_stop(&ohci->at_request_ctx); in bus_reset_work()
1988 context_stop(&ohci->at_response_ctx); in bus_reset_work()
1990 spin_unlock_irq(&ohci->lock); in bus_reset_work()
1997 at_context_flush(&ohci->at_request_ctx); in bus_reset_work()
1998 at_context_flush(&ohci->at_response_ctx); in bus_reset_work()
2000 spin_lock_irq(&ohci->lock); in bus_reset_work()
2002 ohci->generation = generation; in bus_reset_work()
2005 if (ohci->quirks & QUIRK_RESET_PACKET) in bus_reset_work()
2006 ohci->request_generation = generation; in bus_reset_work()
2017 if (ohci->next_config_rom != NULL) { in bus_reset_work()
2018 if (ohci->next_config_rom != ohci->config_rom) { in bus_reset_work()
2019 free_rom = ohci->config_rom; in bus_reset_work()
2020 free_rom_bus = ohci->config_rom_bus; in bus_reset_work()
2022 ohci->config_rom = ohci->next_config_rom; in bus_reset_work()
2023 ohci->config_rom_bus = ohci->next_config_rom_bus; in bus_reset_work()
2024 ohci->next_config_rom = NULL; in bus_reset_work()
2033 be32_to_cpu(ohci->config_rom[2])); in bus_reset_work()
2034 ohci->config_rom[0] = ohci->next_header; in bus_reset_work()
2036 be32_to_cpu(ohci->next_header)); in bus_reset_work()
2044 spin_unlock_irq(&ohci->lock); in bus_reset_work()
2047 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, in bus_reset_work()
2052 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, in bus_reset_work()
2053 self_id_count, ohci->self_id_buffer, in bus_reset_work()
2054 ohci->csr_state_setclear_abdicate); in bus_reset_work()
2055 ohci->csr_state_setclear_abdicate = false; in bus_reset_work()
2078 queue_work(selfid_workqueue, &ohci->bus_reset_work); in irq_handler()
2081 tasklet_schedule(&ohci->ar_request_ctx.tasklet); in irq_handler()
2084 tasklet_schedule(&ohci->ar_response_ctx.tasklet); in irq_handler()
2087 tasklet_schedule(&ohci->at_request_ctx.tasklet); in irq_handler()
2090 tasklet_schedule(&ohci->at_response_ctx.tasklet); in irq_handler()
2097 i = ffs(iso_event) - 1; in irq_handler()
2099 &ohci->ir_context_list[i].context.tasklet); in irq_handler()
2109 i = ffs(iso_event) - 1; in irq_handler()
2111 &ohci->it_context_list[i].context.tasklet); in irq_handler()
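Both dispatch loops peel off one set bit at a time with ffs() and schedule the matching context's tasklet; a sketch of the IT loop the two lines above come from:

    while (iso_event) {
            i = ffs(iso_event) - 1;
            tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
            iso_event &= ~(1 << i);         /* clear the bit just handled */
    }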
2150 spin_lock(&ohci->lock); in irq_handler()
2152 spin_unlock(&ohci->lock); in irq_handler()
2168 return -ENODEV; /* Card was ejected. */ in software_reset()
2176 return -EBUSY; in software_reset()
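The two returns above bracket a poll loop: request a soft reset, then spin until the softReset bit clears, the register reads back all ones (card gone), or patience runs out. A hedged sketch with an illustrative retry count:

    static int software_reset(struct fw_ohci *ohci)
    {
            int i;

            reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
            for (i = 0; i < 500; i++) {             /* bound is illustrative */
                    u32 val = reg_read(ohci, OHCI1394_HCControlSet);

                    if (val == ~0u)
                            return -ENODEV;         /* Card was ejected. */
                    if (!(val & OHCI1394_HCControl_softReset))
                            return 0;               /* reset completed */
                    msleep(1);
            }
            return -EBUSY;
    }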
2185 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); in copy_config_rom()
2211 if (ohci->quirks & QUIRK_NO_1394A) in configure_1394a_enhancements()
2251 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { in probe_tsb41ba3d()
2300 return -EIO; in ohci_enable()
2303 if (ohci->quirks & QUIRK_TI_SLLZ059) { in ohci_enable()
2310 ohci->quirks &= ~QUIRK_TI_SLLZ059; in ohci_enable()
2316 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); in ohci_enable()
2327 ohci->bus_time_running = false; in ohci_enable()
2330 if (ohci->ir_context_support & (1 << i)) in ohci_enable()
2338 card->broadcast_channel_auto_allocated = true; in ohci_enable()
2343 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; in ohci_enable()
2345 card->priority_budget_implemented = ohci->pri_req_max != 0; in ohci_enable()
2366 * link, so we have a valid config rom before enabling - the in ohci_enable()
2380 ohci->next_config_rom = in ohci_enable()
2381 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_enable()
2382 &ohci->next_config_rom_bus, in ohci_enable()
2384 if (ohci->next_config_rom == NULL) in ohci_enable()
2385 return -ENOMEM; in ohci_enable()
2387 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_enable()
2393 ohci->next_config_rom = ohci->config_rom; in ohci_enable()
2394 ohci->next_config_rom_bus = ohci->config_rom_bus; in ohci_enable()
2397 ohci->next_header = ohci->next_config_rom[0]; in ohci_enable()
2398 ohci->next_config_rom[0] = 0; in ohci_enable()
2401 be32_to_cpu(ohci->next_config_rom[2])); in ohci_enable()
2402 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_enable()
2428 ar_context_run(&ohci->ar_request_ctx); in ohci_enable()
2429 ar_context_run(&ohci->ar_response_ctx); in ohci_enable()
2434 fw_schedule_bus_reset(&ohci->card, false, true); in ohci_enable()
2471 * We use ohci->lock to avoid racing with the code that sets in ohci_set_config_rom()
2472 * ohci->next_config_rom to NULL (see bus_reset_work). in ohci_set_config_rom()
2476 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_set_config_rom()
2479 return -ENOMEM; in ohci_set_config_rom()
2481 spin_lock_irq(&ohci->lock); in ohci_set_config_rom()
2485 * push our new allocation into the ohci->next_config_rom in ohci_set_config_rom()
2494 if (ohci->next_config_rom == NULL) { in ohci_set_config_rom()
2495 ohci->next_config_rom = next_config_rom; in ohci_set_config_rom()
2496 ohci->next_config_rom_bus = next_config_rom_bus; in ohci_set_config_rom()
2500 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_set_config_rom()
2502 ohci->next_header = config_rom[0]; in ohci_set_config_rom()
2503 ohci->next_config_rom[0] = 0; in ohci_set_config_rom()
2505 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_set_config_rom()
2507 spin_unlock_irq(&ohci->lock); in ohci_set_config_rom()
2511 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_set_config_rom()
2522 fw_schedule_bus_reset(&ohci->card, true, true); in ohci_set_config_rom()
2531 at_context_transmit(&ohci->at_request_ctx, packet); in ohci_send_request()
2538 at_context_transmit(&ohci->at_response_ctx, packet); in ohci_send_response()
2544 struct context *ctx = &ohci->at_request_ctx; in ohci_cancel_packet()
2545 struct driver_data *driver_data = packet->driver_data; in ohci_cancel_packet()
2546 int ret = -ENOENT; in ohci_cancel_packet()
2548 tasklet_disable(&ctx->tasklet); in ohci_cancel_packet()
2550 if (packet->ack != 0) in ohci_cancel_packet()
2553 if (packet->payload_mapped) in ohci_cancel_packet()
2554 dma_unmap_single(ohci->card.device, packet->payload_bus, in ohci_cancel_packet()
2555 packet->payload_length, DMA_TO_DEVICE); in ohci_cancel_packet()
2557 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); in ohci_cancel_packet()
2558 driver_data->packet = NULL; in ohci_cancel_packet()
2559 packet->ack = RCODE_CANCELLED; in ohci_cancel_packet()
2560 packet->callback(packet, &ohci->card, packet->ack); in ohci_cancel_packet()
2563 tasklet_enable(&ctx->tasklet); in ohci_cancel_packet()
2583 spin_lock_irqsave(&ohci->lock, flags); in ohci_enable_phys_dma()
2585 if (ohci->generation != generation) { in ohci_enable_phys_dma()
2586 ret = -ESTALE; in ohci_enable_phys_dma()
2591 * Note, if the node ID contains a non-local bus ID, physical DMA is in ohci_enable_phys_dma()
2599 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); in ohci_enable_phys_dma()
2603 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_enable_phys_dma()
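Node IDs 0..31 live in the Lo filter register and 32..62 in Hi, which is why the matched line shifts by n - 32; the full selection is presumably:

    if (n < 32)
            reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
    else
            reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));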
2617 if (ohci->is_root && in ohci_read_csr()
2623 if (ohci->csr_state_setclear_abdicate) in ohci_read_csr()
2640 spin_lock_irqsave(&ohci->lock, flags); in ohci_read_csr()
2642 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_read_csr()
2651 (ohci->pri_req_max << 8); in ohci_read_csr()
2666 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2672 ohci->csr_state_setclear_abdicate = false; in ohci_write_csr()
2676 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2682 ohci->csr_state_setclear_abdicate = true; in ohci_write_csr()
2698 spin_lock_irqsave(&ohci->lock, flags); in ohci_write_csr()
2699 ohci->bus_time = (update_bus_time(ohci) & 0x40) | in ohci_write_csr()
2701 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_write_csr()
2724 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, in flush_iso_completions()
2725 ctx->header_length, ctx->header, in flush_iso_completions()
2726 ctx->base.callback_data); in flush_iso_completions()
2727 ctx->header_length = 0; in flush_iso_completions()
2734 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { in copy_iso_headers()
2735 if (ctx->base.drop_overflow_headers) in copy_iso_headers()
2740 ctx_hdr = ctx->header + ctx->header_length; in copy_iso_headers()
2741 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); in copy_iso_headers()
2748 if (ctx->base.header_size > 0) in copy_iso_headers()
2750 if (ctx->base.header_size > 4) in copy_iso_headers()
2752 if (ctx->base.header_size > 8) in copy_iso_headers()
2753 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); in copy_iso_headers()
2754 ctx->header_length += ctx->base.header_size; in copy_iso_headers()
2763 struct descriptor *pd; in handle_ir_packet_per_buffer() local
2766 for (pd = d; pd <= last; pd++) in handle_ir_packet_per_buffer()
2767 if (pd->transfer_status) in handle_ir_packet_per_buffer()
2769 if (pd > last) in handle_ir_packet_per_buffer()
2773 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { in handle_ir_packet_per_buffer()
2775 buffer_dma = le32_to_cpu(d->data_address); in handle_ir_packet_per_buffer()
2776 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_packet_per_buffer()
2779 le16_to_cpu(d->req_count), in handle_ir_packet_per_buffer()
2785 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_ir_packet_per_buffer()
2801 req_count = le16_to_cpu(last->req_count); in handle_ir_buffer_fill()
2802 res_count = le16_to_cpu(READ_ONCE(last->res_count)); in handle_ir_buffer_fill()
2803 completed = req_count - res_count; in handle_ir_buffer_fill()
2804 buffer_dma = le32_to_cpu(last->data_address); in handle_ir_buffer_fill()
2807 ctx->mc_buffer_bus = buffer_dma; in handle_ir_buffer_fill()
2808 ctx->mc_completed = completed; in handle_ir_buffer_fill()
2815 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_buffer_fill()
2820 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { in handle_ir_buffer_fill()
2821 ctx->base.callback.mc(&ctx->base, in handle_ir_buffer_fill()
2823 ctx->base.callback_data); in handle_ir_buffer_fill()
2824 ctx->mc_completed = 0; in handle_ir_buffer_fill()
2832 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, in flush_ir_buffer_fill()
2833 ctx->mc_buffer_bus & PAGE_MASK, in flush_ir_buffer_fill()
2834 ctx->mc_buffer_bus & ~PAGE_MASK, in flush_ir_buffer_fill()
2835 ctx->mc_completed, DMA_FROM_DEVICE); in flush_ir_buffer_fill()
2837 ctx->base.callback.mc(&ctx->base, in flush_ir_buffer_fill()
2838 ctx->mc_buffer_bus + ctx->mc_completed, in flush_ir_buffer_fill()
2839 ctx->base.callback_data); in flush_ir_buffer_fill()
2840 ctx->mc_completed = 0; in flush_ir_buffer_fill()
2844 struct descriptor *pd) in sync_it_packet_for_cpu() argument
2850 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2854 pd += 2; in sync_it_packet_for_cpu()
2861 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == in sync_it_packet_for_cpu()
2862 (context->current_bus & PAGE_MASK)) { in sync_it_packet_for_cpu()
2863 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2865 pd++; in sync_it_packet_for_cpu()
2869 buffer_dma = le32_to_cpu(pd->data_address); in sync_it_packet_for_cpu()
2870 dma_sync_single_range_for_cpu(context->ohci->card.device, in sync_it_packet_for_cpu()
2873 le16_to_cpu(pd->req_count), in sync_it_packet_for_cpu()
2875 control = pd->control; in sync_it_packet_for_cpu()
2876 pd++; in sync_it_packet_for_cpu()
2886 struct descriptor *pd; in handle_it_packet() local
2889 for (pd = d; pd <= last; pd++) in handle_it_packet()
2890 if (pd->transfer_status) in handle_it_packet()
2892 if (pd > last) in handle_it_packet()
2898 if (ctx->header_length + 4 > PAGE_SIZE) { in handle_it_packet()
2899 if (ctx->base.drop_overflow_headers) in handle_it_packet()
2904 ctx_hdr = ctx->header + ctx->header_length; in handle_it_packet()
2905 ctx->last_timestamp = le16_to_cpu(last->res_count); in handle_it_packet()
2906 /* Present this value as big-endian to match the receive code */ in handle_it_packet()
2907 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | in handle_it_packet()
2908 le16_to_cpu(pd->res_count)); in handle_it_packet()
2909 ctx->header_length += 4; in handle_it_packet()
2911 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_it_packet()
2925 ohci->mc_channels = channels; in set_multichannel_mask()
2936 int index, ret = -EBUSY; in ohci_allocate_iso_context()
2938 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
2942 mask = &ohci->it_context_mask; in ohci_allocate_iso_context()
2944 index = ffs(*mask) - 1; in ohci_allocate_iso_context()
2948 ctx = &ohci->it_context_list[index]; in ohci_allocate_iso_context()
2953 channels = &ohci->ir_context_channels; in ohci_allocate_iso_context()
2954 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
2956 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
2961 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
2966 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
2968 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
2970 ohci->mc_allocated = true; in ohci_allocate_iso_context()
2973 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
2978 index = -1; in ohci_allocate_iso_context()
2979 ret = -ENOSYS; in ohci_allocate_iso_context()
2982 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
2988 ctx->header_length = 0; in ohci_allocate_iso_context()
2989 ctx->header = (void *) __get_free_page(GFP_KERNEL); in ohci_allocate_iso_context()
2990 if (ctx->header == NULL) { in ohci_allocate_iso_context()
2991 ret = -ENOMEM; in ohci_allocate_iso_context()
2994 ret = context_init(&ctx->context, ohci, regs, callback); in ohci_allocate_iso_context()
3000 ctx->mc_completed = 0; in ohci_allocate_iso_context()
3003 return &ctx->base; in ohci_allocate_iso_context()
3006 free_page((unsigned long)ctx->header); in ohci_allocate_iso_context()
3008 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
3016 ohci->mc_allocated = false; in ohci_allocate_iso_context()
3021 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
3030 struct fw_ohci *ohci = ctx->context.ohci; in ohci_start_iso()
3035 if (ctx->context.last->branch_address == 0) in ohci_start_iso()
3036 return -ENODATA; in ohci_start_iso()
3038 switch (ctx->base.type) { in ohci_start_iso()
3040 index = ctx - ohci->it_context_list; in ohci_start_iso()
3048 context_run(&ctx->context, match); in ohci_start_iso()
3055 index = ctx - ohci->ir_context_list; in ohci_start_iso()
3056 match = (tags << 28) | (sync << 8) | ctx->base.channel; in ohci_start_iso()
3064 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); in ohci_start_iso()
3065 context_run(&ctx->context, control); in ohci_start_iso()
3067 ctx->sync = sync; in ohci_start_iso()
3068 ctx->tags = tags; in ohci_start_iso()
3078 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_stop_iso()
3082 switch (ctx->base.type) { in ohci_stop_iso()
3084 index = ctx - ohci->it_context_list; in ohci_stop_iso()
3090 index = ctx - ohci->ir_context_list; in ohci_stop_iso()
3095 context_stop(&ctx->context); in ohci_stop_iso()
3096 tasklet_kill(&ctx->context.tasklet); in ohci_stop_iso()
3103 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_free_iso_context()
3109 context_release(&ctx->context); in ohci_free_iso_context()
3110 free_page((unsigned long)ctx->header); in ohci_free_iso_context()
3112 spin_lock_irqsave(&ohci->lock, flags); in ohci_free_iso_context()
3114 switch (base->type) { in ohci_free_iso_context()
3116 index = ctx - ohci->it_context_list; in ohci_free_iso_context()
3117 ohci->it_context_mask |= 1 << index; in ohci_free_iso_context()
3121 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3122 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3123 ohci->ir_context_channels |= 1ULL << base->channel; in ohci_free_iso_context()
3127 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3128 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3129 ohci->ir_context_channels |= ohci->mc_channels; in ohci_free_iso_context()
3130 ohci->mc_channels = 0; in ohci_free_iso_context()
3131 ohci->mc_allocated = false; in ohci_free_iso_context()
3135 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_free_iso_context()
3140 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_set_iso_channels()
3144 switch (base->type) { in ohci_set_iso_channels()
3147 spin_lock_irqsave(&ohci->lock, flags); in ohci_set_iso_channels()
3150 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { in ohci_set_iso_channels()
3151 *channels = ohci->ir_context_channels; in ohci_set_iso_channels()
3152 ret = -EBUSY; in ohci_set_iso_channels()
3158 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_set_iso_channels()
3162 ret = -EINVAL; in ohci_set_iso_channels()
3174 for (i = 0 ; i < ohci->n_ir ; i++) { in ohci_resume_iso_dma()
3175 ctx = &ohci->ir_context_list[i]; in ohci_resume_iso_dma()
3176 if (ctx->context.running) in ohci_resume_iso_dma()
3177 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3180 for (i = 0 ; i < ohci->n_it ; i++) { in ohci_resume_iso_dma()
3181 ctx = &ohci->it_context_list[i]; in ohci_resume_iso_dma()
3182 if (ctx->context.running) in ohci_resume_iso_dma()
3183 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3193 struct descriptor *d, *last, *pd; in queue_iso_transmit() local
3204 if (p->skip) in queue_iso_transmit()
3208 if (p->header_length > 0) in queue_iso_transmit()
3212 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; in queue_iso_transmit()
3213 if (p->payload_length > 0) in queue_iso_transmit()
3214 payload_z = end_page - (payload_index >> PAGE_SHIFT); in queue_iso_transmit()
3221 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); in queue_iso_transmit()
3223 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); in queue_iso_transmit()
3225 return -ENOMEM; in queue_iso_transmit()
3227 if (!p->skip) { in queue_iso_transmit()
3235 * FIXME: Make the context's cycle-lost behaviour configurable? in queue_iso_transmit()
3240 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | in queue_iso_transmit()
3241 IT_HEADER_TAG(p->tag) | in queue_iso_transmit()
3243 IT_HEADER_CHANNEL(ctx->base.channel) | in queue_iso_transmit()
3244 IT_HEADER_SPEED(ctx->base.speed)); in queue_iso_transmit()
3246 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + in queue_iso_transmit()
3247 p->payload_length)); in queue_iso_transmit()
3250 if (p->header_length > 0) { in queue_iso_transmit()
3251 d[2].req_count = cpu_to_le16(p->header_length); in queue_iso_transmit()
3253 memcpy(&d[z], p->header, p->header_length); in queue_iso_transmit()
3256 pd = d + z - payload_z; in queue_iso_transmit()
3257 payload_end_index = payload_index + p->payload_length; in queue_iso_transmit()
3263 min(next_page_index, payload_end_index) - payload_index; in queue_iso_transmit()
3264 pd[i].req_count = cpu_to_le16(length); in queue_iso_transmit()
3266 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
3267 pd[i].data_address = cpu_to_le32(page_bus + offset); in queue_iso_transmit()
3269 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_transmit()
3276 if (p->interrupt) in queue_iso_transmit()
3281 last = z == 2 ? d : d + z - 1; in queue_iso_transmit()
3282 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in queue_iso_transmit()
3287 context_append(&ctx->context, d, z, header_z); in queue_iso_transmit()
3297 struct device *device = ctx->context.ohci->card.device; in queue_iso_packet_per_buffer()
3298 struct descriptor *d, *pd; in queue_iso_packet_per_buffer() local
3308 packet_count = packet->header_length / ctx->base.header_size; in queue_iso_packet_per_buffer()
3309 header_size = max(ctx->base.header_size, (size_t)8); in queue_iso_packet_per_buffer()
3315 payload_per_buffer = packet->payload_length / packet_count; in queue_iso_packet_per_buffer()
3320 d = context_get_descriptors(&ctx->context, in queue_iso_packet_per_buffer()
3323 return -ENOMEM; in queue_iso_packet_per_buffer()
3325 d->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3327 if (packet->skip && i == 0) in queue_iso_packet_per_buffer()
3328 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_packet_per_buffer()
3329 d->req_count = cpu_to_le16(header_size); in queue_iso_packet_per_buffer()
3330 d->res_count = d->req_count; in queue_iso_packet_per_buffer()
3331 d->transfer_status = 0; in queue_iso_packet_per_buffer()
3332 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); in queue_iso_packet_per_buffer()
3335 pd = d; in queue_iso_packet_per_buffer()
3337 pd++; in queue_iso_packet_per_buffer()
3338 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3344 length = PAGE_SIZE - offset; in queue_iso_packet_per_buffer()
3345 pd->req_count = cpu_to_le16(length); in queue_iso_packet_per_buffer()
3346 pd->res_count = pd->req_count; in queue_iso_packet_per_buffer()
3347 pd->transfer_status = 0; in queue_iso_packet_per_buffer()
3349 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
3350 pd->data_address = cpu_to_le32(page_bus + offset); in queue_iso_packet_per_buffer()
3357 rest -= length; in queue_iso_packet_per_buffer()
3361 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3364 if (packet->interrupt && i == packet_count - 1) in queue_iso_packet_per_buffer()
3365 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_packet_per_buffer()
3367 context_append(&ctx->context, d, z, header_z); in queue_iso_packet_per_buffer()
3384 rest = packet->payload_length; in queue_iso_buffer_fill()
3389 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) in queue_iso_buffer_fill()
3390 return -EFAULT; in queue_iso_buffer_fill()
3393 d = context_get_descriptors(&ctx->context, 1, &d_bus); in queue_iso_buffer_fill()
3395 return -ENOMEM; in queue_iso_buffer_fill()
3397 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in queue_iso_buffer_fill()
3399 if (packet->skip && i == 0) in queue_iso_buffer_fill()
3400 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_buffer_fill()
3401 if (packet->interrupt && i == z - 1) in queue_iso_buffer_fill()
3402 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_buffer_fill()
3407 length = PAGE_SIZE - offset; in queue_iso_buffer_fill()
3408 d->req_count = cpu_to_le16(length); in queue_iso_buffer_fill()
3409 d->res_count = d->req_count; in queue_iso_buffer_fill()
3410 d->transfer_status = 0; in queue_iso_buffer_fill()
3412 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
3413 d->data_address = cpu_to_le32(page_bus + offset); in queue_iso_buffer_fill()
3415 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_buffer_fill()
3419 rest -= length; in queue_iso_buffer_fill()
3423 context_append(&ctx->context, d, 1, 0); in queue_iso_buffer_fill()
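All three queue_iso_* helpers share the same page-walk: carve the remaining payload into chunks that never cross a page boundary, fetching each page's bus address from page_private(). A condensed sketch under those assumptions:

    while (rest > 0) {
            size_t length = min_t(size_t, rest, PAGE_SIZE - offset);
            dma_addr_t page_bus = page_private(buffer->pages[page]);

            /* ...program one descriptor covering page_bus + offset,
             * length bytes... */

            rest -= length;
            offset = 0;     /* later chunks start on a page boundary */
            page++;
    }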
3436 int ret = -ENOSYS; in ohci_queue_iso()
3438 spin_lock_irqsave(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3439 switch (base->type) { in ohci_queue_iso()
3450 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3458 &container_of(base, struct iso_context, base)->context; in ohci_flush_queue_iso()
3460 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ohci_flush_queue_iso()
3468 tasklet_disable(&ctx->context.tasklet); in ohci_flush_iso_completions()
3470 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { in ohci_flush_iso_completions()
3471 context_tasklet((unsigned long)&ctx->context); in ohci_flush_iso_completions()
3473 switch (base->type) { in ohci_flush_iso_completions()
3476 if (ctx->header_length != 0) in ohci_flush_iso_completions()
3480 if (ctx->mc_completed != 0) in ohci_flush_iso_completions()
3484 ret = -ENOSYS; in ohci_flush_iso_completions()
3487 clear_bit_unlock(0, &ctx->flushing_completions); in ohci_flush_iso_completions()
3491 tasklet_enable(&ctx->context.tasklet); in ohci_flush_iso_completions()
3556 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { in pci_probe()
3557 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); in pci_probe()
3558 return -ENOSYS; in pci_probe()
3563 err = -ENOMEM; in pci_probe()
3567 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); in pci_probe()
3573 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); in pci_probe()
3581 spin_lock_init(&ohci->lock); in pci_probe()
3582 mutex_init(&ohci->phy_reg_mutex); in pci_probe()
3584 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); in pci_probe()
3589 err = -ENXIO; in pci_probe()
3599 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); in pci_probe()
3600 if (ohci->registers == NULL) { in pci_probe()
3602 err = -ENXIO; in pci_probe()
3607 if ((ohci_quirks[i].vendor == dev->vendor) && in pci_probe()
3609 ohci_quirks[i].device == dev->device) && in pci_probe()
3610 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || in pci_probe()
3611 ohci_quirks[i].revision >= dev->revision)) { in pci_probe()
3612 ohci->quirks = ohci_quirks[i].flags; in pci_probe()
3616 ohci->quirks = param_quirks; in pci_probe()
3625 ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, in pci_probe()
3627 &ohci->misc_buffer_bus, in pci_probe()
3629 if (!ohci->misc_buffer) { in pci_probe()
3630 err = -ENOMEM; in pci_probe()
3634 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, in pci_probe()
3639 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, in pci_probe()
3644 err = context_init(&ohci->at_request_ctx, ohci, in pci_probe()
3649 err = context_init(&ohci->at_response_ctx, ohci, in pci_probe()
3655 ohci->ir_context_channels = ~0ULL; in pci_probe()
3656 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); in pci_probe()
3658 ohci->ir_context_mask = ohci->ir_context_support; in pci_probe()
3659 ohci->n_ir = hweight32(ohci->ir_context_mask); in pci_probe()
3660 size = sizeof(struct iso_context) * ohci->n_ir; in pci_probe()
3661 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); in pci_probe()
3664 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); in pci_probe()
3666 if (!ohci->it_context_support) { in pci_probe()
3668 ohci->it_context_support = 0xf; in pci_probe()
3671 ohci->it_context_mask = ohci->it_context_support; in pci_probe()
3672 ohci->n_it = hweight32(ohci->it_context_mask); in pci_probe()
3673 size = sizeof(struct iso_context) * ohci->n_it; in pci_probe()
3674 ohci->it_context_list = kzalloc(size, GFP_KERNEL); in pci_probe()
3676 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { in pci_probe()
3677 err = -ENOMEM; in pci_probe()
3681 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; in pci_probe()
3682 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; in pci_probe()
3690 if (!(ohci->quirks & QUIRK_NO_MSI)) in pci_probe()
3692 if (request_irq(dev->irq, irq_handler, in pci_probe()
3695 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); in pci_probe()
3696 err = -EIO; in pci_probe()
3700 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); in pci_probe()
3708 version >> 16, version & 0xff, ohci->card.index, in pci_probe()
3709 ohci->n_ir, ohci->n_it, ohci->quirks, in pci_probe()
3716 free_irq(dev->irq, ohci); in pci_probe()
3720 kfree(ohci->ir_context_list); in pci_probe()
3721 kfree(ohci->it_context_list); in pci_probe()
3722 context_release(&ohci->at_response_ctx); in pci_probe()
3724 context_release(&ohci->at_request_ctx); in pci_probe()
3726 ar_context_release(&ohci->ar_response_ctx); in pci_probe()
3728 ar_context_release(&ohci->ar_request_ctx); in pci_probe()
3730 dma_free_coherent(ohci->card.device, PAGE_SIZE, in pci_probe()
3731 ohci->misc_buffer, ohci->misc_buffer_bus); in pci_probe()
3733 pci_iounmap(dev, ohci->registers); in pci_probe()
3757 cancel_work_sync(&ohci->bus_reset_work); in pci_remove()
3758 fw_core_remove_card(&ohci->card); in pci_remove()
3766 free_irq(dev->irq, ohci); in pci_remove()
3768 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) in pci_remove()
3769 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, in pci_remove()
3770 ohci->next_config_rom, ohci->next_config_rom_bus); in pci_remove()
3771 if (ohci->config_rom) in pci_remove()
3772 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, in pci_remove()
3773 ohci->config_rom, ohci->config_rom_bus); in pci_remove()
3774 ar_context_release(&ohci->ar_request_ctx); in pci_remove()
3775 ar_context_release(&ohci->ar_response_ctx); in pci_remove()
3776 dma_free_coherent(ohci->card.device, PAGE_SIZE, in pci_remove()
3777 ohci->misc_buffer, ohci->misc_buffer_bus); in pci_remove()
3778 context_release(&ohci->at_request_ctx); in pci_remove()
3779 context_release(&ohci->at_response_ctx); in pci_remove()
3780 kfree(ohci->it_context_list); in pci_remove()
3781 kfree(ohci->ir_context_list); in pci_remove()
3783 pci_iounmap(dev, ohci->registers); in pci_remove()
3789 dev_notice(&dev->dev, "removed fw-ohci device\n"); in pci_remove()
3829 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); in pci_resume()
3830 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); in pci_resume()
3833 err = ohci_enable(&ohci->card, NULL, 0); in pci_resume()
3865 return -ENOMEM; in fw_ohci_init()
3883 /* Provide a module alias so root-on-sbp2 initrds don't break. */