Lines matching refs: sde
243 struct sdma_engine *sde,
246 struct sdma_engine *sde,
248 static void dump_sdma_state(struct sdma_engine *sde);
249 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
250 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
251 static void sdma_flush_descq(struct sdma_engine *sde);
287 struct sdma_engine *sde, in write_sde_csr() argument
291 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value); in write_sde_csr()
295 struct sdma_engine *sde, in read_sde_csr() argument
298 return read_kctxt_csr(sde->dd, sde->this_idx, offset0); in read_sde_csr()
305 static void sdma_wait_for_packet_egress(struct sdma_engine *sde, in sdma_wait_for_packet_egress() argument
308 u64 off = 8 * sde->this_idx; in sdma_wait_for_packet_egress()
309 struct hfi1_devdata *dd = sde->dd; in sdma_wait_for_packet_egress()
328 __func__, sde->this_idx, (u32)reg); in sdma_wait_for_packet_egress()
346 struct sdma_engine *sde = &dd->per_sdma[i]; in sdma_wait() local
348 sdma_wait_for_packet_egress(sde, 0); in sdma_wait()
352 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) in sdma_set_desc_cnt() argument
356 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT)) in sdma_set_desc_cnt()
361 write_sde_csr(sde, SD(DESC_CNT), reg); in sdma_set_desc_cnt()
364 static inline void complete_tx(struct sdma_engine *sde, in complete_tx() argument
373 trace_hfi1_sdma_out_sn(sde, tx->sn); in complete_tx()
374 if (WARN_ON_ONCE(sde->head_sn != tx->sn)) in complete_tx()
375 dd_dev_err(sde->dd, "expected %llu got %llu\n", in complete_tx()
376 sde->head_sn, tx->sn); in complete_tx()
377 sde->head_sn++; in complete_tx()
379 __sdma_txclean(sde->dd, tx); in complete_tx()
404 static void sdma_flush(struct sdma_engine *sde) in sdma_flush() argument
412 sdma_flush_descq(sde); in sdma_flush()
413 spin_lock_irqsave(&sde->flushlist_lock, flags); in sdma_flush()
415 list_splice_init(&sde->flushlist, &flushlist); in sdma_flush()
416 spin_unlock_irqrestore(&sde->flushlist_lock, flags); in sdma_flush()
419 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); in sdma_flush()
424 seq = read_seqbegin(&sde->waitlock); in sdma_flush()
425 if (!list_empty(&sde->dmawait)) { in sdma_flush()
426 write_seqlock(&sde->waitlock); in sdma_flush()
427 list_for_each_entry_safe(w, nw, &sde->dmawait, list) { in sdma_flush()
433 write_sequnlock(&sde->waitlock); in sdma_flush()
435 } while (read_seqretry(&sde->waitlock, seq)); in sdma_flush()
451 struct sdma_engine *sde = in sdma_field_flush() local
454 write_seqlock_irqsave(&sde->head_lock, flags); in sdma_field_flush()
455 if (!__sdma_running(sde)) in sdma_field_flush()
456 sdma_flush(sde); in sdma_field_flush()
457 write_sequnlock_irqrestore(&sde->head_lock, flags); in sdma_field_flush()
462 struct sdma_engine *sde = container_of(work, struct sdma_engine, in sdma_err_halt_wait() local
469 statuscsr = read_sde_csr(sde, SD(STATUS)); in sdma_err_halt_wait()
474 dd_dev_err(sde->dd, in sdma_err_halt_wait()
476 sde->this_idx); in sdma_err_halt_wait()
486 sdma_process_event(sde, sdma_event_e15_hw_halt_done); in sdma_err_halt_wait()
489 static void sdma_err_progress_check_schedule(struct sdma_engine *sde) in sdma_err_progress_check_schedule() argument
491 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { in sdma_err_progress_check_schedule()
493 struct hfi1_devdata *dd = sde->dd; in sdma_err_progress_check_schedule()
498 if (curr_sdma != sde) in sdma_err_progress_check_schedule()
502 dd_dev_err(sde->dd, in sdma_err_progress_check_schedule()
504 sde->this_idx); in sdma_err_progress_check_schedule()
505 mod_timer(&sde->err_progress_check_timer, jiffies + 10); in sdma_err_progress_check_schedule()
512 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer); in sdma_err_progress_check() local
514 dd_dev_err(sde->dd, "SDE progress check event\n"); in sdma_err_progress_check()
515 for (index = 0; index < sde->dd->num_sdma; index++) { in sdma_err_progress_check()
516 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index]; in sdma_err_progress_check()
520 if (curr_sde == sde) in sdma_err_progress_check()
545 schedule_work(&sde->err_halt_worker); in sdma_err_progress_check()
550 struct sdma_engine *sde = from_tasklet(sde, t, in sdma_hw_clean_up_task() local
556 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_hw_clean_up_task()
557 sde->this_idx, slashstrip(__FILE__), __LINE__, in sdma_hw_clean_up_task()
560 statuscsr = read_sde_csr(sde, SD(STATUS)); in sdma_hw_clean_up_task()
567 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done); in sdma_hw_clean_up_task()
570 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) in get_txhead() argument
572 return sde->tx_ring[sde->tx_head & sde->sdma_mask]; in get_txhead()
578 static void sdma_flush_descq(struct sdma_engine *sde) in sdma_flush_descq() argument
582 struct sdma_txreq *txp = get_txhead(sde); in sdma_flush_descq()
589 head = sde->descq_head & sde->sdma_mask; in sdma_flush_descq()
590 tail = sde->descq_tail & sde->sdma_mask; in sdma_flush_descq()
593 head = ++sde->descq_head & sde->sdma_mask; in sdma_flush_descq()
597 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; in sdma_flush_descq()
598 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); in sdma_flush_descq()
599 trace_hfi1_sdma_progress(sde, head, tail, txp); in sdma_flush_descq()
600 txp = get_txhead(sde); in sdma_flush_descq()
605 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_flush_descq()
610 struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); in sdma_sw_clean_up_task() local
613 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_sw_clean_up_task()
614 write_seqlock(&sde->head_lock); in sdma_sw_clean_up_task()
635 sdma_make_progress(sde, 0); in sdma_sw_clean_up_task()
637 sdma_flush(sde); in sdma_sw_clean_up_task()
644 sde->descq_tail = 0; in sdma_sw_clean_up_task()
645 sde->descq_head = 0; in sdma_sw_clean_up_task()
646 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_sw_clean_up_task()
647 *sde->head_dma = 0; in sdma_sw_clean_up_task()
649 __sdma_process_event(sde, sdma_event_e40_sw_cleaned); in sdma_sw_clean_up_task()
651 write_sequnlock(&sde->head_lock); in sdma_sw_clean_up_task()
652 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_sw_clean_up_task()
655 static void sdma_sw_tear_down(struct sdma_engine *sde) in sdma_sw_tear_down() argument
657 struct sdma_state *ss = &sde->state; in sdma_sw_tear_down()
663 atomic_set(&sde->dd->sdma_unfreeze_count, -1); in sdma_sw_tear_down()
664 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in sdma_sw_tear_down()
667 static void sdma_start_hw_clean_up(struct sdma_engine *sde) in sdma_start_hw_clean_up() argument
669 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task); in sdma_start_hw_clean_up()
672 static void sdma_set_state(struct sdma_engine *sde, in sdma_set_state() argument
675 struct sdma_state *ss = &sde->state; in sdma_set_state()
680 sde, in sdma_set_state()
691 sdma_flush(sde); in sdma_set_state()
712 sdma_sendctrl(sde, ss->current_op); in sdma_set_state()
750 int sdma_engine_get_vl(struct sdma_engine *sde) in sdma_engine_get_vl() argument
752 struct hfi1_devdata *dd = sde->dd; in sdma_engine_get_vl()
756 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES) in sdma_engine_get_vl()
765 vl = m->engine_to_vl[sde->this_idx]; in sdma_engine_get_vl()
806 rval = e->sde[selector & e->mask]; in sdma_select_engine_vl()
837 struct sdma_engine *sde[]; member
873 struct sdma_engine *sde = NULL; in sdma_select_user_engine() local
891 sde = map->sde[selector & map->mask]; in sdma_select_user_engine()
895 if (sde) in sdma_select_user_engine()
896 return sde; in sdma_select_user_engine()
907 map->sde[map->ctr + i] = map->sde[i]; in sdma_populate_sde_map()
911 struct sdma_engine *sde) in sdma_cleanup_sde_map() argument
917 if (map->sde[i] == sde) { in sdma_cleanup_sde_map()
918 memmove(&map->sde[i], &map->sde[i + 1], in sdma_cleanup_sde_map()
919 (map->ctr - i - 1) * sizeof(map->sde[0])); in sdma_cleanup_sde_map()
934 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, in sdma_set_cpu_to_sde_map() argument
937 struct hfi1_devdata *dd = sde->dd; in sdma_set_cpu_to_sde_map()
943 vl = sdma_engine_get_vl(sde); in sdma_set_cpu_to_sde_map()
961 dd_dev_warn(sde->dd, "Invalid CPU mask\n"); in sdma_set_cpu_to_sde_map()
973 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) { in sdma_set_cpu_to_sde_map()
996 rht_node->map[vl]->sde[0] = sde; in sdma_set_cpu_to_sde_map()
1004 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n", in sdma_set_cpu_to_sde_map()
1023 rht_node->map[vl]->sde[ctr - 1] = sde; in sdma_set_cpu_to_sde_map()
1051 sde); in sdma_set_cpu_to_sde_map()
1078 cpumask_copy(&sde->cpu_mask, new_mask); in sdma_set_cpu_to_sde_map()
1087 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf) in sdma_get_cpu_to_sde_map() argument
1090 if (cpumask_empty(&sde->cpu_mask)) in sdma_get_cpu_to_sde_map()
1093 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask); in sdma_get_cpu_to_sde_map()
1137 if (!rht_node->map[i]->sde[j]) in sdma_seqfile_dump_cpu_list()
1144 rht_node->map[i]->sde[j]->this_idx); in sdma_seqfile_dump_cpu_list()
1253 newmap->map[i]->sde[j] = in sdma_map_init()
1298 struct sdma_engine *sde; in sdma_clean() local
1315 sde = &dd->per_sdma[i]; in sdma_clean()
1317 sde->head_dma = NULL; in sdma_clean()
1318 sde->head_phys = 0; in sdma_clean()
1320 if (sde->descq) { in sdma_clean()
1323 sde->descq_cnt * sizeof(u64[2]), in sdma_clean()
1324 sde->descq, in sdma_clean()
1325 sde->descq_phys in sdma_clean()
1327 sde->descq = NULL; in sdma_clean()
1328 sde->descq_phys = 0; in sdma_clean()
1330 kvfree(sde->tx_ring); in sdma_clean()
1331 sde->tx_ring = NULL; in sdma_clean()
1364 struct sdma_engine *sde; in sdma_init() local
1420 sde = &dd->per_sdma[this_idx]; in sdma_init()
1421 sde->dd = dd; in sdma_init()
1422 sde->ppd = ppd; in sdma_init()
1423 sde->this_idx = this_idx; in sdma_init()
1424 sde->descq_cnt = descq_cnt; in sdma_init()
1425 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_init()
1426 sde->sdma_shift = ilog2(descq_cnt); in sdma_init()
1427 sde->sdma_mask = (1 << sde->sdma_shift) - 1; in sdma_init()
1430 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1432 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1434 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + in sdma_init()
1437 sde->imask = sde->int_mask | sde->progress_mask | in sdma_init()
1438 sde->idle_mask; in sdma_init()
1440 spin_lock_init(&sde->tail_lock); in sdma_init()
1441 seqlock_init(&sde->head_lock); in sdma_init()
1442 spin_lock_init(&sde->senddmactrl_lock); in sdma_init()
1443 spin_lock_init(&sde->flushlist_lock); in sdma_init()
1444 seqlock_init(&sde->waitlock); in sdma_init()
1446 sde->ahg_bits = 0xfffffffe00000000ULL; in sdma_init()
1448 sdma_set_state(sde, sdma_state_s00_hw_down); in sdma_init()
1451 kref_init(&sde->state.kref); in sdma_init()
1452 init_completion(&sde->state.comp); in sdma_init()
1454 INIT_LIST_HEAD(&sde->flushlist); in sdma_init()
1455 INIT_LIST_HEAD(&sde->dmawait); in sdma_init()
1457 sde->tail_csr = in sdma_init()
1460 tasklet_setup(&sde->sdma_hw_clean_up_task, in sdma_init()
1462 tasklet_setup(&sde->sdma_sw_clean_up_task, in sdma_init()
1464 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); in sdma_init()
1465 INIT_WORK(&sde->flush_worker, sdma_field_flush); in sdma_init()
1467 sde->progress_check_head = 0; in sdma_init()
1469 timer_setup(&sde->err_progress_check_timer, in sdma_init()
1472 sde->descq = dma_alloc_coherent(&dd->pcidev->dev, in sdma_init()
1474 &sde->descq_phys, GFP_KERNEL); in sdma_init()
1475 if (!sde->descq) in sdma_init()
1477 sde->tx_ring = in sdma_init()
1481 if (!sde->tx_ring) in sdma_init()
1509 sde = &dd->per_sdma[this_idx]; in sdma_init()
1511 sde->head_dma = curr_head; in sdma_init()
1513 phys_offset = (unsigned long)sde->head_dma - in sdma_init()
1515 sde->head_phys = dd->sdma_heads_phys + phys_offset; in sdma_init()
1516 init_sdma_regs(sde, per_sdma_credits, idle_cnt); in sdma_init()
1555 struct sdma_engine *sde; in sdma_all_running() local
1560 sde = &dd->per_sdma[i]; in sdma_all_running()
1561 sdma_process_event(sde, sdma_event_e30_go_running); in sdma_all_running()
1573 struct sdma_engine *sde; in sdma_all_idle() local
1578 sde = &dd->per_sdma[i]; in sdma_all_idle()
1579 sdma_process_event(sde, sdma_event_e70_go_idle); in sdma_all_idle()
1594 struct sdma_engine *sde; in sdma_start() local
1598 sde = &dd->per_sdma[i]; in sdma_start()
1599 sdma_process_event(sde, sdma_event_e10_go_hw_start); in sdma_start()
1610 struct sdma_engine *sde; in sdma_exit() local
1614 sde = &dd->per_sdma[this_idx]; in sdma_exit()
1615 if (!list_empty(&sde->dmawait)) in sdma_exit()
1617 sde->this_idx); in sdma_exit()
1618 sdma_process_event(sde, sdma_event_e00_go_hw_down); in sdma_exit()
1620 del_timer_sync(&sde->err_progress_check_timer); in sdma_exit()
1627 sdma_finalput(&sde->state); in sdma_exit()
1704 static inline u16 sdma_gethead(struct sdma_engine *sde) in sdma_gethead() argument
1706 struct hfi1_devdata *dd = sde->dd; in sdma_gethead()
1711 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_gethead()
1712 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_gethead()
1716 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && in sdma_gethead()
1719 (u16)le64_to_cpu(*sde->head_dma) : in sdma_gethead()
1720 (u16)read_sde_csr(sde, SD(HEAD)); in sdma_gethead()
1728 swhead = sde->descq_head & sde->sdma_mask; in sdma_gethead()
1730 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_gethead()
1731 cnt = sde->descq_cnt; in sdma_gethead()
1746 sde->this_idx, in sdma_gethead()
1767 static void sdma_desc_avail(struct sdma_engine *sde, uint avail) in sdma_desc_avail() argument
1774 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_desc_avail()
1776 dd_dev_err(sde->dd, "avail: %u\n", avail); in sdma_desc_avail()
1780 seq = read_seqbegin(&sde->waitlock); in sdma_desc_avail()
1781 if (!list_empty(&sde->dmawait)) { in sdma_desc_avail()
1783 write_seqlock(&sde->waitlock); in sdma_desc_avail()
1788 &sde->dmawait, in sdma_desc_avail()
1813 write_sequnlock(&sde->waitlock); in sdma_desc_avail()
1816 } while (read_seqretry(&sde->waitlock, seq)); in sdma_desc_avail()
1828 static void sdma_make_progress(struct sdma_engine *sde, u64 status) in sdma_make_progress() argument
1835 hwhead = sdma_gethead(sde); in sdma_make_progress()
1844 txp = get_txhead(sde); in sdma_make_progress()
1845 swhead = sde->descq_head & sde->sdma_mask; in sdma_make_progress()
1846 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); in sdma_make_progress()
1849 swhead = ++sde->descq_head & sde->sdma_mask; in sdma_make_progress()
1854 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; in sdma_make_progress()
1855 complete_tx(sde, txp, SDMA_TXREQ_S_OK); in sdma_make_progress()
1857 txp = get_txhead(sde); in sdma_make_progress()
1859 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); in sdma_make_progress()
1872 if ((status & sde->idle_mask) && !idle_check_done) { in sdma_make_progress()
1875 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_make_progress()
1877 hwhead = (u16)read_sde_csr(sde, SD(HEAD)); in sdma_make_progress()
1883 sde->last_status = status; in sdma_make_progress()
1885 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_make_progress()
1897 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) in sdma_engine_interrupt() argument
1899 trace_hfi1_sdma_engine_interrupt(sde, status); in sdma_engine_interrupt()
1900 write_seqlock(&sde->head_lock); in sdma_engine_interrupt()
1901 sdma_set_desc_cnt(sde, sdma_desct_intr); in sdma_engine_interrupt()
1902 if (status & sde->idle_mask) in sdma_engine_interrupt()
1903 sde->idle_int_cnt++; in sdma_engine_interrupt()
1904 else if (status & sde->progress_mask) in sdma_engine_interrupt()
1905 sde->progress_int_cnt++; in sdma_engine_interrupt()
1906 else if (status & sde->int_mask) in sdma_engine_interrupt()
1907 sde->sdma_int_cnt++; in sdma_engine_interrupt()
1908 sdma_make_progress(sde, status); in sdma_engine_interrupt()
1909 write_sequnlock(&sde->head_lock); in sdma_engine_interrupt()
1917 void sdma_engine_error(struct sdma_engine *sde, u64 status) in sdma_engine_error() argument
1922 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", in sdma_engine_error()
1923 sde->this_idx, in sdma_engine_error()
1925 sdma_state_names[sde->state.current_state]); in sdma_engine_error()
1927 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_engine_error()
1928 write_seqlock(&sde->head_lock); in sdma_engine_error()
1930 __sdma_process_event(sde, sdma_event_e60_hw_halted); in sdma_engine_error()
1932 dd_dev_err(sde->dd, in sdma_engine_error()
1934 sde->this_idx, in sdma_engine_error()
1936 sdma_state_names[sde->state.current_state]); in sdma_engine_error()
1937 dump_sdma_state(sde); in sdma_engine_error()
1939 write_sequnlock(&sde->head_lock); in sdma_engine_error()
1940 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_engine_error()
1943 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) in sdma_sendctrl() argument
1950 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", in sdma_sendctrl()
1951 sde->this_idx, in sdma_sendctrl()
1973 spin_lock_irqsave(&sde->senddmactrl_lock, flags); in sdma_sendctrl()
1975 sde->p_senddmactrl |= set_senddmactrl; in sdma_sendctrl()
1976 sde->p_senddmactrl &= ~clr_senddmactrl; in sdma_sendctrl()
1979 write_sde_csr(sde, SD(CTRL), in sdma_sendctrl()
1980 sde->p_senddmactrl | in sdma_sendctrl()
1983 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); in sdma_sendctrl()
1985 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); in sdma_sendctrl()
1988 sdma_dumpstate(sde); in sdma_sendctrl()
1992 static void sdma_setlengen(struct sdma_engine *sde) in sdma_setlengen() argument
1995 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_setlengen()
1996 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_setlengen()
2004 write_sde_csr(sde, SD(LEN_GEN), in sdma_setlengen()
2005 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); in sdma_setlengen()
2006 write_sde_csr(sde, SD(LEN_GEN), in sdma_setlengen()
2007 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | in sdma_setlengen()
2011 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) in sdma_update_tail() argument
2015 writeq(tail, sde->tail_csr); in sdma_update_tail()
2022 static void sdma_hw_start_up(struct sdma_engine *sde) in sdma_hw_start_up() argument
2027 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", in sdma_hw_start_up()
2028 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in sdma_hw_start_up()
2031 sdma_setlengen(sde); in sdma_hw_start_up()
2032 sdma_update_tail(sde, 0); /* Set SendDmaTail */ in sdma_hw_start_up()
2033 *sde->head_dma = 0; in sdma_hw_start_up()
2037 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); in sdma_hw_start_up()
2045 static void set_sdma_integrity(struct sdma_engine *sde) in set_sdma_integrity() argument
2047 struct hfi1_devdata *dd = sde->dd; in set_sdma_integrity()
2049 write_sde_csr(sde, SD(CHECK_ENABLE), in set_sdma_integrity()
2054 struct sdma_engine *sde, in init_sdma_regs() argument
2060 struct hfi1_devdata *dd = sde->dd; in init_sdma_regs()
2063 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); in init_sdma_regs()
2066 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); in init_sdma_regs()
2067 sdma_setlengen(sde); in init_sdma_regs()
2068 sdma_update_tail(sde, 0); /* Set SendDmaTail */ in init_sdma_regs()
2069 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); in init_sdma_regs()
2070 write_sde_csr(sde, SD(DESC_CNT), 0); in init_sdma_regs()
2071 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); in init_sdma_regs()
2072 write_sde_csr(sde, SD(MEMORY), in init_sdma_regs()
2074 ((u64)(credits * sde->this_idx) << in init_sdma_regs()
2076 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); in init_sdma_regs()
2077 set_sdma_integrity(sde); in init_sdma_regs()
2080 write_sde_csr(sde, SD(CHECK_OPCODE), in init_sdma_regs()
2088 csr = read_csr(sde->dd, reg); \
2089 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2093 csr = read_sde_csr(sde, reg); \
2094 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2095 #reg, sde->this_idx, csr); \
2099 csr = read_csr(sde->dd, reg + (8 * i)); \
2100 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2104 void sdma_dumpstate(struct sdma_engine *sde) in sdma_dumpstate() argument
2145 static void dump_sdma_state(struct sdma_engine *sde) in dump_sdma_state() argument
2154 head = sde->descq_head & sde->sdma_mask; in dump_sdma_state()
2155 tail = sde->descq_tail & sde->sdma_mask; in dump_sdma_state()
2156 cnt = sdma_descq_freecnt(sde); in dump_sdma_state()
2158 dd_dev_err(sde->dd, in dump_sdma_state()
2160 sde->this_idx, head, tail, cnt, in dump_sdma_state()
2161 !list_empty(&sde->flushlist)); in dump_sdma_state()
2167 descqp = &sde->descq[head]; in dump_sdma_state()
2181 dd_dev_err(sde->dd, in dump_sdma_state()
2184 dd_dev_err(sde->dd, in dump_sdma_state()
2188 dd_dev_err(sde->dd, in dump_sdma_state()
2200 head &= sde->sdma_mask; in dump_sdma_state()
2213 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) in sdma_seqfile_dump_sde() argument
2222 head = sde->descq_head & sde->sdma_mask; in sdma_seqfile_dump_sde()
2223 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; in sdma_seqfile_dump_sde()
2224 seq_printf(s, SDE_FMT, sde->this_idx, in sdma_seqfile_dump_sde()
2225 sde->cpu, in sdma_seqfile_dump_sde()
2226 sdma_state_name(sde->state.current_state), in sdma_seqfile_dump_sde()
2227 (unsigned long long)read_sde_csr(sde, SD(CTRL)), in sdma_seqfile_dump_sde()
2228 (unsigned long long)read_sde_csr(sde, SD(STATUS)), in sdma_seqfile_dump_sde()
2229 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), in sdma_seqfile_dump_sde()
2230 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, in sdma_seqfile_dump_sde()
2231 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, in sdma_seqfile_dump_sde()
2232 (unsigned long long)le64_to_cpu(*sde->head_dma), in sdma_seqfile_dump_sde()
2233 (unsigned long long)read_sde_csr(sde, SD(MEMORY)), in sdma_seqfile_dump_sde()
2234 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), in sdma_seqfile_dump_sde()
2235 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), in sdma_seqfile_dump_sde()
2236 (unsigned long long)sde->last_status, in sdma_seqfile_dump_sde()
2237 (unsigned long long)sde->ahg_bits, in sdma_seqfile_dump_sde()
2238 sde->tx_tail, in sdma_seqfile_dump_sde()
2239 sde->tx_head, in sdma_seqfile_dump_sde()
2240 sde->descq_tail, in sdma_seqfile_dump_sde()
2241 sde->descq_head, in sdma_seqfile_dump_sde()
2242 !list_empty(&sde->flushlist), in sdma_seqfile_dump_sde()
2243 sde->descq_full_count, in sdma_seqfile_dump_sde()
2244 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); in sdma_seqfile_dump_sde()
2250 descqp = &sde->descq[head]; in sdma_seqfile_dump_sde()
2275 head = (head + 1) & sde->sdma_mask; in sdma_seqfile_dump_sde()
2283 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) in add_gen() argument
2285 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; in add_gen()
2309 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) in submit_tx() argument
2316 tail = sde->descq_tail & sde->sdma_mask; in submit_tx()
2317 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); in submit_tx()
2318 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); in submit_tx()
2319 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], in submit_tx()
2320 tail, &sde->descq[tail]); in submit_tx()
2321 tail = ++sde->descq_tail & sde->sdma_mask; in submit_tx()
2328 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); in submit_tx()
2335 qw1 = add_gen(sde, descp->qw[1]); in submit_tx()
2337 sde->descq[tail].qw[1] = cpu_to_le64(qw1); in submit_tx()
2338 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, in submit_tx()
2339 tail, &sde->descq[tail]); in submit_tx()
2340 tail = ++sde->descq_tail & sde->sdma_mask; in submit_tx()
2344 tx->sn = sde->tail_sn++; in submit_tx()
2345 trace_hfi1_sdma_in_sn(sde, tx->sn); in submit_tx()
2346 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); in submit_tx()
2348 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; in submit_tx()
2349 sde->desc_avail -= tx->num_desc; in submit_tx()
2357 struct sdma_engine *sde, in sdma_check_progress() argument
2364 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_check_progress()
2365 if (tx->num_desc <= sde->desc_avail) in sdma_check_progress()
2372 (const seqcount_t *)&sde->head_lock.seqcount); in sdma_check_progress()
2373 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); in sdma_check_progress()
2375 sde->desc_avail = sdma_descq_freecnt(sde); in sdma_check_progress()
2397 int sdma_send_txreq(struct sdma_engine *sde, in sdma_send_txreq() argument
2410 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_send_txreq()
2412 if (unlikely(!__sdma_running(sde))) in sdma_send_txreq()
2414 if (unlikely(tx->num_desc > sde->desc_avail)) in sdma_send_txreq()
2416 tail = submit_tx(sde, tx); in sdma_send_txreq()
2419 sdma_update_tail(sde, tail); in sdma_send_txreq()
2421 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_send_txreq()
2428 tx->sn = sde->tail_sn++; in sdma_send_txreq()
2429 trace_hfi1_sdma_in_sn(sde, tx->sn); in sdma_send_txreq()
2431 spin_lock(&sde->flushlist_lock); in sdma_send_txreq()
2432 list_add_tail(&tx->list, &sde->flushlist); in sdma_send_txreq()
2433 spin_unlock(&sde->flushlist_lock); in sdma_send_txreq()
2435 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); in sdma_send_txreq()
2439 ret = sdma_check_progress(sde, wait, tx, pkts_sent); in sdma_send_txreq()
2444 sde->descq_full_count++; in sdma_send_txreq()
2476 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, in sdma_send_txlist() argument
2485 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_send_txlist()
2489 if (unlikely(!__sdma_running(sde))) in sdma_send_txlist()
2491 if (unlikely(tx->num_desc > sde->desc_avail)) in sdma_send_txlist()
2498 tail = submit_tx(sde, tx); in sdma_send_txlist()
2502 sdma_update_tail(sde, tail); in sdma_send_txlist()
2514 sdma_update_tail(sde, tail); in sdma_send_txlist()
2515 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_send_txlist()
2519 spin_lock(&sde->flushlist_lock); in sdma_send_txlist()
2525 tx->sn = sde->tail_sn++; in sdma_send_txlist()
2526 trace_hfi1_sdma_in_sn(sde, tx->sn); in sdma_send_txlist()
2528 list_add_tail(&tx->list, &sde->flushlist); in sdma_send_txlist()
2532 spin_unlock(&sde->flushlist_lock); in sdma_send_txlist()
2533 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); in sdma_send_txlist()
2537 ret = sdma_check_progress(sde, wait, tx, submit_count > 0); in sdma_send_txlist()
2542 sde->descq_full_count++; in sdma_send_txlist()
2546 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) in sdma_process_event() argument
2550 spin_lock_irqsave(&sde->tail_lock, flags); in sdma_process_event()
2551 write_seqlock(&sde->head_lock); in sdma_process_event()
2553 __sdma_process_event(sde, event); in sdma_process_event()
2555 if (sde->state.current_state == sdma_state_s99_running) in sdma_process_event()
2556 sdma_desc_avail(sde, sdma_descq_freecnt(sde)); in sdma_process_event()
2558 write_sequnlock(&sde->head_lock); in sdma_process_event()
2559 spin_unlock_irqrestore(&sde->tail_lock, flags); in sdma_process_event()
2562 static void __sdma_process_event(struct sdma_engine *sde, in __sdma_process_event() argument
2565 struct sdma_state *ss = &sde->state; in __sdma_process_event()
2570 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, in __sdma_process_event()
2592 sdma_get(&sde->state); in __sdma_process_event()
2593 sdma_set_state(sde, in __sdma_process_event()
2601 sdma_sw_tear_down(sde); in __sdma_process_event()
2625 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2626 sdma_sw_tear_down(sde); in __sdma_process_event()
2631 sdma_set_state(sde, in __sdma_process_event()
2633 sdma_start_hw_clean_up(sde); in __sdma_process_event()
2645 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2666 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2667 sdma_sw_tear_down(sde); in __sdma_process_event()
2674 sdma_hw_start_up(sde); in __sdma_process_event()
2675 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2707 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2708 sdma_sw_tear_down(sde); in __sdma_process_event()
2717 sdma_set_state(sde, sdma_state_s99_running); in __sdma_process_event()
2725 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); in __sdma_process_event()
2726 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2732 sdma_set_state(sde, sdma_state_s80_hw_freeze); in __sdma_process_event()
2733 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
2734 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
2748 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2760 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); in __sdma_process_event()
2761 sdma_start_hw_clean_up(sde); in __sdma_process_event()
2787 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2788 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2795 sdma_hw_start_up(sde); in __sdma_process_event()
2796 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2829 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2830 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2835 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); in __sdma_process_event()
2836 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2848 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2870 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2871 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2876 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); in __sdma_process_event()
2877 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2889 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
2910 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2911 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2934 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); in __sdma_process_event()
2935 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2949 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2950 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
2963 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
2964 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
2978 sdma_hw_start_up(sde); in __sdma_process_event()
2979 sdma_set_state(sde, ss->go_s99_running ? in __sdma_process_event()
2993 sdma_set_state(sde, sdma_state_s00_hw_down); in __sdma_process_event()
2994 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); in __sdma_process_event()
3010 sdma_err_progress_check_schedule(sde); in __sdma_process_event()
3017 sdma_set_state(sde, sdma_state_s50_hw_halt_wait); in __sdma_process_event()
3018 schedule_work(&sde->err_halt_worker); in __sdma_process_event()
3021 sdma_set_state(sde, sdma_state_s60_idle_halt_wait); in __sdma_process_event()
3027 sdma_set_state(sde, sdma_state_s80_hw_freeze); in __sdma_process_event()
3028 atomic_dec(&sde->dd->sdma_unfreeze_count); in __sdma_process_event()
3029 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); in __sdma_process_event()
3041 sdma_make_progress(sde, 0); in __sdma_process_event()
3183 struct sdma_engine *sde; in sdma_update_lmc() local
3195 sde = &dd->per_sdma[i]; in sdma_update_lmc()
3196 write_sde_csr(sde, SD(CHECK_SLID), sreg); in sdma_update_lmc()
3290 int sdma_ahg_alloc(struct sdma_engine *sde) in sdma_ahg_alloc() argument
3295 if (!sde) { in sdma_ahg_alloc()
3296 trace_hfi1_ahg_allocate(sde, -EINVAL); in sdma_ahg_alloc()
3300 nr = ffz(READ_ONCE(sde->ahg_bits)); in sdma_ahg_alloc()
3302 trace_hfi1_ahg_allocate(sde, -ENOSPC); in sdma_ahg_alloc()
3305 oldbit = test_and_set_bit(nr, &sde->ahg_bits); in sdma_ahg_alloc()
3310 trace_hfi1_ahg_allocate(sde, nr); in sdma_ahg_alloc()
3321 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) in sdma_ahg_free() argument
3323 if (!sde) in sdma_ahg_free()
3325 trace_hfi1_ahg_deallocate(sde, ahg_index); in sdma_ahg_free()
3328 clear_bit(ahg_index, &sde->ahg_bits); in sdma_ahg_free()
3416 struct sdma_engine *sde) in _sdma_engine_progress_schedule() argument
3418 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); in _sdma_engine_progress_schedule()
3420 write_csr(sde->dd, in _sdma_engine_progress_schedule()
3422 sde->progress_mask); in _sdma_engine_progress_schedule()
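
A pattern that recurs throughout the matches above is free-running head/tail counters wrapped with a power-of-two mask, with the bits above sdma_shift reused as a generation tag (see the get_txhead(), add_gen(), submit_tx(), and sdma_make_progress() hits). The stand-alone C sketch below models only that indexing idea; it is not taken from the driver, and every name in it (toy_ring, freecnt, DESCQ_CNT, etc.) is hypothetical.

/*
 * Minimal, self-contained model of power-of-two ring indexing:
 * head and tail are free-running counters, mask = DESCQ_CNT - 1
 * wraps them into the ring, and the bits above `shift` give a
 * 2-bit generation tag, loosely mirroring descq_head & sdma_mask
 * and (descq_tail >> sdma_shift) & 3 in the listing above.
 */
#include <stdint.h>
#include <stdio.h>

#define DESCQ_CNT 8u                      /* must be a power of two */

struct toy_ring {
	uint16_t head;                    /* free-running consumer index */
	uint16_t tail;                    /* free-running producer index */
	uint16_t mask;                    /* DESCQ_CNT - 1 */
	unsigned shift;                   /* log2(DESCQ_CNT) */
	int slots[DESCQ_CNT];
};

static unsigned freecnt(const struct toy_ring *r)
{
	/* entries still available to the producer */
	return DESCQ_CNT - (uint16_t)(r->tail - r->head);
}

int main(void)
{
	struct toy_ring r = { .mask = DESCQ_CNT - 1, .shift = 3 };

	for (int i = 0; i < 20; i++) {
		if (!freecnt(&r))                         /* ring full: retire one entry */
			r.slots[r.head++ & r.mask] = 0;

		unsigned slot = r.tail & r.mask;          /* wrapped ring index */
		unsigned gen  = (r.tail >> r.shift) & 3;  /* 2-bit generation tag */

		r.slots[slot] = i;
		r.tail++;
		printf("tail=%u slot=%u gen=%u free=%u\n",
		       (unsigned)r.tail, slot, gen, freecnt(&r));
	}
	return 0;
}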