Lines Matching refs:dd
142 #define emulator_rev(dd) ((dd)->irev >> 8) argument
144 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3) argument
145 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4) argument
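The three emulator macros above split the irev word: the low nibble encodes the emulated platform (3 for "p", 4 for "s") and the bits from 8 up hold the emulator revision. A minimal userspace sketch of that decoding; the sample irev value is invented:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t irev = 0x2a04;               /* hypothetical irev value */
	uint32_t rev  = irev >> 8;            /* emulator_rev() */
	int is_p = (irev & 0xf) == 3;         /* is_emulator_p() */
	int is_s = (irev & 0xf) == 4;         /* is_emulator_s() */

	printf("rev=0x%x p=%d s=%d\n", (unsigned)rev, is_p, is_s);
	return 0;
}
```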
1044 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1045 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1046 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1048 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1050 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1052 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1054 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1056 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1057 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1060 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1062 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1063 static void handle_dcc_err(struct hfi1_devdata *dd,
1065 static void handle_lcb_err(struct hfi1_devdata *dd,
1067 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1068 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1069 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1070 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1071 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1072 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1073 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1074 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1079 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1081 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1082 static int thermal_init(struct hfi1_devdata *dd);
1095 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1096 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1097 static void handle_temp_err(struct hfi1_devdata *dd);
1098 static void dc_shutdown(struct hfi1_devdata *dd);
1099 static void dc_start(struct hfi1_devdata *dd);
1100 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1103 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1104 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1117 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1333 const struct hfi1_devdata *dd, in hfi1_addr_from_offset() argument
1336 if (offset >= dd->base2_start) in hfi1_addr_from_offset()
1337 return dd->kregbase2 + (offset - dd->base2_start); in hfi1_addr_from_offset()
1338 return dd->kregbase1 + offset; in hfi1_addr_from_offset()
1349 u64 read_csr(const struct hfi1_devdata *dd, u32 offset) in read_csr() argument
1351 if (dd->flags & HFI1_PRESENT) in read_csr()
1352 return readq(hfi1_addr_from_offset(dd, offset)); in read_csr()
1362 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value) in write_csr() argument
1364 if (dd->flags & HFI1_PRESENT) { in write_csr()
1365 void __iomem *base = hfi1_addr_from_offset(dd, offset); in write_csr()
1368 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) in write_csr()
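hfi1_addr_from_offset() maps a CSR offset into one of two mapped regions with base2_start as the split point, and read_csr()/write_csr() only touch hardware while HFI1_PRESENT is set. A hedged userspace model of the two-region translation; the region sizes and backing arrays are invented for the sketch:

```c
#include <assert.h>
#include <stdint.h>

/* Toy model of the split: offsets below base2_start resolve against
 * kregbase1, offsets at or above it against kregbase2. */
struct dev {
	uint64_t kregbase1[16];
	uint64_t kregbase2[16];
	uint32_t base2_start;                 /* boundary between regions */
};

static uint64_t *addr_from_offset(struct dev *dd, uint32_t offset)
{
	if (offset >= dd->base2_start)
		return &dd->kregbase2[(offset - dd->base2_start) / 8];
	return &dd->kregbase1[offset / 8];
}

int main(void)
{
	struct dev dd = { .base2_start = 64 };

	*addr_from_offset(&dd, 8)  = 0x11;    /* lands in region 1 */
	*addr_from_offset(&dd, 72) = 0x22;    /* lands in region 2 */
	assert(dd.kregbase1[1] == 0x11);
	assert(dd.kregbase2[1] == 0x22);
	return 0;
}
```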
1383 const struct hfi1_devdata *dd, in get_csr_addr() argument
1386 if (dd->flags & HFI1_PRESENT) in get_csr_addr()
1387 return hfi1_addr_from_offset(dd, offset); in get_csr_addr()
1391 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, in read_write_csr() argument
1397 ret = read_csr(dd, csr); in read_write_csr()
1399 write_csr(dd, csr, value); in read_write_csr()
1402 dd_dev_err(dd, "Invalid cntr register access mode"); in read_write_csr()
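read_write_csr() is a mode dispatcher: one mode reads the CSR, one writes it, and anything else logs the error seen above. A small stand-alone sketch of the same shape; the mode constants and the fake register are stand-ins for CNTR_MODE_* and real MMIO:

```c
#include <stdint.h>
#include <stdio.h>

enum { MODE_R, MODE_W };                      /* stand-ins for CNTR_MODE_* */

static uint64_t fake_csr;                     /* pretend hardware register */

static uint64_t read_write(int mode, uint64_t value)
{
	if (mode == MODE_R)
		return fake_csr;
	if (mode == MODE_W) {
		fake_csr = value;
		return value;
	}
	fprintf(stderr, "Invalid cntr register access mode\n");
	return 0;
}

int main(void)
{
	read_write(MODE_W, 42);
	printf("%llu\n", (unsigned long long)read_write(MODE_R, 0));
	return 0;
}
```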
1414 struct hfi1_devdata *dd = context; in dev_access_u32_csr() local
1425 return read_write_csr(dd, csr, mode, data); in dev_access_u32_csr()
1431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_err_cnt() local
1433 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_err_cnt()
1434 return dd->per_sdma[idx].err_cnt; in access_sde_err_cnt()
1441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_int_cnt() local
1443 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_int_cnt()
1444 return dd->per_sdma[idx].sdma_int_cnt; in access_sde_int_cnt()
1451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_idle_int_cnt() local
1453 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_idle_int_cnt()
1454 return dd->per_sdma[idx].idle_int_cnt; in access_sde_idle_int_cnt()
1462 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sde_progress_int_cnt() local
1464 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_progress_int_cnt()
1465 return dd->per_sdma[idx].progress_int_cnt; in access_sde_progress_int_cnt()
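The four access_sde_*_cnt() helpers above share one guard: dereference per_sdma[idx] only when the array has been allocated and idx is below num_sdma, otherwise report 0. A compact sketch of that guarded lookup; the struct layout is invented:

```c
#include <stdint.h>
#include <stdio.h>

struct engine { uint64_t err_cnt; };          /* invented stand-in */

struct dev {
	struct engine *per_sdma;              /* may be NULL early in init */
	unsigned int num_sdma;
};

static uint64_t sde_err_cnt(struct dev *dd, unsigned int idx)
{
	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;                             /* NULL array or out of range */
}

int main(void)
{
	struct engine engines[2] = { { 3 }, { 7 } };
	struct dev dd = { engines, 2 };

	printf("%llu %llu\n",
	       (unsigned long long)sde_err_cnt(&dd, 1),    /* 7 */
	       (unsigned long long)sde_err_cnt(&dd, 9));   /* 0, guarded */
	return 0;
}
```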
1472 struct hfi1_devdata *dd = context; in dev_access_u64_csr() local
1486 val = read_write_csr(dd, csr, mode, data); in dev_access_u64_csr()
1493 struct hfi1_devdata *dd = context; in dc_access_lcb_cntr() local
1500 ret = read_lcb_csr(dd, csr, &data); in dc_access_lcb_cntr()
1502 ret = write_lcb_csr(dd, csr, data); in dc_access_lcb_cntr()
1505 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr); in dc_access_lcb_cntr()
1521 return read_write_csr(ppd->dd, entry->csr, mode, data); in port_access_u32_csr()
1539 val = read_write_csr(ppd->dd, csr, mode, data); in port_access_u64_csr()
1544 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, in read_write_sw() argument
1555 dd_dev_err(dd, "Invalid cntr sw access mode"); in read_write_sw()
1571 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); in access_sw_link_dn_cnt()
1581 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); in access_sw_link_up_cnt()
1592 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); in access_sw_unknown_frame_cnt()
1609 return read_write_sw(ppd->dd, counter, mode, data); in access_sw_xmit_discards()
1621 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, in access_xmit_constraint_errs()
1633 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, in access_rcv_constraint_errs()
1647 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, in read_write_cpu() argument
1663 dd_dev_err(dd, "Per CPU cntrs can only be zeroed"); in read_write_cpu()
1665 dd_dev_err(dd, "Invalid cntr sw cpu access mode"); in read_write_cpu()
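read_write_cpu() implements per-CPU counters that are never reset in place: judging from the error strings above, writes other than zero are rejected, and "zeroing" snapshots the current total into *z_val so later reads can report total minus baseline. A userspace approximation of that baseline scheme, with a plain array standing in for per-CPU storage (the read path is reconstructed, since this listing shows only the error branches):

```c
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static uint64_t percpu[NCPUS];                /* stand-in for per-CPU slots */
static uint64_t z_val;                        /* baseline taken at "zero" */

static uint64_t total(void)
{
	uint64_t sum = 0;
	for (int i = 0; i < NCPUS; i++)
		sum += percpu[i];
	return sum;
}

/* Read = total - baseline; writing 0 moves the baseline forward;
 * any other write is rejected. */
static uint64_t read_write_cpu(int write, uint64_t value)
{
	if (!write)
		return total() - z_val;
	if (value == 0) {
		z_val = total();
		return 0;
	}
	fprintf(stderr, "Per CPU cntrs can only be zeroed\n");
	return 0;
}

int main(void)
{
	percpu[0] = 5; percpu[2] = 7;
	printf("%llu\n", (unsigned long long)read_write_cpu(0, 0)); /* 12 */
	read_write_cpu(1, 0);                 /* "zero" the counter */
	percpu[1] = 2;
	printf("%llu\n", (unsigned long long)read_write_cpu(0, 0)); /* 2 */
	return 0;
}
```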
1675 struct hfi1_devdata *dd = context; in access_sw_cpu_intr() local
1677 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, in access_sw_cpu_intr()
1684 struct hfi1_devdata *dd = context; in access_sw_cpu_rcv_limit() local
1686 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, in access_sw_cpu_rcv_limit()
1693 struct hfi1_devdata *dd = context; in access_sw_pio_wait() local
1695 return dd->verbs_dev.n_piowait; in access_sw_pio_wait()
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_pio_drain() local
1703 return dd->verbs_dev.n_piodrain; in access_sw_pio_drain()
1709 struct hfi1_devdata *dd = context; in access_sw_ctx0_seq_drop() local
1711 return dd->ctx0_seq_drop; in access_sw_ctx0_seq_drop()
1717 struct hfi1_devdata *dd = context; in access_sw_vtx_wait() local
1719 return dd->verbs_dev.n_txwait; in access_sw_vtx_wait()
1725 struct hfi1_devdata *dd = context; in access_sw_kmem_wait() local
1727 return dd->verbs_dev.n_kmem_wait; in access_sw_kmem_wait()
1733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_send_schedule() local
1735 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, in access_sw_send_schedule()
1744 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_pll_lock_fail_err_cnt() local
1746 return dd->misc_err_status_cnt[12]; in access_misc_pll_lock_fail_err_cnt()
1753 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_mbist_fail_err_cnt() local
1755 return dd->misc_err_status_cnt[11]; in access_misc_mbist_fail_err_cnt()
1762 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_invalid_eep_cmd_err_cnt() local
1764 return dd->misc_err_status_cnt[10]; in access_misc_invalid_eep_cmd_err_cnt()
1771 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_done_parity_err_cnt() local
1773 return dd->misc_err_status_cnt[9]; in access_misc_efuse_done_parity_err_cnt()
1780 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_write_err_cnt() local
1782 return dd->misc_err_status_cnt[8]; in access_misc_efuse_write_err_cnt()
1789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_read_bad_addr_err_cnt() local
1791 return dd->misc_err_status_cnt[7]; in access_misc_efuse_read_bad_addr_err_cnt()
1798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_efuse_csr_parity_err_cnt() local
1800 return dd->misc_err_status_cnt[6]; in access_misc_efuse_csr_parity_err_cnt()
1807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_fw_auth_failed_err_cnt() local
1809 return dd->misc_err_status_cnt[5]; in access_misc_fw_auth_failed_err_cnt()
1816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_key_mismatch_err_cnt() local
1818 return dd->misc_err_status_cnt[4]; in access_misc_key_mismatch_err_cnt()
1825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_sbus_write_failed_err_cnt() local
1827 return dd->misc_err_status_cnt[3]; in access_misc_sbus_write_failed_err_cnt()
1834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_write_bad_addr_err_cnt() local
1836 return dd->misc_err_status_cnt[2]; in access_misc_csr_write_bad_addr_err_cnt()
1843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_read_bad_addr_err_cnt() local
1845 return dd->misc_err_status_cnt[1]; in access_misc_csr_read_bad_addr_err_cnt()
1852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_misc_csr_parity_err_cnt() local
1854 return dd->misc_err_status_cnt[0]; in access_misc_csr_parity_err_cnt()
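From here down, the listing is dominated by one template repeated per hardware error-status bit: a tiny accessor casts the opaque context back to the device and returns a fixed slot of a software counter array (misc_err_status_cnt above, then cce_err_status_cnt, rcv_err_status_cnt, and the send/PIO/SDMA/egress arrays), with the slot index tracking the bit position. One representative sketch stands in for the whole family; the real accessors take additional cntr_entry/vl/mode/data parameters that are trimmed here:

```c
#include <stdint.h>

#define NUM_MISC_ERRS 13                      /* invented size for the sketch */

struct dev { uint64_t misc_err_status_cnt[NUM_MISC_ERRS]; };

/* Template shared by the accessors in these tables: the counter
 * framework invokes them through a function pointer with an opaque
 * context, so each status bit appears as its own named counter. */
static uint64_t access_pll_lock_fail_err_cnt(void *context)
{
	struct dev *dd = context;

	return dd->misc_err_status_cnt[12];
}

int main(void)
{
	struct dev dd = { .misc_err_status_cnt = { [12] = 9 } };

	return access_pll_lock_fail_err_cnt(&dd) == 9 ? 0 : 1;
}
```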
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sw_cce_err_status_aggregated_cnt() local
1867 return dd->sw_cce_err_status_aggregate; in access_sw_cce_err_status_aggregated_cnt()
1878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_csr_parity_err_cnt() local
1880 return dd->cce_err_status_cnt[40]; in access_cce_msix_csr_parity_err_cnt()
1887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_int_map_unc_err_cnt() local
1889 return dd->cce_err_status_cnt[39]; in access_cce_int_map_unc_err_cnt()
1896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_int_map_cor_err_cnt() local
1898 return dd->cce_err_status_cnt[38]; in access_cce_int_map_cor_err_cnt()
1905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_table_unc_err_cnt() local
1907 return dd->cce_err_status_cnt[37]; in access_cce_msix_table_unc_err_cnt()
1914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_msix_table_cor_err_cnt() local
1916 return dd->cce_err_status_cnt[36]; in access_cce_msix_table_cor_err_cnt()
1923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rxdma_conv_fifo_parity_err_cnt() local
1925 return dd->cce_err_status_cnt[35]; in access_cce_rxdma_conv_fifo_parity_err_cnt()
1932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rcpl_async_fifo_parity_err_cnt() local
1934 return dd->cce_err_status_cnt[34]; in access_cce_rcpl_async_fifo_parity_err_cnt()
1941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_seg_write_bad_addr_err_cnt() local
1943 return dd->cce_err_status_cnt[33]; in access_cce_seg_write_bad_addr_err_cnt()
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_seg_read_bad_addr_err_cnt() local
1952 return dd->cce_err_status_cnt[32]; in access_cce_seg_read_bad_addr_err_cnt()
1958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_la_triggered_cnt() local
1960 return dd->cce_err_status_cnt[31]; in access_la_triggered_cnt()
1967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_cpl_timeout_err_cnt() local
1969 return dd->cce_err_status_cnt[30]; in access_cce_trgt_cpl_timeout_err_cnt()
1976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_receive_parity_err_cnt() local
1978 return dd->cce_err_status_cnt[29]; in access_pcic_receive_parity_err_cnt()
1985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_transmit_back_parity_err_cnt() local
1987 return dd->cce_err_status_cnt[28]; in access_pcic_transmit_back_parity_err_cnt()
1994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_transmit_front_parity_err_cnt() local
1996 return dd->cce_err_status_cnt[27]; in access_pcic_transmit_front_parity_err_cnt()
2003 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_dat_q_unc_err_cnt() local
2005 return dd->cce_err_status_cnt[26]; in access_pcic_cpl_dat_q_unc_err_cnt()
2012 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_hd_q_unc_err_cnt() local
2014 return dd->cce_err_status_cnt[25]; in access_pcic_cpl_hd_q_unc_err_cnt()
2021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_dat_q_unc_err_cnt() local
2023 return dd->cce_err_status_cnt[24]; in access_pcic_post_dat_q_unc_err_cnt()
2030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_hd_q_unc_err_cnt() local
2032 return dd->cce_err_status_cnt[23]; in access_pcic_post_hd_q_unc_err_cnt()
2039 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_sot_mem_unc_err_cnt() local
2041 return dd->cce_err_status_cnt[22]; in access_pcic_retry_sot_mem_unc_err_cnt()
2048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_mem_unc_err() local
2050 return dd->cce_err_status_cnt[21]; in access_pcic_retry_mem_unc_err()
2057 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_n_post_dat_q_parity_err_cnt() local
2059 return dd->cce_err_status_cnt[20]; in access_pcic_n_post_dat_q_parity_err_cnt()
2066 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_n_post_h_q_parity_err_cnt() local
2068 return dd->cce_err_status_cnt[19]; in access_pcic_n_post_h_q_parity_err_cnt()
2075 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_dat_q_cor_err_cnt() local
2077 return dd->cce_err_status_cnt[18]; in access_pcic_cpl_dat_q_cor_err_cnt()
2084 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_cpl_hd_q_cor_err_cnt() local
2086 return dd->cce_err_status_cnt[17]; in access_pcic_cpl_hd_q_cor_err_cnt()
2093 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_dat_q_cor_err_cnt() local
2095 return dd->cce_err_status_cnt[16]; in access_pcic_post_dat_q_cor_err_cnt()
2102 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_post_hd_q_cor_err_cnt() local
2104 return dd->cce_err_status_cnt[15]; in access_pcic_post_hd_q_cor_err_cnt()
2111 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_sot_mem_cor_err_cnt() local
2113 return dd->cce_err_status_cnt[14]; in access_pcic_retry_sot_mem_cor_err_cnt()
2120 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pcic_retry_mem_cor_err_cnt() local
2122 return dd->cce_err_status_cnt[13]; in access_pcic_retry_mem_cor_err_cnt()
2129 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_dbg_parity_err_cnt() local
2131 return dd->cce_err_status_cnt[12]; in access_cce_cli1_async_fifo_dbg_parity_err_cnt()
2138 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt() local
2140 return dd->cce_err_status_cnt[11]; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt()
2147 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt() local
2149 return dd->cce_err_status_cnt[10]; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt()
2156 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt() local
2158 return dd->cce_err_status_cnt[9]; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt()
2165 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli2_async_fifo_parity_err_cnt() local
2167 return dd->cce_err_status_cnt[8]; in access_cce_cli2_async_fifo_parity_err_cnt()
2174 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_cfg_bus_parity_err_cnt() local
2176 return dd->cce_err_status_cnt[7]; in access_cce_csr_cfg_bus_parity_err_cnt()
2183 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_cli0_async_fifo_parity_err_cnt() local
2185 return dd->cce_err_status_cnt[6]; in access_cce_cli0_async_fifo_parity_err_cnt()
2192 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_rspd_data_parity_err_cnt() local
2194 return dd->cce_err_status_cnt[5]; in access_cce_rspd_data_parity_err_cnt()
2201 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_access_err_cnt() local
2203 return dd->cce_err_status_cnt[4]; in access_cce_trgt_access_err_cnt()
2210 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_trgt_async_fifo_parity_err_cnt() local
2212 return dd->cce_err_status_cnt[3]; in access_cce_trgt_async_fifo_parity_err_cnt()
2219 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_write_bad_addr_err_cnt() local
2221 return dd->cce_err_status_cnt[2]; in access_cce_csr_write_bad_addr_err_cnt()
2228 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_cce_csr_read_bad_addr_err_cnt() local
2230 return dd->cce_err_status_cnt[1]; in access_cce_csr_read_bad_addr_err_cnt()
2237 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_ccs_csr_parity_err_cnt() local
2239 return dd->cce_err_status_cnt[0]; in access_ccs_csr_parity_err_cnt()
2250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_parity_err_cnt() local
2252 return dd->rcv_err_status_cnt[63]; in access_rx_csr_parity_err_cnt()
2259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_write_bad_addr_err_cnt() local
2261 return dd->rcv_err_status_cnt[62]; in access_rx_csr_write_bad_addr_err_cnt()
2268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_csr_read_bad_addr_err_cnt() local
2270 return dd->rcv_err_status_cnt[61]; in access_rx_csr_read_bad_addr_err_cnt()
2277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_unc_err_cnt() local
2279 return dd->rcv_err_status_cnt[60]; in access_rx_dma_csr_unc_err_cnt()
2286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_dq_fsm_encoding_err_cnt() local
2288 return dd->rcv_err_status_cnt[59]; in access_rx_dma_dq_fsm_encoding_err_cnt()
2295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_eq_fsm_encoding_err_cnt() local
2297 return dd->rcv_err_status_cnt[58]; in access_rx_dma_eq_fsm_encoding_err_cnt()
2304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_parity_err_cnt() local
2306 return dd->rcv_err_status_cnt[57]; in access_rx_dma_csr_parity_err_cnt()
2313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_data_cor_err_cnt() local
2315 return dd->rcv_err_status_cnt[56]; in access_rx_rbuf_data_cor_err_cnt()
2322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_data_unc_err_cnt() local
2324 return dd->rcv_err_status_cnt[55]; in access_rx_rbuf_data_unc_err_cnt()
2331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_data_fifo_rd_cor_err_cnt() local
2333 return dd->rcv_err_status_cnt[54]; in access_rx_dma_data_fifo_rd_cor_err_cnt()
2340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_data_fifo_rd_unc_err_cnt() local
2342 return dd->rcv_err_status_cnt[53]; in access_rx_dma_data_fifo_rd_unc_err_cnt()
2349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_hdr_fifo_rd_cor_err_cnt() local
2351 return dd->rcv_err_status_cnt[52]; in access_rx_dma_hdr_fifo_rd_cor_err_cnt()
2358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_hdr_fifo_rd_unc_err_cnt() local
2360 return dd->rcv_err_status_cnt[51]; in access_rx_dma_hdr_fifo_rd_unc_err_cnt()
2367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part2_cor_err_cnt() local
2369 return dd->rcv_err_status_cnt[50]; in access_rx_rbuf_desc_part2_cor_err_cnt()
2376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part2_unc_err_cnt() local
2378 return dd->rcv_err_status_cnt[49]; in access_rx_rbuf_desc_part2_unc_err_cnt()
2385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part1_cor_err_cnt() local
2387 return dd->rcv_err_status_cnt[48]; in access_rx_rbuf_desc_part1_cor_err_cnt()
2394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_desc_part1_unc_err_cnt() local
2396 return dd->rcv_err_status_cnt[47]; in access_rx_rbuf_desc_part1_unc_err_cnt()
2403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_hq_intr_fsm_err_cnt() local
2405 return dd->rcv_err_status_cnt[46]; in access_rx_hq_intr_fsm_err_cnt()
2412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_hq_intr_csr_parity_err_cnt() local
2414 return dd->rcv_err_status_cnt[45]; in access_rx_hq_intr_csr_parity_err_cnt()
2421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_csr_parity_err_cnt() local
2423 return dd->rcv_err_status_cnt[44]; in access_rx_lookup_csr_parity_err_cnt()
2430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_rcv_array_cor_err_cnt() local
2432 return dd->rcv_err_status_cnt[43]; in access_rx_lookup_rcv_array_cor_err_cnt()
2439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_rcv_array_unc_err_cnt() local
2441 return dd->rcv_err_status_cnt[42]; in access_rx_lookup_rcv_array_unc_err_cnt()
2448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part2_parity_err_cnt() local
2450 return dd->rcv_err_status_cnt[41]; in access_rx_lookup_des_part2_parity_err_cnt()
2457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part1_unc_cor_err_cnt() local
2459 return dd->rcv_err_status_cnt[40]; in access_rx_lookup_des_part1_unc_cor_err_cnt()
2466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_lookup_des_part1_unc_err_cnt() local
2468 return dd->rcv_err_status_cnt[39]; in access_rx_lookup_des_part1_unc_err_cnt()
2475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_next_free_buf_cor_err_cnt() local
2477 return dd->rcv_err_status_cnt[38]; in access_rx_rbuf_next_free_buf_cor_err_cnt()
2484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_next_free_buf_unc_err_cnt() local
2486 return dd->rcv_err_status_cnt[37]; in access_rx_rbuf_next_free_buf_unc_err_cnt()
2493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_fl_init_wr_addr_parity_err_cnt() local
2495 return dd->rcv_err_status_cnt[36]; in access_rbuf_fl_init_wr_addr_parity_err_cnt()
2502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_initdone_parity_err_cnt() local
2504 return dd->rcv_err_status_cnt[35]; in access_rx_rbuf_fl_initdone_parity_err_cnt()
2511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_write_addr_parity_err_cnt() local
2513 return dd->rcv_err_status_cnt[34]; in access_rx_rbuf_fl_write_addr_parity_err_cnt()
2520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_fl_rd_addr_parity_err_cnt() local
2522 return dd->rcv_err_status_cnt[33]; in access_rx_rbuf_fl_rd_addr_parity_err_cnt()
2529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_empty_err_cnt() local
2531 return dd->rcv_err_status_cnt[32]; in access_rx_rbuf_empty_err_cnt()
2538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_full_err_cnt() local
2540 return dd->rcv_err_status_cnt[31]; in access_rx_rbuf_full_err_cnt()
2547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_bad_lookup_err_cnt() local
2549 return dd->rcv_err_status_cnt[30]; in access_rbuf_bad_lookup_err_cnt()
2556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_ctx_id_parity_err_cnt() local
2558 return dd->rcv_err_status_cnt[29]; in access_rbuf_ctx_id_parity_err_cnt()
2565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rbuf_csr_qeopdw_parity_err_cnt() local
2567 return dd->rcv_err_status_cnt[28]; in access_rbuf_csr_qeopdw_parity_err_cnt()
2574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt() local
2576 return dd->rcv_err_status_cnt[27]; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt()
2583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt() local
2585 return dd->rcv_err_status_cnt[26]; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt()
2592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt() local
2594 return dd->rcv_err_status_cnt[25]; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt()
2601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt() local
2603 return dd->rcv_err_status_cnt[24]; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt()
2610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt() local
2612 return dd->rcv_err_status_cnt[23]; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt()
2619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt() local
2621 return dd->rcv_err_status_cnt[22]; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt()
2628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt() local
2630 return dd->rcv_err_status_cnt[21]; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt()
2637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_block_list_read_cor_err_cnt() local
2639 return dd->rcv_err_status_cnt[20]; in access_rx_rbuf_block_list_read_cor_err_cnt()
2646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_block_list_read_unc_err_cnt() local
2648 return dd->rcv_err_status_cnt[19]; in access_rx_rbuf_block_list_read_unc_err_cnt()
2655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_cor_err_cnt() local
2657 return dd->rcv_err_status_cnt[18]; in access_rx_rbuf_lookup_des_cor_err_cnt()
2664 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_unc_err_cnt() local
2666 return dd->rcv_err_status_cnt[17]; in access_rx_rbuf_lookup_des_unc_err_cnt()
2673 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt() local
2675 return dd->rcv_err_status_cnt[16]; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt()
2682 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_lookup_des_reg_unc_err_cnt() local
2684 return dd->rcv_err_status_cnt[15]; in access_rx_rbuf_lookup_des_reg_unc_err_cnt()
2691 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_free_list_cor_err_cnt() local
2693 return dd->rcv_err_status_cnt[14]; in access_rx_rbuf_free_list_cor_err_cnt()
2700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rbuf_free_list_unc_err_cnt() local
2702 return dd->rcv_err_status_cnt[13]; in access_rx_rbuf_free_list_unc_err_cnt()
2709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_fsm_encoding_err_cnt() local
2711 return dd->rcv_err_status_cnt[12]; in access_rx_rcv_fsm_encoding_err_cnt()
2718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_flag_cor_err_cnt() local
2720 return dd->rcv_err_status_cnt[11]; in access_rx_dma_flag_cor_err_cnt()
2727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_flag_unc_err_cnt() local
2729 return dd->rcv_err_status_cnt[10]; in access_rx_dma_flag_unc_err_cnt()
2736 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dc_sop_eop_parity_err_cnt() local
2738 return dd->rcv_err_status_cnt[9]; in access_rx_dc_sop_eop_parity_err_cnt()
2745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_csr_parity_err_cnt() local
2747 return dd->rcv_err_status_cnt[8]; in access_rx_rcv_csr_parity_err_cnt()
2754 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_qp_map_table_cor_err_cnt() local
2756 return dd->rcv_err_status_cnt[7]; in access_rx_rcv_qp_map_table_cor_err_cnt()
2763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_qp_map_table_unc_err_cnt() local
2765 return dd->rcv_err_status_cnt[6]; in access_rx_rcv_qp_map_table_unc_err_cnt()
2772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_data_cor_err_cnt() local
2774 return dd->rcv_err_status_cnt[5]; in access_rx_rcv_data_cor_err_cnt()
2781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_data_unc_err_cnt() local
2783 return dd->rcv_err_status_cnt[4]; in access_rx_rcv_data_unc_err_cnt()
2790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_hdr_cor_err_cnt() local
2792 return dd->rcv_err_status_cnt[3]; in access_rx_rcv_hdr_cor_err_cnt()
2799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_rcv_hdr_unc_err_cnt() local
2801 return dd->rcv_err_status_cnt[2]; in access_rx_rcv_hdr_unc_err_cnt()
2808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dc_intf_parity_err_cnt() local
2810 return dd->rcv_err_status_cnt[1]; in access_rx_dc_intf_parity_err_cnt()
2817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_rx_dma_csr_cor_err_cnt() local
2819 return dd->rcv_err_status_cnt[0]; in access_rx_dma_csr_cor_err_cnt()
2830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pec_sop_head_parity_err_cnt() local
2832 return dd->send_pio_err_status_cnt[35]; in access_pio_pec_sop_head_parity_err_cnt()
2839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pcc_sop_head_parity_err_cnt() local
2841 return dd->send_pio_err_status_cnt[34]; in access_pio_pcc_sop_head_parity_err_cnt()
2848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_last_returned_cnt_parity_err_cnt() local
2850 return dd->send_pio_err_status_cnt[33]; in access_pio_last_returned_cnt_parity_err_cnt()
2857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_current_free_cnt_parity_err_cnt() local
2859 return dd->send_pio_err_status_cnt[32]; in access_pio_current_free_cnt_parity_err_cnt()
2866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_reserved_31_err_cnt() local
2868 return dd->send_pio_err_status_cnt[31]; in access_pio_reserved_31_err_cnt()
2875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_reserved_30_err_cnt() local
2877 return dd->send_pio_err_status_cnt[30]; in access_pio_reserved_30_err_cnt()
2884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_sop_len_err_cnt() local
2886 return dd->send_pio_err_status_cnt[29]; in access_pio_ppmc_sop_len_err_cnt()
2893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_bqc_mem_parity_err_cnt() local
2895 return dd->send_pio_err_status_cnt[28]; in access_pio_ppmc_bqc_mem_parity_err_cnt()
2902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vl_fifo_parity_err_cnt() local
2904 return dd->send_pio_err_status_cnt[27]; in access_pio_vl_fifo_parity_err_cnt()
2911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vlf_sop_parity_err_cnt() local
2913 return dd->send_pio_err_status_cnt[26]; in access_pio_vlf_sop_parity_err_cnt()
2920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_vlf_v1_len_parity_err_cnt() local
2922 return dd->send_pio_err_status_cnt[25]; in access_pio_vlf_v1_len_parity_err_cnt()
2929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_block_qw_count_parity_err_cnt() local
2931 return dd->send_pio_err_status_cnt[24]; in access_pio_block_qw_count_parity_err_cnt()
2938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_qw_valid_parity_err_cnt() local
2940 return dd->send_pio_err_status_cnt[23]; in access_pio_write_qw_valid_parity_err_cnt()
2947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_state_machine_err_cnt() local
2949 return dd->send_pio_err_status_cnt[22]; in access_pio_state_machine_err_cnt()
2956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_data_parity_err_cnt() local
2958 return dd->send_pio_err_status_cnt[21]; in access_pio_write_data_parity_err_cnt()
2965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_host_addr_mem_cor_err_cnt() local
2967 return dd->send_pio_err_status_cnt[20]; in access_pio_host_addr_mem_cor_err_cnt()
2974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_host_addr_mem_unc_err_cnt() local
2976 return dd->send_pio_err_status_cnt[19]; in access_pio_host_addr_mem_unc_err_cnt()
2983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt() local
2985 return dd->send_pio_err_status_cnt[18]; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt()
2992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_init_sm_in_err_cnt() local
2994 return dd->send_pio_err_status_cnt[17]; in access_pio_init_sm_in_err_cnt()
3001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_ppmc_pbl_fifo_err_cnt() local
3003 return dd->send_pio_err_status_cnt[16]; in access_pio_ppmc_pbl_fifo_err_cnt()
3010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_credit_ret_fifo_parity_err_cnt() local
3012 return dd->send_pio_err_status_cnt[15]; in access_pio_credit_ret_fifo_parity_err_cnt()
3019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank1_cor_err_cnt() local
3021 return dd->send_pio_err_status_cnt[14]; in access_pio_v1_len_mem_bank1_cor_err_cnt()
3028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank0_cor_err_cnt() local
3030 return dd->send_pio_err_status_cnt[13]; in access_pio_v1_len_mem_bank0_cor_err_cnt()
3037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank1_unc_err_cnt() local
3039 return dd->send_pio_err_status_cnt[12]; in access_pio_v1_len_mem_bank1_unc_err_cnt()
3046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_v1_len_mem_bank0_unc_err_cnt() local
3048 return dd->send_pio_err_status_cnt[11]; in access_pio_v1_len_mem_bank0_unc_err_cnt()
3055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sm_pkt_reset_parity_err_cnt() local
3057 return dd->send_pio_err_status_cnt[10]; in access_pio_sm_pkt_reset_parity_err_cnt()
3064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pkt_evict_fifo_parity_err_cnt() local
3066 return dd->send_pio_err_status_cnt[9]; in access_pio_pkt_evict_fifo_parity_err_cnt()
3073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt() local
3075 return dd->send_pio_err_status_cnt[8]; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt()
3082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sbrdctl_crrel_parity_err_cnt() local
3084 return dd->send_pio_err_status_cnt[7]; in access_pio_sbrdctl_crrel_parity_err_cnt()
3091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pec_fifo_parity_err_cnt() local
3093 return dd->send_pio_err_status_cnt[6]; in access_pio_pec_fifo_parity_err_cnt()
3100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_pcc_fifo_parity_err_cnt() local
3102 return dd->send_pio_err_status_cnt[5]; in access_pio_pcc_fifo_parity_err_cnt()
3109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sb_mem_fifo1_err_cnt() local
3111 return dd->send_pio_err_status_cnt[4]; in access_pio_sb_mem_fifo1_err_cnt()
3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_sb_mem_fifo0_err_cnt() local
3120 return dd->send_pio_err_status_cnt[3]; in access_pio_sb_mem_fifo0_err_cnt()
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_csr_parity_err_cnt() local
3129 return dd->send_pio_err_status_cnt[2]; in access_pio_csr_parity_err_cnt()
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_addr_parity_err_cnt() local
3138 return dd->send_pio_err_status_cnt[1]; in access_pio_write_addr_parity_err_cnt()
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_bad_ctxt_err_cnt() local
3147 return dd->send_pio_err_status_cnt[0]; in access_pio_write_bad_ctxt_err_cnt()
3158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_pcie_req_tracking_cor_err_cnt() local
3160 return dd->send_dma_err_status_cnt[3]; in access_sdma_pcie_req_tracking_cor_err_cnt()
3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_pcie_req_tracking_unc_err_cnt() local
3169 return dd->send_dma_err_status_cnt[2]; in access_sdma_pcie_req_tracking_unc_err_cnt()
3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_csr_parity_err_cnt() local
3178 return dd->send_dma_err_status_cnt[1]; in access_sdma_csr_parity_err_cnt()
3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_rpy_tag_err_cnt() local
3187 return dd->send_dma_err_status_cnt[0]; in access_sdma_rpy_tag_err_cnt()
3198 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_csr_unc_err_cnt() local
3200 return dd->send_egress_err_status_cnt[63]; in access_tx_read_pio_memory_csr_unc_err_cnt()
3207 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_csr_err_cnt() local
3209 return dd->send_egress_err_status_cnt[62]; in access_tx_read_sdma_memory_csr_err_cnt()
3216 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifo_cor_err_cnt() local
3218 return dd->send_egress_err_status_cnt[61]; in access_tx_egress_fifo_cor_err_cnt()
3225 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_cor_err_cnt() local
3227 return dd->send_egress_err_status_cnt[60]; in access_tx_read_pio_memory_cor_err_cnt()
3234 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_cor_err_cnt() local
3236 return dd->send_egress_err_status_cnt[59]; in access_tx_read_sdma_memory_cor_err_cnt()
3243 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sb_hdr_cor_err_cnt() local
3245 return dd->send_egress_err_status_cnt[58]; in access_tx_sb_hdr_cor_err_cnt()
3252 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_overrun_err_cnt() local
3254 return dd->send_egress_err_status_cnt[57]; in access_tx_credit_overrun_err_cnt()
3261 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo8_cor_err_cnt() local
3263 return dd->send_egress_err_status_cnt[56]; in access_tx_launch_fifo8_cor_err_cnt()
3270 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo7_cor_err_cnt() local
3272 return dd->send_egress_err_status_cnt[55]; in access_tx_launch_fifo7_cor_err_cnt()
3279 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo6_cor_err_cnt() local
3281 return dd->send_egress_err_status_cnt[54]; in access_tx_launch_fifo6_cor_err_cnt()
3288 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo5_cor_err_cnt() local
3290 return dd->send_egress_err_status_cnt[53]; in access_tx_launch_fifo5_cor_err_cnt()
3297 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo4_cor_err_cnt() local
3299 return dd->send_egress_err_status_cnt[52]; in access_tx_launch_fifo4_cor_err_cnt()
3306 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo3_cor_err_cnt() local
3308 return dd->send_egress_err_status_cnt[51]; in access_tx_launch_fifo3_cor_err_cnt()
3315 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo2_cor_err_cnt() local
3317 return dd->send_egress_err_status_cnt[50]; in access_tx_launch_fifo2_cor_err_cnt()
3324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo1_cor_err_cnt() local
3326 return dd->send_egress_err_status_cnt[49]; in access_tx_launch_fifo1_cor_err_cnt()
3333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo0_cor_err_cnt() local
3335 return dd->send_egress_err_status_cnt[48]; in access_tx_launch_fifo0_cor_err_cnt()
3342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_return_vl_err_cnt() local
3344 return dd->send_egress_err_status_cnt[47]; in access_tx_credit_return_vl_err_cnt()
3351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_hcrc_insertion_err_cnt() local
3353 return dd->send_egress_err_status_cnt[46]; in access_tx_hcrc_insertion_err_cnt()
3360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifo_unc_err_cnt() local
3362 return dd->send_egress_err_status_cnt[45]; in access_tx_egress_fifo_unc_err_cnt()
3369 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_pio_memory_unc_err_cnt() local
3371 return dd->send_egress_err_status_cnt[44]; in access_tx_read_pio_memory_unc_err_cnt()
3378 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_read_sdma_memory_unc_err_cnt() local
3380 return dd->send_egress_err_status_cnt[43]; in access_tx_read_sdma_memory_unc_err_cnt()
3387 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sb_hdr_unc_err_cnt() local
3389 return dd->send_egress_err_status_cnt[42]; in access_tx_sb_hdr_unc_err_cnt()
3396 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_credit_return_partiy_err_cnt() local
3398 return dd->send_egress_err_status_cnt[41]; in access_tx_credit_return_partiy_err_cnt()
3405 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo8_unc_or_parity_err_cnt() local
3407 return dd->send_egress_err_status_cnt[40]; in access_tx_launch_fifo8_unc_or_parity_err_cnt()
3414 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo7_unc_or_parity_err_cnt() local
3416 return dd->send_egress_err_status_cnt[39]; in access_tx_launch_fifo7_unc_or_parity_err_cnt()
3423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo6_unc_or_parity_err_cnt() local
3425 return dd->send_egress_err_status_cnt[38]; in access_tx_launch_fifo6_unc_or_parity_err_cnt()
3432 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo5_unc_or_parity_err_cnt() local
3434 return dd->send_egress_err_status_cnt[37]; in access_tx_launch_fifo5_unc_or_parity_err_cnt()
3441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo4_unc_or_parity_err_cnt() local
3443 return dd->send_egress_err_status_cnt[36]; in access_tx_launch_fifo4_unc_or_parity_err_cnt()
3450 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo3_unc_or_parity_err_cnt() local
3452 return dd->send_egress_err_status_cnt[35]; in access_tx_launch_fifo3_unc_or_parity_err_cnt()
3459 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo2_unc_or_parity_err_cnt() local
3461 return dd->send_egress_err_status_cnt[34]; in access_tx_launch_fifo2_unc_or_parity_err_cnt()
3468 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo1_unc_or_parity_err_cnt() local
3470 return dd->send_egress_err_status_cnt[33]; in access_tx_launch_fifo1_unc_or_parity_err_cnt()
3477 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_fifo0_unc_or_parity_err_cnt() local
3479 return dd->send_egress_err_status_cnt[32]; in access_tx_launch_fifo0_unc_or_parity_err_cnt()
3486 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma15_disallowed_packet_err_cnt() local
3488 return dd->send_egress_err_status_cnt[31]; in access_tx_sdma15_disallowed_packet_err_cnt()
3495 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma14_disallowed_packet_err_cnt() local
3497 return dd->send_egress_err_status_cnt[30]; in access_tx_sdma14_disallowed_packet_err_cnt()
3504 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma13_disallowed_packet_err_cnt() local
3506 return dd->send_egress_err_status_cnt[29]; in access_tx_sdma13_disallowed_packet_err_cnt()
3513 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma12_disallowed_packet_err_cnt() local
3515 return dd->send_egress_err_status_cnt[28]; in access_tx_sdma12_disallowed_packet_err_cnt()
3522 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma11_disallowed_packet_err_cnt() local
3524 return dd->send_egress_err_status_cnt[27]; in access_tx_sdma11_disallowed_packet_err_cnt()
3531 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma10_disallowed_packet_err_cnt() local
3533 return dd->send_egress_err_status_cnt[26]; in access_tx_sdma10_disallowed_packet_err_cnt()
3540 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma9_disallowed_packet_err_cnt() local
3542 return dd->send_egress_err_status_cnt[25]; in access_tx_sdma9_disallowed_packet_err_cnt()
3549 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma8_disallowed_packet_err_cnt() local
3551 return dd->send_egress_err_status_cnt[24]; in access_tx_sdma8_disallowed_packet_err_cnt()
3558 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma7_disallowed_packet_err_cnt() local
3560 return dd->send_egress_err_status_cnt[23]; in access_tx_sdma7_disallowed_packet_err_cnt()
3567 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma6_disallowed_packet_err_cnt() local
3569 return dd->send_egress_err_status_cnt[22]; in access_tx_sdma6_disallowed_packet_err_cnt()
3576 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma5_disallowed_packet_err_cnt() local
3578 return dd->send_egress_err_status_cnt[21]; in access_tx_sdma5_disallowed_packet_err_cnt()
3585 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma4_disallowed_packet_err_cnt() local
3587 return dd->send_egress_err_status_cnt[20]; in access_tx_sdma4_disallowed_packet_err_cnt()
3594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma3_disallowed_packet_err_cnt() local
3596 return dd->send_egress_err_status_cnt[19]; in access_tx_sdma3_disallowed_packet_err_cnt()
3603 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma2_disallowed_packet_err_cnt() local
3605 return dd->send_egress_err_status_cnt[18]; in access_tx_sdma2_disallowed_packet_err_cnt()
3612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma1_disallowed_packet_err_cnt() local
3614 return dd->send_egress_err_status_cnt[17]; in access_tx_sdma1_disallowed_packet_err_cnt()
3621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma0_disallowed_packet_err_cnt() local
3623 return dd->send_egress_err_status_cnt[16]; in access_tx_sdma0_disallowed_packet_err_cnt()
3630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_config_parity_err_cnt() local
3632 return dd->send_egress_err_status_cnt[15]; in access_tx_config_parity_err_cnt()
3639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sbrd_ctl_csr_parity_err_cnt() local
3641 return dd->send_egress_err_status_cnt[14]; in access_tx_sbrd_ctl_csr_parity_err_cnt()
3648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_launch_csr_parity_err_cnt() local
3650 return dd->send_egress_err_status_cnt[13]; in access_tx_launch_csr_parity_err_cnt()
3657 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_illegal_vl_err_cnt() local
3659 return dd->send_egress_err_status_cnt[12]; in access_tx_illegal_vl_err_cnt()
3666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sbrd_ctl_state_machine_parity_err_cnt() local
3668 return dd->send_egress_err_status_cnt[11]; in access_tx_sbrd_ctl_state_machine_parity_err_cnt()
3675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_10_err_cnt() local
3677 return dd->send_egress_err_status_cnt[10]; in access_egress_reserved_10_err_cnt()
3684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_9_err_cnt() local
3686 return dd->send_egress_err_status_cnt[9]; in access_egress_reserved_9_err_cnt()
3693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_sdma_launch_intf_parity_err_cnt() local
3695 return dd->send_egress_err_status_cnt[8]; in access_tx_sdma_launch_intf_parity_err_cnt()
3702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pio_launch_intf_parity_err_cnt() local
3704 return dd->send_egress_err_status_cnt[7]; in access_tx_pio_launch_intf_parity_err_cnt()
3711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_6_err_cnt() local
3713 return dd->send_egress_err_status_cnt[6]; in access_egress_reserved_6_err_cnt()
3720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_incorrect_link_state_err_cnt() local
3722 return dd->send_egress_err_status_cnt[5]; in access_tx_incorrect_link_state_err_cnt()
3729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_linkdown_err_cnt() local
3731 return dd->send_egress_err_status_cnt[4]; in access_tx_linkdown_err_cnt()
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_egress_fifi_underrun_or_parity_err_cnt() local
3740 return dd->send_egress_err_status_cnt[3]; in access_tx_egress_fifi_underrun_or_parity_err_cnt()
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_egress_reserved_2_err_cnt() local
3749 return dd->send_egress_err_status_cnt[2]; in access_egress_reserved_2_err_cnt()
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pkt_integrity_mem_unc_err_cnt() local
3758 return dd->send_egress_err_status_cnt[1]; in access_tx_pkt_integrity_mem_unc_err_cnt()
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_tx_pkt_integrity_mem_cor_err_cnt() local
3767 return dd->send_egress_err_status_cnt[0]; in access_tx_pkt_integrity_mem_cor_err_cnt()
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_write_bad_addr_err_cnt() local
3780 return dd->send_err_status_cnt[2]; in access_send_csr_write_bad_addr_err_cnt()
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_read_bad_addr_err_cnt() local
3789 return dd->send_err_status_cnt[1]; in access_send_csr_read_bad_addr_err_cnt()
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_send_csr_parity_cnt() local
3798 return dd->send_err_status_cnt[0]; in access_send_csr_parity_cnt()
3809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_out_of_bounds_err_cnt() local
3811 return dd->sw_ctxt_err_status_cnt[4]; in access_pio_write_out_of_bounds_err_cnt()
3818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_overflow_err_cnt() local
3820 return dd->sw_ctxt_err_status_cnt[3]; in access_pio_write_overflow_err_cnt()
3827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_write_crosses_boundary_err_cnt() local
3829 return dd->sw_ctxt_err_status_cnt[2]; in access_pio_write_crosses_boundary_err_cnt()
3836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_disallowed_packet_err_cnt() local
3838 return dd->sw_ctxt_err_status_cnt[1]; in access_pio_disallowed_packet_err_cnt()
3845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_pio_inconsistent_sop_err_cnt() local
3847 return dd->sw_ctxt_err_status_cnt[0]; in access_pio_inconsistent_sop_err_cnt()
3858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_request_fifo_cor_err_cnt() local
3860 return dd->sw_send_dma_eng_err_status_cnt[23]; in access_sdma_header_request_fifo_cor_err_cnt()
3867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_storage_cor_err_cnt() local
3869 return dd->sw_send_dma_eng_err_status_cnt[22]; in access_sdma_header_storage_cor_err_cnt()
3876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_tracking_cor_err_cnt() local
3878 return dd->sw_send_dma_eng_err_status_cnt[21]; in access_sdma_packet_tracking_cor_err_cnt()
3885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_assembly_cor_err_cnt() local
3887 return dd->sw_send_dma_eng_err_status_cnt[20]; in access_sdma_assembly_cor_err_cnt()
3894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_desc_table_cor_err_cnt() local
3896 return dd->sw_send_dma_eng_err_status_cnt[19]; in access_sdma_desc_table_cor_err_cnt()
3903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_request_fifo_unc_err_cnt() local
3905 return dd->sw_send_dma_eng_err_status_cnt[18]; in access_sdma_header_request_fifo_unc_err_cnt()
3912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_storage_unc_err_cnt() local
3914 return dd->sw_send_dma_eng_err_status_cnt[17]; in access_sdma_header_storage_unc_err_cnt()
3921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_tracking_unc_err_cnt() local
3923 return dd->sw_send_dma_eng_err_status_cnt[16]; in access_sdma_packet_tracking_unc_err_cnt()
3930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_assembly_unc_err_cnt() local
3932 return dd->sw_send_dma_eng_err_status_cnt[15]; in access_sdma_assembly_unc_err_cnt()
3939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_desc_table_unc_err_cnt() local
3941 return dd->sw_send_dma_eng_err_status_cnt[14]; in access_sdma_desc_table_unc_err_cnt()
3948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_timeout_err_cnt() local
3950 return dd->sw_send_dma_eng_err_status_cnt[13]; in access_sdma_timeout_err_cnt()
3957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_length_err_cnt() local
3959 return dd->sw_send_dma_eng_err_status_cnt[12]; in access_sdma_header_length_err_cnt()
3966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_address_err_cnt() local
3968 return dd->sw_send_dma_eng_err_status_cnt[11]; in access_sdma_header_address_err_cnt()
3975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_header_select_err_cnt() local
3977 return dd->sw_send_dma_eng_err_status_cnt[10]; in access_sdma_header_select_err_cnt()
3984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_reserved_9_err_cnt() local
3986 return dd->sw_send_dma_eng_err_status_cnt[9]; in access_sdma_reserved_9_err_cnt()
3993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_packet_desc_overflow_err_cnt() local
3995 return dd->sw_send_dma_eng_err_status_cnt[8]; in access_sdma_packet_desc_overflow_err_cnt()
4002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_length_mismatch_err_cnt() local
4004 return dd->sw_send_dma_eng_err_status_cnt[7]; in access_sdma_length_mismatch_err_cnt()
4010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_halt_err_cnt() local
4012 return dd->sw_send_dma_eng_err_status_cnt[6]; in access_sdma_halt_err_cnt()
4019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_mem_read_err_cnt() local
4021 return dd->sw_send_dma_eng_err_status_cnt[5]; in access_sdma_mem_read_err_cnt()
4028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_first_desc_err_cnt() local
4030 return dd->sw_send_dma_eng_err_status_cnt[4]; in access_sdma_first_desc_err_cnt()
4037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_tail_out_of_bounds_err_cnt() local
4039 return dd->sw_send_dma_eng_err_status_cnt[3]; in access_sdma_tail_out_of_bounds_err_cnt()
4046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_too_long_err_cnt() local
4048 return dd->sw_send_dma_eng_err_status_cnt[2]; in access_sdma_too_long_err_cnt()
4055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_gen_mismatch_err_cnt() local
4057 return dd->sw_send_dma_eng_err_status_cnt[1]; in access_sdma_gen_mismatch_err_cnt()
4064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_sdma_wrong_dw_err_cnt() local
4066 return dd->sw_send_dma_eng_err_status_cnt[0]; in access_sdma_wrong_dw_err_cnt()
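
The twenty-four access_sdma_*_err_cnt() accessors above differ only in the array index they return; each recovers the device data from the opaque context pointer handed in by the counter framework. A minimal sketch of that shape, with a simplified signature assumed for illustration (the real accessors also take a const struct cntr_entry * plus vl/mode/data arguments):

#include <linux/types.h>

struct devdata_model {
        u64 sw_send_dma_eng_err_status_cnt[24];
};

/* one representative accessor: recover the device data from the
 * opaque context and return a single software counter slot */
static u64 access_sdma_err_cnt(void *context, int index)
{
        struct devdata_model *dd = context;

        return dd->sw_send_dma_eng_err_status_cnt[index];
}
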
4073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; in access_dc_rcv_err_cnt() local
4078 val = read_write_csr(dd, csr, mode, data); in access_dc_rcv_err_cnt()
4080 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? in access_dc_rcv_err_cnt()
4081 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; in access_dc_rcv_err_cnt()
4083 dd->sw_rcv_bypass_packet_errors = 0; in access_dc_rcv_err_cnt()
4085 dd_dev_err(dd, "Invalid cntr register access mode"); in access_dc_rcv_err_cnt()
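
access_dc_rcv_err_cnt() above folds software-counted bypass packet errors into the hardware value and clamps at CNTR_MAX rather than letting the sum wrap. A sketch of that saturating add; the CNTR_MAX value here is an assumption, the real limit is driver-defined:

#include <linux/types.h>

#define CNTR_MAX 0xFFFFFFFFFFFFull      /* assumed saturation point */

/* add a software-tracked extra to a hardware counter value without
 * wrapping: if the sum would exceed CNTR_MAX, pin it there */
static u64 saturating_add(u64 hw_val, u64 sw_extra)
{
        return hw_val > CNTR_MAX - sw_extra ? CNTR_MAX : hw_val + sw_extra;
}
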
4096 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4114 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
5244 int is_ax(struct hfi1_devdata *dd) in is_ax() argument
5247 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_ax()
5253 int is_bx(struct hfi1_devdata *dd) in is_bx() argument
5256 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_bx()
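
is_ax()/is_bx() above classify the silicon stepping by shifting the minor-revision field out of the packed CCE revision register. An illustrative decode; the shift and mask here are stand-ins, not the real CCE_REVISION_* values:

#include <linux/types.h>

#define CHIP_REV_MINOR_SHIFT 8          /* assumed field position */
#define CHIP_REV_MINOR_MASK  0xffull    /* assumed field width */

static u8 chip_minor_rev(u64 revision)
{
        return (revision >> CHIP_REV_MINOR_SHIFT) & CHIP_REV_MINOR_MASK;
}

/* is_ax()/is_bx() reduce to comparisons against a stepping value */
static int is_stepping(u64 revision, u8 step)
{
        return chip_minor_rev(revision) == step;
}
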
5268 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); in is_urg_masked()
5556 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_cce_err() argument
5565 dd_dev_info(dd, "CCE Error: %s\n", in handle_cce_err()
5569 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { in handle_cce_err()
5572 start_freeze_handling(dd->pport, FREEZE_SELF); in handle_cce_err()
5577 incr_cntr64(&dd->cce_err_status_cnt[i]); in handle_cce_err()
5579 incr_cntr64(&dd->sw_cce_err_status_aggregate); in handle_cce_err()
5591 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); in update_rcverr_timer() local
5592 struct hfi1_pportdata *ppd = dd->pport; in update_rcverr_timer()
5593 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); in update_rcverr_timer()
5595 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && in update_rcverr_timer()
5597 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); in update_rcverr_timer()
5603 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; in update_rcverr_timer()
5605 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in update_rcverr_timer()
5608 static int init_rcverr(struct hfi1_devdata *dd) in init_rcverr() argument
5610 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); in init_rcverr()
5612 dd->rcv_ovfl_cnt = 0; in init_rcverr()
5613 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in init_rcverr()
5616 static void free_rcverr(struct hfi1_devdata *dd) in free_rcverr() argument
5618 if (dd->rcverr_timer.function) in free_rcverr()
5619 del_timer_sync(&dd->rcverr_timer); in free_rcverr()
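
init_rcverr(), update_rcverr_timer() and free_rcverr() above form a standard kernel periodic-timer lifecycle: set up the timer, arm it, re-arm from the callback, and tear it down synchronously on free. A self-contained skeleton of that pattern; the check body and the RCVERR_CHECK_PERIOD value are placeholders:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define RCVERR_CHECK_PERIOD 2           /* assumed period, seconds */

struct dev_timer_model {
        struct timer_list rcverr_timer;
};

static void rcverr_check(struct timer_list *t)
{
        struct dev_timer_model *dev = from_timer(dev, t, rcverr_timer);

        /* ... compare overflow counters, bounce the link if needed ... */
        mod_timer(&dev->rcverr_timer, jiffies + HZ * RCVERR_CHECK_PERIOD);
}

static int rcverr_init(struct dev_timer_model *dev)
{
        timer_setup(&dev->rcverr_timer, rcverr_check, 0);
        return mod_timer(&dev->rcverr_timer,
                         jiffies + HZ * RCVERR_CHECK_PERIOD);
}

static void rcverr_free(struct dev_timer_model *dev)
{
        if (dev->rcverr_timer.function)
                del_timer_sync(&dev->rcverr_timer);
}
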
5622 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_rxe_err() argument
5627 dd_dev_info(dd, "Receive Error: %s\n", in handle_rxe_err()
5637 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) in handle_rxe_err()
5640 start_freeze_handling(dd->pport, flags); in handle_rxe_err()
5645 incr_cntr64(&dd->rcv_err_status_cnt[i]); in handle_rxe_err()
5649 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_misc_err() argument
5654 dd_dev_info(dd, "Misc Error: %s", in handle_misc_err()
5658 incr_cntr64(&dd->misc_err_status_cnt[i]); in handle_misc_err()
5662 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_pio_err() argument
5667 dd_dev_info(dd, "PIO Error: %s\n", in handle_pio_err()
5671 start_freeze_handling(dd->pport, 0); in handle_pio_err()
5675 incr_cntr64(&dd->send_pio_err_status_cnt[i]); in handle_pio_err()
5679 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_sdma_err() argument
5684 dd_dev_info(dd, "SDMA Error: %s\n", in handle_sdma_err()
5688 start_freeze_handling(dd->pport, 0); in handle_sdma_err()
5692 incr_cntr64(&dd->send_dma_err_status_cnt[i]); in handle_sdma_err()
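
handle_cce_err() through handle_sdma_err() above share one shape: decode the error status register into a string for the log, optionally start freeze handling for fatal bits, then bump a per-bit software counter for every bit that was set. A sketch of the counting loop; incr_cntr64() in the driver is an atomic increment, a plain add stands in here:

#include <linux/types.h>

static void count_error_bits(u64 reg, u64 *cnt, int nbits)
{
        int i;

        /* one software counter per status bit, bumped when the bit fires */
        for (i = 0; i < nbits && i < 64; i++)
                if (reg & (1ull << i))
                        cnt[i]++;
}
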
5701 static void count_port_inactive(struct hfi1_devdata *dd) in count_port_inactive() argument
5703 __count_port_discards(dd->pport); in count_port_inactive()
5715 static void handle_send_egress_err_info(struct hfi1_devdata *dd, in handle_send_egress_err_info() argument
5718 struct hfi1_pportdata *ppd = dd->pport; in handle_send_egress_err_info()
5719 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ in handle_send_egress_err_info()
5720 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO); in handle_send_egress_err_info()
5724 write_csr(dd, SEND_EGRESS_ERR_INFO, info); in handle_send_egress_err_info()
5726 dd_dev_info(dd, in handle_send_egress_err_info()
5802 static int engine_to_vl(struct hfi1_devdata *dd, int engine) in engine_to_vl() argument
5812 m = rcu_dereference(dd->sdma_map); in engine_to_vl()
5823 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) in sc_to_vl() argument
5829 sci = &dd->send_contexts[sw_index]; in sc_to_vl()
5838 if (dd->vld[15].sc == sc) in sc_to_vl()
5841 if (dd->vld[i].sc == sc) in sc_to_vl()
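
engine_to_vl() above reads the RCU-published sdma_map, so the lookup runs lock-free against concurrent remapping. A skeleton of that reader side; the map layout and the engine bound are assumptions for illustration:

#include <linux/rcupdate.h>

struct sdma_map_model {
        int engine_to_vl[16];           /* assumed engine bound */
};

struct dev_rcu_model {
        struct sdma_map_model __rcu *sdma_map;
};

static int lookup_engine_vl(struct dev_rcu_model *dev, int engine)
{
        struct sdma_map_model *m;
        int vl = -1;

        rcu_read_lock();
        m = rcu_dereference(dev->sdma_map); /* pairs with rcu_assign_pointer() */
        if (m && engine >= 0 && engine < 16)
                vl = m->engine_to_vl[engine];
        rcu_read_unlock();
        return vl;
}
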
5847 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_egress_err() argument
5854 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5855 else if (is_ax(dd) && in handle_egress_err()
5857 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) in handle_egress_err()
5858 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5867 count_port_inactive(dd); in handle_egress_err()
5870 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); in handle_egress_err()
5872 handle_send_egress_err_info(dd, vl); in handle_egress_err()
5881 dd_dev_info(dd, "Egress Error: %s\n", in handle_egress_err()
5886 incr_cntr64(&dd->send_egress_err_status_cnt[i]); in handle_egress_err()
5890 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_txe_err() argument
5895 dd_dev_info(dd, "Send Error: %s\n", in handle_txe_err()
5900 incr_cntr64(&dd->send_err_status_cnt[i]); in handle_txe_err()
5921 static void interrupt_clear_down(struct hfi1_devdata *dd, in interrupt_clear_down() argument
5931 reg = read_kctxt_csr(dd, context, eri->status); in interrupt_clear_down()
5934 write_kctxt_csr(dd, context, eri->clear, reg); in interrupt_clear_down()
5936 eri->handler(dd, context, reg); in interrupt_clear_down()
5941 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", in interrupt_clear_down()
5947 mask = read_kctxt_csr(dd, context, eri->mask); in interrupt_clear_down()
5949 write_kctxt_csr(dd, context, eri->mask, mask); in interrupt_clear_down()
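
interrupt_clear_down() above implements a storm guard: read the status CSR, write-1-to-clear it, run the handler, and if the same bits keep re-asserting, drop them from the interrupt mask. A simplified model, with volatile pointers standing in for the kctxt CSR accessors and an assumed retry budget:

#include <linux/types.h>

#define MAX_CLEAR_RETRIES 4             /* assumed budget before masking */

static void clear_down(volatile u64 *status, volatile u64 *clear,
                       volatile u64 *mask, void (*handler)(u64))
{
        u64 reg = 0;
        int tries;

        for (tries = 0; tries < MAX_CLEAR_RETRIES; tries++) {
                reg = *status;
                if (!reg)
                        return;         /* source has quiesced */
                *clear = reg;           /* write-1-to-clear acknowledgement */
                handler(reg);
        }
        *mask &= ~reg;                  /* stuck bits: stop them interrupting */
}
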
5958 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) in is_misc_err_int() argument
5963 interrupt_clear_down(dd, 0, eri); in is_misc_err_int()
5965 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", in is_misc_err_int()
5986 static void is_sendctxt_err_int(struct hfi1_devdata *dd, in is_sendctxt_err_int() argument
5997 sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
5998 if (sw_index >= dd->num_send_contexts) { in is_sendctxt_err_int()
5999 dd_dev_err(dd, in is_sendctxt_err_int()
6004 sci = &dd->send_contexts[sw_index]; in is_sendctxt_err_int()
6005 spin_lock_irqsave(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6008 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, in is_sendctxt_err_int()
6010 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6017 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); in is_sendctxt_err_int()
6019 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, in is_sendctxt_err_int()
6024 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); in is_sendctxt_err_int()
6031 queue_work(dd->pport->hfi1_wq, &sc->halt_work); in is_sendctxt_err_int()
6032 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6041 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); in is_sendctxt_err_int()
6045 static void handle_sdma_eng_err(struct hfi1_devdata *dd, in handle_sdma_eng_err() argument
6051 sde = &dd->per_sdma[source]; in handle_sdma_eng_err()
6053 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in handle_sdma_eng_err()
6055 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", in handle_sdma_eng_err()
6068 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); in handle_sdma_eng_err()
6075 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) in is_sdma_eng_err_int() argument
6078 struct sdma_engine *sde = &dd->per_sdma[source]; in is_sdma_eng_err_int()
6080 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in is_sdma_eng_err_int()
6082 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, in is_sdma_eng_err_int()
6086 interrupt_clear_down(dd, source, &sdma_eng_err); in is_sdma_eng_err_int()
6092 static void is_various_int(struct hfi1_devdata *dd, unsigned int source) in is_various_int() argument
6102 handle_temp_err(dd); in is_various_int()
6104 interrupt_clear_down(dd, 0, eri); in is_various_int()
6106 dd_dev_info(dd, in is_various_int()
6111 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) in handle_qsfp_int() argument
6114 struct hfi1_pportdata *ppd = dd->pport; in handle_qsfp_int()
6120 dd_dev_info(dd, "%s: QSFP module removed\n", in handle_qsfp_int()
6140 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6162 dd_dev_info(dd, "%s: QSFP module inserted\n", in handle_qsfp_int()
6176 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6185 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", in handle_qsfp_int()
6197 static int request_host_lcb_access(struct hfi1_devdata *dd) in request_host_lcb_access() argument
6201 ret = do_8051_command(dd, HCMD_MISC, in request_host_lcb_access()
6205 dd_dev_err(dd, "%s: command failed with error %d\n", in request_host_lcb_access()
6211 static int request_8051_lcb_access(struct hfi1_devdata *dd) in request_8051_lcb_access() argument
6215 ret = do_8051_command(dd, HCMD_MISC, in request_8051_lcb_access()
6219 dd_dev_err(dd, "%s: command failed with error %d\n", in request_8051_lcb_access()
6229 static inline void set_host_lcb_access(struct hfi1_devdata *dd) in set_host_lcb_access() argument
6231 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, in set_host_lcb_access()
6240 static inline void set_8051_lcb_access(struct hfi1_devdata *dd) in set_8051_lcb_access() argument
6242 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, in set_8051_lcb_access()
6256 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) in acquire_lcb_access() argument
6258 struct hfi1_pportdata *ppd = dd->pport; in acquire_lcb_access()
6276 dd_dev_info(dd, "%s: link state %s not up\n", in acquire_lcb_access()
6282 if (dd->lcb_access_count == 0) { in acquire_lcb_access()
6283 ret = request_host_lcb_access(dd); in acquire_lcb_access()
6285 dd_dev_err(dd, in acquire_lcb_access()
6290 set_host_lcb_access(dd); in acquire_lcb_access()
6292 dd->lcb_access_count++; in acquire_lcb_access()
6306 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) in release_lcb_access() argument
6316 mutex_lock(&dd->pport->hls_lock); in release_lcb_access()
6318 while (!mutex_trylock(&dd->pport->hls_lock)) in release_lcb_access()
6322 if (dd->lcb_access_count == 0) { in release_lcb_access()
6323 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n", in release_lcb_access()
6328 if (dd->lcb_access_count == 1) { in release_lcb_access()
6329 set_8051_lcb_access(dd); in release_lcb_access()
6330 ret = request_8051_lcb_access(dd); in release_lcb_access()
6332 dd_dev_err(dd, in release_lcb_access()
6336 set_host_lcb_access(dd); in release_lcb_access()
6340 dd->lcb_access_count--; in release_lcb_access()
6342 mutex_unlock(&dd->pport->hls_lock); in release_lcb_access()
6355 static void init_lcb_access(struct hfi1_devdata *dd) in init_lcb_access() argument
6357 dd->lcb_access_count = 0; in init_lcb_access()
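
acquire_lcb_access()/release_lcb_access() above refcount the handover of LCB CSR ownership between the host and the 8051 firmware: only the first acquire and the last release actually talk to the firmware, and everything runs under the port state mutex. A skeleton of that arbitration; names are simplified stand-ins:

#include <linux/mutex.h>

struct lcb_model {
        struct mutex lock;              /* stands in for ppd->hls_lock */
        int access_count;
};

static int lcb_acquire(struct lcb_model *l)
{
        mutex_lock(&l->lock);
        if (l->access_count == 0) {
                /* first user: request_host_lcb_access() then
                 * set_host_lcb_access() */
        }
        l->access_count++;
        mutex_unlock(&l->lock);
        return 0;
}

static void lcb_release(struct lcb_model *l)
{
        mutex_lock(&l->lock);
        if (l->access_count == 1) {
                /* last user: set_8051_lcb_access() then
                 * request_8051_lcb_access() */
        }
        if (l->access_count > 0)
                l->access_count--;
        mutex_unlock(&l->lock);
}
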
6363 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) in hreq_response() argument
6365 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, in hreq_response()
6377 struct hfi1_devdata *dd = ppd->dd; in handle_8051_request() local
6382 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); in handle_8051_request()
6387 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0); in handle_8051_request()
6402 dd_dev_info(dd, "8051 request: request 0x%x not supported\n", in handle_8051_request()
6404 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); in handle_8051_request()
6408 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); in handle_8051_request()
6410 (void)read_csr(dd, DCC_CFG_RESET); in handle_8051_request()
6414 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); in handle_8051_request()
6415 hreq_response(dd, HREQ_SUCCESS, 0); in handle_8051_request()
6419 hreq_response(dd, HREQ_SUCCESS, 0); in handle_8051_request()
6423 hreq_response(dd, HREQ_SUCCESS, data); in handle_8051_request()
6426 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type); in handle_8051_request()
6427 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); in handle_8051_request()
6435 void set_up_vau(struct hfi1_devdata *dd, u8 vau) in set_up_vau() argument
6437 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_up_vau()
6442 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_up_vau()
6450 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) in set_up_vl15() argument
6452 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_up_vl15()
6463 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_up_vl15()
6465 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf in set_up_vl15()
6473 void reset_link_credits(struct hfi1_devdata *dd) in reset_link_credits() argument
6479 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); in reset_link_credits()
6480 write_csr(dd, SEND_CM_CREDIT_VL15, 0); in reset_link_credits()
6481 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); in reset_link_credits()
6483 pio_send_control(dd, PSC_CM_RESET); in reset_link_credits()
6485 dd->vl15buf_cached = 0; in reset_link_credits()
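
set_up_vau() and set_up_vl15() above are read-modify-write updates of single fields in SEND_CM_GLOBAL_CREDIT. The generic helper shape, with an illustrative field layout that is not the real register map:

#include <linux/types.h>

#define AU_SHIFT 16                     /* assumed field position */
#define AU_MASK  (0x7ull << AU_SHIFT)

/* replace one field of a CSR image: clear the field, OR in the new
 * value shifted into place; callers read the CSR first and write the
 * result back, e.g.
 *     reg = set_field(read_csr(...), AU_MASK, AU_SHIFT, vau);
 *     write_csr(..., reg);
 */
static u64 set_field(u64 reg, u64 mask, int shift, u64 val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}
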
6515 static void lcb_shutdown(struct hfi1_devdata *dd, int abort) in lcb_shutdown() argument
6520 write_csr(dd, DC_LCB_CFG_RUN, 0); in lcb_shutdown()
6522 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, in lcb_shutdown()
6525 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); in lcb_shutdown()
6526 reg = read_csr(dd, DCC_CFG_RESET); in lcb_shutdown()
6527 write_csr(dd, DCC_CFG_RESET, reg | in lcb_shutdown()
6529 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ in lcb_shutdown()
6532 write_csr(dd, DCC_CFG_RESET, reg); in lcb_shutdown()
6533 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in lcb_shutdown()
6547 static void _dc_shutdown(struct hfi1_devdata *dd) in _dc_shutdown() argument
6549 lockdep_assert_held(&dd->dc8051_lock); in _dc_shutdown()
6551 if (dd->dc_shutdown) in _dc_shutdown()
6554 dd->dc_shutdown = 1; in _dc_shutdown()
6556 lcb_shutdown(dd, 1); in _dc_shutdown()
6562 write_csr(dd, DC_DC8051_CFG_RST, 0x1); in _dc_shutdown()
6565 static void dc_shutdown(struct hfi1_devdata *dd) in dc_shutdown() argument
6567 mutex_lock(&dd->dc8051_lock); in dc_shutdown()
6568 _dc_shutdown(dd); in dc_shutdown()
6569 mutex_unlock(&dd->dc8051_lock); in dc_shutdown()
6578 static void _dc_start(struct hfi1_devdata *dd) in _dc_start() argument
6580 lockdep_assert_held(&dd->dc8051_lock); in _dc_start()
6582 if (!dd->dc_shutdown) in _dc_start()
6586 write_csr(dd, DC_DC8051_CFG_RST, 0ull); in _dc_start()
6588 if (wait_fm_ready(dd, TIMEOUT_8051_START)) in _dc_start()
6589 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", in _dc_start()
6593 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); in _dc_start()
6595 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in _dc_start()
6596 dd->dc_shutdown = 0; in _dc_start()
6599 static void dc_start(struct hfi1_devdata *dd) in dc_start() argument
6601 mutex_lock(&dd->dc8051_lock); in dc_start()
6602 _dc_start(dd); in dc_start()
6603 mutex_unlock(&dd->dc8051_lock); in dc_start()
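
_dc_shutdown()/dc_shutdown() and _dc_start()/dc_start() above follow the kernel's underscore convention for locked/unlocked pairs: the underscore variant asserts dc8051_lock is already held so it can be composed inside other locked paths (do_8051_command() calls both on its recovery path), while the plain variant takes the mutex itself. A skeleton of the convention:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct dc_model {
        struct mutex lock;              /* stands in for dd->dc8051_lock */
        int shut_down;
};

/* underscore variant: caller must already hold the lock */
static void _dc_stop(struct dc_model *dc)
{
        lockdep_assert_held(&dc->lock);
        if (dc->shut_down)
                return;
        dc->shut_down = 1;
        /* ... quiesce the LCB and hold the 8051 in reset ... */
}

/* plain variant takes the lock itself; _dc_start()/dc_start() mirror this */
static void dc_stop(struct dc_model *dc)
{
        mutex_lock(&dc->lock);
        _dc_stop(dc);
        mutex_unlock(&dc->lock);
}
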
6609 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) in adjust_lcb_for_fpga_serdes() argument
6614 if (dd->icode != ICODE_FPGA_EMULATION) in adjust_lcb_for_fpga_serdes()
6624 if (is_emulator_s(dd)) in adjust_lcb_for_fpga_serdes()
6628 version = emulator_rev(dd); in adjust_lcb_for_fpga_serdes()
6629 if (!is_ax(dd)) in adjust_lcb_for_fpga_serdes()
6673 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull); in adjust_lcb_for_fpga_serdes()
6684 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); in adjust_lcb_for_fpga_serdes()
6686 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, in adjust_lcb_for_fpga_serdes()
6688 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); in adjust_lcb_for_fpga_serdes()
6700 struct hfi1_devdata *dd = ppd->dd; in handle_sma_message() local
6708 ret = read_idle_sma(dd, &msg); in handle_sma_message()
6711 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg); in handle_sma_message()
6739 dd, in handle_sma_message()
6745 dd_dev_err(dd, in handle_sma_message()
6752 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) in adjust_rcvctrl() argument
6757 spin_lock_irqsave(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6758 rcvctrl = read_csr(dd, RCV_CTRL); in adjust_rcvctrl()
6761 write_csr(dd, RCV_CTRL, rcvctrl); in adjust_rcvctrl()
6762 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6765 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) in add_rcvctrl() argument
6767 adjust_rcvctrl(dd, add, 0); in add_rcvctrl()
6770 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) in clear_rcvctrl() argument
6772 adjust_rcvctrl(dd, 0, clear); in clear_rcvctrl()
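
adjust_rcvctrl() above serializes read-modify-write of the RCV_CTRL CSR behind a spinlock, with add_rcvctrl()/clear_rcvctrl() as thin wrappers that pass only the bits to set or only the bits to clear. A self-contained model (a plain field stands in for the CSR access):

#include <linux/spinlock.h>
#include <linux/types.h>

struct rcv_model {
        spinlock_t lock;                /* stands in for dd->rcvctrl_lock */
        u64 ctrl;                       /* stands in for the RCV_CTRL CSR */
};

static void rcv_adjust(struct rcv_model *r, u64 add, u64 clear)
{
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        r->ctrl = (r->ctrl | add) & ~clear;     /* single serialized RMW step */
        spin_unlock_irqrestore(&r->lock, flags);
}

static inline void rcv_add(struct rcv_model *r, u64 bits)
{
        rcv_adjust(r, bits, 0);
}

static inline void rcv_clear(struct rcv_model *r, u64 bits)
{
        rcv_adjust(r, 0, bits);
}
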
6780 struct hfi1_devdata *dd = ppd->dd; in start_freeze_handling() local
6786 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); in start_freeze_handling()
6789 dd->flags |= HFI1_FROZEN; in start_freeze_handling()
6792 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); in start_freeze_handling()
6797 for (i = 0; i < dd->num_send_contexts; i++) { in start_freeze_handling()
6798 sc = dd->send_contexts[i].sc; in start_freeze_handling()
6807 dd_dev_err(dd, in start_freeze_handling()
6822 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) in wait_for_freeze_status() argument
6829 reg = read_csr(dd, CCE_STATUS); in wait_for_freeze_status()
6841 dd_dev_err(dd, in wait_for_freeze_status()
6854 static void rxe_freeze(struct hfi1_devdata *dd) in rxe_freeze() argument
6860 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in rxe_freeze()
6863 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_freeze()
6864 rcd = hfi1_rcd_get_by_index(dd, i); in rxe_freeze()
6865 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); in rxe_freeze()
6876 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) in rxe_kernel_unfreeze() argument
6883 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_kernel_unfreeze()
6884 rcd = hfi1_rcd_get_by_index(dd, i); in rxe_kernel_unfreeze()
6888 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { in rxe_kernel_unfreeze()
6896 hfi1_rcvctrl(dd, rcvmask, rcd); in rxe_kernel_unfreeze()
6901 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in rxe_kernel_unfreeze()
6913 struct hfi1_devdata *dd = ppd->dd; in handle_freeze() local
6916 wait_for_freeze_status(dd, 1); in handle_freeze()
6921 pio_freeze(dd); in handle_freeze()
6924 sdma_freeze(dd); in handle_freeze()
6929 rxe_freeze(dd); in handle_freeze()
6935 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); in handle_freeze()
6936 wait_for_freeze_status(dd, 0); in handle_freeze()
6938 if (is_ax(dd)) { in handle_freeze()
6939 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); in handle_freeze()
6940 wait_for_freeze_status(dd, 1); in handle_freeze()
6941 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); in handle_freeze()
6942 wait_for_freeze_status(dd, 0); in handle_freeze()
6946 pio_kernel_unfreeze(dd); in handle_freeze()
6949 sdma_unfreeze(dd); in handle_freeze()
6954 rxe_kernel_unfreeze(dd); in handle_freeze()
6969 dd->flags &= ~HFI1_FROZEN; in handle_freeze()
6970 wake_up(&dd->event_queue); in handle_freeze()
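
wait_for_freeze_status() above polls CCE_STATUS until the freeze state matches what handle_freeze() expects, and handle_freeze() then walks the freeze/unfreeze sequence (with an extra freeze-unfreeze cycle on A0 silicon, per the is_ax() branch). A sketch of the poll-with-timeout half; the timeout, sleep range, and bit position are assumptions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define FREEZE_TIMEOUT_MS 100           /* assumed bound */
#define FROZEN_BIT        0x1ull        /* assumed CCE_STATUS bit */

static int wait_frozen(u64 (*read_status)(void), int want_frozen)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(FREEZE_TIMEOUT_MS);

        while (!!(read_status() & FROZEN_BIT) != !!want_frozen) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
                usleep_range(50, 100);
        }
        return 0;
}
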
7010 struct hfi1_devdata *dd = ppd->dd; in handle_link_up() local
7015 read_ltp_rtt(dd); in handle_link_up()
7020 clear_linkup_counters(dd); in handle_link_up()
7032 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) in handle_link_up()
7033 set_up_vl15(dd, dd->vl15buf_cached); in handle_link_up()
7038 dd_dev_err(dd, in handle_link_up()
7157 read_link_down_reason(ppd->dd, &link_down_reason); in handle_link_down()
7161 dd_dev_info(ppd->dd, "%sUnexpected link down\n", in handle_link_down()
7169 read_planned_down_reason_code(ppd->dd, &neigh_reason); in handle_link_down()
7170 dd_dev_info(ppd->dd, in handle_link_down()
7176 dd_dev_info(ppd->dd, in handle_link_down()
7181 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", in handle_link_down()
7211 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in handle_link_down()
7218 dc_shutdown(ppd->dd); in handle_link_down()
7235 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", in handle_link_bounce()
7299 hfi1_event_pkey_change(ppd->dd, ppd->port); in clear_full_mgmt_pkey()
7306 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) in link_width_to_bits() argument
7314 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) in link_width_to_bits()
7322 dd_dev_info(dd, "%s: invalid width %d, using 4\n", in link_width_to_bits()
7348 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, in get_link_widths() argument
7359 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, in get_link_widths()
7361 read_local_lni(dd, &enable_lane_rx); in get_link_widths()
7372 if ((dd->icode == ICODE_RTL_SILICON) && in get_link_widths()
7373 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { in get_link_widths()
7377 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; in get_link_widths()
7380 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7383 dd_dev_err(dd, in get_link_widths()
7386 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7391 dd_dev_info(dd, in get_link_widths()
7394 *tx_width = link_width_to_bits(dd, tx); in get_link_widths()
7395 *rx_width = link_width_to_bits(dd, rx); in get_link_widths()
7411 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, in get_linkup_widths() argument
7418 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths); in get_linkup_widths()
7422 *tx_width = link_width_to_bits(dd, tx); in get_linkup_widths()
7423 *rx_width = link_width_to_bits(dd, rx); in get_linkup_widths()
7426 get_link_widths(dd, &active_tx, &active_rx); in get_linkup_widths()
7442 get_linkup_widths(ppd->dd, &tx_width, &rx_width); in get_linkup_link_widths()
7464 struct hfi1_devdata *dd = ppd->dd; in handle_verify_cap() local
7483 lcb_shutdown(dd, 0); in handle_verify_cap()
7484 adjust_lcb_for_fpga_serdes(dd); in handle_verify_cap()
7486 read_vc_remote_phy(dd, &power_management, &continuous); in handle_verify_cap()
7487 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, in handle_verify_cap()
7489 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); in handle_verify_cap()
7490 read_remote_device_id(dd, &device_id, &device_rev); in handle_verify_cap()
7493 get_link_widths(dd, &active_tx, &active_rx); in handle_verify_cap()
7494 dd_dev_info(dd, in handle_verify_cap()
7497 dd_dev_info(dd, in handle_verify_cap()
7501 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", in handle_verify_cap()
7503 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", in handle_verify_cap()
7516 set_up_vau(dd, vau); in handle_verify_cap()
7522 set_up_vl15(dd, 0); in handle_verify_cap()
7523 dd->vl15buf_cached = vl15buf; in handle_verify_cap()
7538 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val); in handle_verify_cap()
7539 write_csr(dd, DC_LCB_CFG_CRC_MODE, in handle_verify_cap()
7543 reg = read_csr(dd, SEND_CM_CTRL); in handle_verify_cap()
7545 write_csr(dd, SEND_CM_CTRL, in handle_verify_cap()
7548 write_csr(dd, SEND_CM_CTRL, in handle_verify_cap()
7553 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in handle_verify_cap()
7573 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", in handle_verify_cap()
7594 assign_remote_cm_au_table(dd, vcu); in handle_verify_cap()
7605 if (is_ax(dd)) { /* fixed in B0 */ in handle_verify_cap()
7606 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); in handle_verify_cap()
7609 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg); in handle_verify_cap()
7613 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in handle_verify_cap()
7616 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ in handle_verify_cap()
7617 set_8051_lcb_access(dd); in handle_verify_cap()
7660 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7670 get_link_widths(ppd->dd, &tx, &rx); in apply_link_downgrade_policy()
7678 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); in apply_link_downgrade_policy()
7688 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7690 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7701 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7703 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7734 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); in handle_link_downgrade()
7769 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_8051_interrupt() argument
7771 struct hfi1_pportdata *ppd = dd->pport; in handle_8051_interrupt()
7780 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); in handle_8051_interrupt()
7800 dd_dev_info(dd, "Link error: %s\n", in handle_8051_interrupt()
7815 dd_dev_err(dd, "8051 info error: %s\n", in handle_8051_interrupt()
7840 dd_dev_info(dd, "8051: Link up\n"); in handle_8051_interrupt()
7859 dd_dev_info(dd, "8051: Link down%s\n", extra); in handle_8051_interrupt()
7869 dd_dev_info(dd, "8051 info host message: %s\n", in handle_8051_interrupt()
7883 dd_dev_err(dd, "Lost 8051 heartbeat\n"); in handle_8051_interrupt()
7884 write_csr(dd, DC_DC8051_ERR_EN, in handle_8051_interrupt()
7885 read_csr(dd, DC_DC8051_ERR_EN) & in handle_8051_interrupt()
7892 dd_dev_err(dd, "8051 error: %s\n", in handle_8051_interrupt()
7905 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n", in handle_8051_interrupt()
7910 dd_dev_info(dd, in handle_8051_interrupt()
7964 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_dcc_err() argument
7969 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7974 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7975 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); in handle_dcc_err()
7976 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7978 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7984 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7994 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); in handle_dcc_err()
7995 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7996 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7998 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
8037 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n", in handle_dcc_err()
8045 info = read_csr(dd, DCC_ERR_INFO_PORTRCV); in handle_dcc_err()
8046 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); in handle_dcc_err()
8047 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); in handle_dcc_err()
8048 if (!(dd->err_info_rcvport.status_and_code & in handle_dcc_err()
8050 dd->err_info_rcvport.status_and_code = in handle_dcc_err()
8053 dd->err_info_rcvport.status_and_code |= in handle_dcc_err()
8059 dd->err_info_rcvport.packet_flit1 = hdr0; in handle_dcc_err()
8060 dd->err_info_rcvport.packet_flit2 = hdr1; in handle_dcc_err()
8089 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" in handle_dcc_err()
8098 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n"); in handle_dcc_err()
8103 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n"); in handle_dcc_err()
8107 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) in handle_dcc_err()
8112 dd_dev_info_ratelimited(dd, "DCC Error: %s\n", in handle_dcc_err()
8119 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n", in handle_dcc_err()
8126 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) in handle_lcb_err() argument
8130 dd_dev_info(dd, "LCB Error: %s\n", in handle_lcb_err()
8137 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) in is_dc_int() argument
8142 interrupt_clear_down(dd, 0, eri); in is_dc_int()
8153 dd_dev_err(dd, "Parity error in DC LBM block\n"); in is_dc_int()
8155 dd_dev_err(dd, "Invalid DC interrupt %u\n", source); in is_dc_int()
8162 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) in is_send_credit_int() argument
8164 sc_group_release_update(dd, source); in is_send_credit_int()
8176 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) in is_sdma_eng_int() argument
8184 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which, in is_sdma_eng_int()
8186 sdma_dumpstate(&dd->per_sdma[which]); in is_sdma_eng_int()
8189 if (likely(what < 3 && which < dd->num_sdma)) { in is_sdma_eng_int()
8190 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); in is_sdma_eng_int()
8193 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source); in is_sdma_eng_int()
8207 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) in is_rcv_avail_int() argument
8212 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_avail_int()
8213 rcd = hfi1_rcd_get_by_index(dd, source); in is_rcv_avail_int()
8225 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", in is_rcv_avail_int()
8238 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) in is_rcv_urgent_int() argument
8243 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_urgent_int()
8244 rcd = hfi1_rcd_get_by_index(dd, source); in is_rcv_urgent_int()
8256 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", in is_rcv_urgent_int()
8263 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) in is_reserved_int() argument
8267 dd_dev_err(dd, "unexpected %s interrupt\n", in is_reserved_int()
8302 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) in is_interrupt() argument
8309 trace_hfi1_interrupt(dd, entry, source); in is_interrupt()
8310 entry->is_int(dd, source - entry->start); in is_interrupt()
8315 dd_dev_err(dd, "invalid interrupt source %u\n", source); in is_interrupt()
8329 struct hfi1_devdata *dd = data; in general_interrupt() local
8335 this_cpu_inc(*dd->int_counter); in general_interrupt()
8339 if (dd->gi_mask[i] == 0) { in general_interrupt()
8343 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & in general_interrupt()
8344 dd->gi_mask[i]; in general_interrupt()
8347 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]); in general_interrupt()
8353 is_interrupt(dd, bit); in general_interrupt()
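
general_interrupt() above gathers every 64-bit interrupt status word, masks it with gi_mask, write-1-to-clears what it saw, then treats the whole array as one bitmap and dispatches each set bit through is_interrupt(). The dispatch half looks roughly like this; the CSR count is an assumption:

#include <linux/bitops.h>
#include <linux/types.h>

#define NUM_INT_CSRS 12                 /* assumed count of status words */

static void dispatch_ints(u64 *regs, void (*one_int)(unsigned int src))
{
        unsigned long bit;

        /* the status words, already masked and acknowledged, are walked
         * as one long bitmap; each set bit names an interrupt source */
        for_each_set_bit(bit, (unsigned long *)regs, NUM_INT_CSRS * 64)
                one_int(bit);
}
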
8363 struct hfi1_devdata *dd = sde->dd; in sdma_interrupt() local
8367 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_interrupt()
8372 this_cpu_inc(*dd->int_counter); in sdma_interrupt()
8375 status = read_csr(dd, in sdma_interrupt()
8380 write_csr(dd, in sdma_interrupt()
8387 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", in sdma_interrupt()
8400 struct hfi1_devdata *dd = rcd->dd; in clear_recv_intr() local
8403 write_csr(dd, addr, rcd->imask); in clear_recv_intr()
8405 (void)read_csr(dd, addr); in clear_recv_intr()
8411 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); in force_recv_intr()
8432 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in check_packet_present()
8443 struct hfi1_devdata *dd = rcd->dd; in receive_interrupt_common() local
8445 trace_hfi1_receive_interrupt(dd, rcd); in receive_interrupt_common()
8446 this_cpu_inc(*dd->int_counter); in receive_interrupt_common()
8578 u32 read_physical_state(struct hfi1_devdata *dd) in read_physical_state() argument
8582 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); in read_physical_state()
8587 u32 read_logical_state(struct hfi1_devdata *dd) in read_logical_state() argument
8591 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); in read_logical_state()
8596 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) in set_logical_state() argument
8600 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); in set_logical_state()
8604 write_csr(dd, DCC_CFG_PORT_CONFIG, reg); in set_logical_state()
8610 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) in read_lcb_via_8051() argument
8615 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in read_lcb_via_8051()
8616 if (acquire_lcb_access(dd, 0) == 0) { in read_lcb_via_8051()
8617 *data = read_csr(dd, addr); in read_lcb_via_8051()
8618 release_lcb_access(dd, 0); in read_lcb_via_8051()
8626 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data); in read_lcb_via_8051()
8648 static void update_lcb_cache(struct hfi1_devdata *dd) in update_lcb_cache() argument
8655 ret = read_lcb_csr(dd, lcb_cache[i].off, &val); in update_lcb_cache()
8682 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data) in read_lcb_csr() argument
8684 struct hfi1_pportdata *ppd = dd->pport; in read_lcb_csr()
8688 return read_lcb_via_8051(dd, addr, data); in read_lcb_csr()
8697 *data = read_csr(dd, addr); in read_lcb_csr()
8704 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data) in write_lcb_via_8051() argument
8709 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || in write_lcb_via_8051()
8710 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { in write_lcb_via_8051()
8711 if (acquire_lcb_access(dd, 0) == 0) { in write_lcb_via_8051()
8712 write_csr(dd, addr, data); in write_lcb_via_8051()
8713 release_lcb_access(dd, 0); in write_lcb_via_8051()
8721 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data); in write_lcb_via_8051()
8731 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data) in write_lcb_csr() argument
8733 struct hfi1_pportdata *ppd = dd->pport; in write_lcb_csr()
8737 return write_lcb_via_8051(dd, addr, data); in write_lcb_csr()
8742 write_csr(dd, addr, data); in write_lcb_csr()
8751 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data, in do_8051_command() argument
8760 mutex_lock(&dd->dc8051_lock); in do_8051_command()
8763 if (dd->dc_shutdown) { in do_8051_command()
8778 if (dd->dc8051_timed_out) { in do_8051_command()
8779 if (dd->dc8051_timed_out > 1) { in do_8051_command()
8780 dd_dev_err(dd, in do_8051_command()
8786 _dc_shutdown(dd); in do_8051_command()
8787 _dc_start(dd); in do_8051_command()
8809 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0); in do_8051_command()
8815 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg); in do_8051_command()
8826 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); in do_8051_command()
8828 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg); in do_8051_command()
8833 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1); in do_8051_command()
8838 dd->dc8051_timed_out++; in do_8051_command()
8839 dd_dev_err(dd, "8051 host command %u timeout\n", type); in do_8051_command()
8853 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1) in do_8051_command()
8861 dd->dc8051_timed_out = 0; in do_8051_command()
8865 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0); in do_8051_command()
8868 mutex_unlock(&dd->dc8051_lock); in do_8051_command()
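
do_8051_command() above is a classic command mailbox: one command in flight under dd->dc8051_lock, a refusal path while the DC is shut down, a completion poll with a timeout, and escalation to a firmware reset (_dc_shutdown()/_dc_start()) after repeated timeouts. A condensed, self-contained model; the register layout, bit meanings, and poll budget are assumptions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define CMD_TIMEOUT_LOOPS 1000          /* assumed poll budget */
#define CMD_GO            0x1ull
#define STS_DONE          0x1ull

struct mbox_model {
        struct mutex lock;              /* one command in flight */
        int shut_down;
        int timed_out;
        volatile u64 *cmd_reg;          /* stands in for CFG_HOST_CMD_0 */
        volatile u64 *sts_reg;          /* stands in for CFG_HOST_CMD_1 */
};

static int mbox_command(struct mbox_model *mb, u64 cmd)
{
        int i, ret = -ETIMEDOUT;

        mutex_lock(&mb->lock);
        if (mb->shut_down) {
                ret = -ENODEV;
                goto done;
        }
        *mb->cmd_reg = cmd | CMD_GO;    /* post the command */
        for (i = 0; i < CMD_TIMEOUT_LOOPS; i++) {
                if (*mb->sts_reg & STS_DONE) {
                        ret = 0;
                        break;
                }
                udelay(10);
        }
        if (ret)
                mb->timed_out++;        /* repeated timeouts trigger a reset */
        else
                mb->timed_out = 0;
        *mb->cmd_reg = 0;               /* clear the go bit for the next user */
done:
        mutex_unlock(&mb->lock);
        return ret;
}
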
8872 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) in set_physical_link_state() argument
8874 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL); in set_physical_link_state()
8877 int load_8051_config(struct hfi1_devdata *dd, u8 field_id, in load_8051_config() argument
8886 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL); in load_8051_config()
8888 dd_dev_err(dd, in load_8051_config()
8900 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, in read_8051_config() argument
8916 ret = read_8051_data(dd, addr, 8, &big_data); in read_8051_config()
8926 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", in read_8051_config()
8933 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, in write_vc_local_phy() argument
8940 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, in write_vc_local_phy()
8944 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, in write_vc_local_fabric() argument
8954 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, in write_vc_local_fabric()
8958 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, in read_vc_local_link_mode() argument
8963 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, in read_vc_local_link_mode()
8970 static int write_vc_local_link_mode(struct hfi1_devdata *dd, in write_vc_local_link_mode() argument
8980 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, in write_vc_local_link_mode()
8984 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, in write_local_device_id() argument
8991 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); in write_local_device_id()
8994 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, in read_remote_device_id() argument
8999 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); in read_remote_device_id()
9005 int write_host_interface_version(struct hfi1_devdata *dd, u8 version) in write_host_interface_version() argument
9011 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); in write_host_interface_version()
9015 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, in write_host_interface_version()
9019 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, in read_misc_status() argument
9024 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); in read_misc_status()
9030 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); in read_misc_status()
9035 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, in read_vc_remote_phy() argument
9040 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); in read_vc_remote_phy()
9047 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, in read_vc_remote_fabric() argument
9052 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); in read_vc_remote_fabric()
9060 static void read_vc_remote_link_width(struct hfi1_devdata *dd, in read_vc_remote_link_width() argument
9066 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, in read_vc_remote_link_width()
9073 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) in read_local_lni() argument
9077 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); in read_local_lni()
9081 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) in read_last_local_state() argument
9083 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls); in read_last_local_state()
9086 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) in read_last_remote_state() argument
9088 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs); in read_last_remote_state()
9091 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) in hfi1_read_link_quality() argument
9097 if (dd->pport->host_link_state & HLS_UP) { in hfi1_read_link_quality()
9098 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, in hfi1_read_link_quality()
9106 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) in read_planned_down_reason_code() argument
9110 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); in read_planned_down_reason_code()
9114 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) in read_link_down_reason() argument
9118 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); in read_link_down_reason()
9122 static int read_tx_settings(struct hfi1_devdata *dd, in read_tx_settings() argument
9131 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); in read_tx_settings()
9142 static int write_tx_settings(struct hfi1_devdata *dd, in write_tx_settings() argument
9155 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); in write_tx_settings()
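
The read_8051_config()/load_8051_config() users above move one 32-bit frame per configuration field; writers such as write_tx_settings() pack several small values into that frame with shifts. Illustrative pack/unpack helpers; the field positions are assumptions, not the real TX_SETTINGS layout:

#include <linux/types.h>

#define LANE_TX_SHIFT   0               /* all positions assumed */
#define POLARITY_SHIFT  8
#define RX_POL_SHIFT    16
#define MAX_RATE_SHIFT  24

static u32 pack_tx_settings(u8 enable_lane_tx, u8 tx_polarity,
                            u8 rx_polarity, u8 max_rate)
{
        return (u32)enable_lane_tx << LANE_TX_SHIFT |
               (u32)tx_polarity << POLARITY_SHIFT |
               (u32)rx_polarity << RX_POL_SHIFT |
               (u32)max_rate << MAX_RATE_SHIFT;
}

static void unpack_tx_settings(u32 frame, u8 *enable_lane_tx, u8 *tx_polarity)
{
        *enable_lane_tx = (frame >> LANE_TX_SHIFT) & 0xff;
        *tx_polarity = (frame >> POLARITY_SHIFT) & 0xff;
}
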
9163 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) in read_idle_message() argument
9167 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); in read_idle_message()
9169 dd_dev_err(dd, "read idle message: type %d, err %d\n", in read_idle_message()
9173 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); in read_idle_message()
9185 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) in read_idle_sma() argument
9187 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, in read_idle_sma()
9196 static int send_idle_message(struct hfi1_devdata *dd, u64 data) in send_idle_message() argument
9200 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data); in send_idle_message()
9201 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); in send_idle_message()
9203 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", in send_idle_message()
9215 int send_idle_sma(struct hfi1_devdata *dd, u64 message) in send_idle_sma() argument
9221 return send_idle_message(dd, data); in send_idle_sma()
9230 static int do_quick_linkup(struct hfi1_devdata *dd) in do_quick_linkup() argument
9234 lcb_shutdown(dd, 0); in do_quick_linkup()
9239 write_csr(dd, DC_LCB_CFG_LOOPBACK, in do_quick_linkup()
9241 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); in do_quick_linkup()
9246 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in do_quick_linkup()
9249 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in do_quick_linkup()
9251 write_csr(dd, DC_LCB_CFG_RUN, in do_quick_linkup()
9254 ret = wait_link_transfer_active(dd, 10); in do_quick_linkup()
9258 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, in do_quick_linkup()
9270 dd_dev_err(dd, in do_quick_linkup()
9273 dd_dev_err(dd, "Continuing with quick linkup\n"); in do_quick_linkup()
9276 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ in do_quick_linkup()
9277 set_8051_lcb_access(dd); in do_quick_linkup()
9284 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); in do_quick_linkup()
9286 dd_dev_err(dd, in do_quick_linkup()
9290 set_host_lcb_access(dd); in do_quick_linkup()
9291 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ in do_quick_linkup()
9304 static int init_loopback(struct hfi1_devdata *dd) in init_loopback() argument
9306 dd_dev_info(dd, "Entering loopback mode\n"); in init_loopback()
9309 write_csr(dd, DC_DC8051_CFG_MODE, in init_loopback()
9310 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); in init_loopback()
9318 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && in init_loopback()
9337 if (dd->icode == ICODE_FPGA_EMULATION) { in init_loopback()
9338 dd_dev_err(dd, in init_loopback()
9349 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback); in init_loopback()
9384 struct hfi1_devdata *dd = ppd->dd; in set_local_link_attributes() local
9391 fabric_serdes_reset(dd); in set_local_link_attributes()
9394 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, in set_local_link_attributes()
9399 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in set_local_link_attributes()
9415 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, in set_local_link_attributes()
9420 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); in set_local_link_attributes()
9422 dd_dev_err(dd, in set_local_link_attributes()
9431 ret = write_vc_local_phy(dd, in set_local_link_attributes()
9438 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, in set_local_link_attributes()
9455 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) in set_local_link_attributes()
9458 ret = write_vc_local_link_mode(dd, misc_bits, 0, in set_local_link_attributes()
9465 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); in set_local_link_attributes()
9470 dd_dev_err(dd, in set_local_link_attributes()
9490 dd_dev_info(ppd->dd, in start_link()
9508 struct hfi1_devdata *dd = ppd->dd; in wait_for_qsfp_init() local
9527 mask = read_csr(dd, dd->hfi1_id ? in wait_for_qsfp_init()
9532 dd_dev_info(dd, "%s: No IntN detected, reset complete\n", in wait_for_qsfp_init()
9542 struct hfi1_devdata *dd = ppd->dd; in set_qsfp_int_n() local
9545 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); in set_qsfp_int_n()
9551 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in set_qsfp_int_n()
9557 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); in set_qsfp_int_n()
9562 struct hfi1_devdata *dd = ppd->dd; in reset_qsfp() local
9571 qsfp_mask = read_csr(dd, in reset_qsfp()
9572 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); in reset_qsfp()
9574 write_csr(dd, in reset_qsfp()
9575 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9580 write_csr(dd, in reset_qsfp()
9581 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9602 struct hfi1_devdata *dd = ppd->dd; in handle_qsfp_error_conditions() local
9606 dd_dev_err(dd, "%s: QSFP cable temperature too high\n", in handle_qsfp_error_conditions()
9611 dd_dev_err(dd, "%s: QSFP cable temperature too low\n", in handle_qsfp_error_conditions()
9622 dd_dev_err(dd, "%s: QSFP supply voltage too high\n", in handle_qsfp_error_conditions()
9627 dd_dev_err(dd, "%s: QSFP supply voltage too low\n", in handle_qsfp_error_conditions()
9634 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", in handle_qsfp_error_conditions()
9639 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", in handle_qsfp_error_conditions()
9644 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", in handle_qsfp_error_conditions()
9649 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", in handle_qsfp_error_conditions()
9654 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", in handle_qsfp_error_conditions()
9659 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", in handle_qsfp_error_conditions()
9664 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", in handle_qsfp_error_conditions()
9669 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", in handle_qsfp_error_conditions()
9674 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", in handle_qsfp_error_conditions()
9679 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", in handle_qsfp_error_conditions()
9684 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", in handle_qsfp_error_conditions()
9689 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", in handle_qsfp_error_conditions()
9703 struct hfi1_devdata *dd; in qsfp_event() local
9707 dd = ppd->dd; in qsfp_event()
9714 dd_dev_info(ppd->dd, in qsfp_event()
9724 dc_start(dd); in qsfp_event()
9743 if (one_qsfp_read(ppd, dd->hfi1_id, 6, in qsfp_event()
9745 dd_dev_info(dd, in qsfp_event()
9761 void init_qsfp_int(struct hfi1_devdata *dd) in init_qsfp_int() argument
9763 struct hfi1_pportdata *ppd = dd->pport; in init_qsfp_int()
9768 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in init_qsfp_int()
9770 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, in init_qsfp_int()
9778 write_csr(dd, in init_qsfp_int()
9779 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, in init_qsfp_int()
9783 if (!dd->hfi1_id) in init_qsfp_int()
9784 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); in init_qsfp_int()
9786 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); in init_qsfp_int()
9792 static void init_lcb(struct hfi1_devdata *dd) in init_lcb() argument
9795 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in init_lcb()
9801 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01); in init_lcb()
9802 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00); in init_lcb()
9803 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00); in init_lcb()
9804 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); in init_lcb()
9805 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08); in init_lcb()
9806 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02); in init_lcb()
9807 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); in init_lcb()
9827 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); in test_qsfp_read()
9854 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); in try_start_link()
9857 dd_dev_info(ppd->dd, in try_start_link()
9882 struct hfi1_devdata *dd = ppd->dd; in bringup_serdes() local
9887 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK); in bringup_serdes()
9891 if (dd->base_guid) in bringup_serdes()
9892 guid = dd->base_guid + ppd->port - 1; in bringup_serdes()
9900 init_lcb(dd); in bringup_serdes()
9903 ret = init_loopback(dd); in bringup_serdes()
9921 struct hfi1_devdata *dd = ppd->dd; in hfi1_quiet_serdes() local
9944 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in hfi1_quiet_serdes()
9948 static inline int init_cpu_counters(struct hfi1_devdata *dd) in init_cpu_counters() argument
9953 ppd = (struct hfi1_pportdata *)(dd + 1); in init_cpu_counters()
9954 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cpu_counters()
9972 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, in hfi1_put_tid() argument
9977 if (!(dd->flags & HFI1_PRESENT)) in hfi1_put_tid()
9984 dd_dev_err(dd, in hfi1_put_tid()
9989 trace_hfi1_put_tid(dd, index, type, pa, order); in hfi1_put_tid()
9996 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); in hfi1_put_tid()
9997 writeq(reg, dd->rcvarray_wc + (index * 8)); in hfi1_put_tid()
10012 struct hfi1_devdata *dd = rcd->dd; in hfi1_clear_tids() local
10018 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); in hfi1_clear_tids()
10022 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); in hfi1_clear_tids()
10059 struct hfi1_devdata *dd = ppd->dd; in hfi1_get_ib_cfg() local
10106 dd, in hfi1_get_ib_cfg()
10130 u32 lrh_max_header_bytes(struct hfi1_devdata *dd) in lrh_max_header_bytes() argument
10142 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; in lrh_max_header_bytes()
10158 struct hfi1_devdata *dd = ppd->dd; in set_send_length() local
10159 u32 max_hb = lrh_max_header_bytes(dd), dcmtu; in set_send_length()
10160 u32 maxvlmtu = dd->vld[15].mtu; in set_send_length()
10161 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) in set_send_length()
10168 if (dd->vld[i].mtu > maxvlmtu) in set_send_length()
10169 maxvlmtu = dd->vld[i].mtu; in set_send_length()
10171 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10175 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10179 write_csr(dd, SEND_LEN_CHECK0, len1); in set_send_length()
10180 write_csr(dd, SEND_LEN_CHECK1, len2); in set_send_length()
10184 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), in set_send_length()
10185 sc_mtu_to_threshold(dd->vld[i].sc, in set_send_length()
10186 dd->vld[i].mtu, in set_send_length()
10187 get_hdrqentsize(dd->rcd[0]))); in set_send_length()
10190 pio_select_send_context_vl(dd, j, i), in set_send_length()
10193 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), in set_send_length()
10194 sc_mtu_to_threshold(dd->vld[15].sc, in set_send_length()
10195 dd->vld[15].mtu, in set_send_length()
10196 dd->rcd[0]->rcvhdrqentsize)); in set_send_length()
10197 sc_set_cr_threshold(dd->vld[15].sc, thres); in set_send_length()
10202 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); in set_send_length()
10206 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); in set_send_length()
10213 struct hfi1_devdata *dd = ppd->dd; in set_lidlmc() local
10215 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); in set_lidlmc()
10229 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); in set_lidlmc()
10239 for (i = 0; i < chip_send_contexts(dd); i++) { in set_lidlmc()
10242 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg); in set_lidlmc()
10246 sdma_update_lmc(dd, mask, lid); in set_lidlmc()
10323 struct hfi1_devdata *dd = ppd->dd; in decode_state_complete() local
10342 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n", in decode_state_complete()
10344 dd_dev_err(dd, " last reported state state: %s (0x%x)\n", in decode_state_complete()
10346 dd_dev_err(dd, " state successfully completed: %s\n", in decode_state_complete()
10348 dd_dev_err(dd, " fail reason 0x%x: %s\n", in decode_state_complete()
10350 dd_dev_err(dd, " passing lane mask: 0x%x", lanes); in decode_state_complete()
10363 read_last_local_state(ppd->dd, &last_local_state); in check_lni_states()
10364 read_last_remote_state(ppd->dd, &last_remote_state); in check_lni_states()
10379 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) in wait_link_transfer_active() argument
10387 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); in wait_link_transfer_active()
10391 dd_dev_err(dd, in wait_link_transfer_active()
10403 struct hfi1_devdata *dd = ppd->dd; in force_logical_link_state_down() local
10408 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); in force_logical_link_state_down()
10409 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, in force_logical_link_state_down()
10412 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); in force_logical_link_state_down()
10413 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); in force_logical_link_state_down()
10414 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); in force_logical_link_state_down()
10415 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); in force_logical_link_state_down()
10417 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); in force_logical_link_state_down()
10418 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET); in force_logical_link_state_down()
10420 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); in force_logical_link_state_down()
10421 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); in force_logical_link_state_down()
10423 wait_link_transfer_active(dd, 100); in force_logical_link_state_down()
10428 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); in force_logical_link_state_down()
10429 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); in force_logical_link_state_down()
10430 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); in force_logical_link_state_down()
10432 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); in force_logical_link_state_down()
10445 struct hfi1_devdata *dd = ppd->dd; in goto_offline() local
10450 update_lcb_cache(dd); in goto_offline()
10456 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); in goto_offline()
10459 dd_dev_err(dd, in goto_offline()
10479 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); in goto_offline()
10482 release_chip_resource(dd, qsfp_resource(dd)); in goto_offline()
10485 dd_dev_err(dd, in goto_offline()
10504 set_host_lcb_access(dd); in goto_offline()
10505 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ in goto_offline()
10524 ret = wait_fm_ready(dd, 7000); in goto_offline()
10526 dd_dev_err(dd, in goto_offline()
10542 handle_linkup_change(dd, 0); in goto_offline()
10633 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_pstate()
10657 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_lstate()
10690 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); in data_vls_operational()
10691 if ((reg && !ppd->dd->vld[i].mtu) || in data_vls_operational()
10692 (!reg && ppd->dd->vld[i].mtu)) in data_vls_operational()
10709 struct hfi1_devdata *dd = ppd->dd; in set_link_state() local
10724 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, in set_link_state()
10748 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { in set_link_state()
10768 dd_dev_err(dd, in set_link_state()
10776 dd_dev_err(dd, in set_link_state()
10788 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in set_link_state()
10790 handle_linkup_change(dd, 1); in set_link_state()
10791 pio_kernel_linkup(dd); in set_link_state()
10808 dd_dev_err(dd, in set_link_state()
10815 set_logical_state(dd, LSTATE_ARMED); in set_link_state()
10818 dd_dev_err(dd, in set_link_state()
10830 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in set_link_state()
10837 set_logical_state(dd, LSTATE_ACTIVE); in set_link_state()
10840 dd_dev_err(dd, in set_link_state()
10845 sdma_all_running(dd); in set_link_state()
10850 event.device = &dd->verbs_dev.rdi.ibdev; in set_link_state()
10858 dd->dc_shutdown) in set_link_state()
10859 dc_start(dd); in set_link_state()
10861 write_csr(dd, DCC_CFG_LED_CNTRL, 0); in set_link_state()
10877 set_all_slowpath(ppd->dd); in set_link_state()
10886 ret = do_quick_linkup(dd); in set_link_state()
10888 ret1 = set_physical_link_state(dd, PLS_POLLING); in set_link_state()
10893 dd_dev_err(dd, in set_link_state()
10932 if (!dd->dc_shutdown) { in set_link_state()
10933 ret1 = set_physical_link_state(dd, PLS_DISABLED); in set_link_state()
10935 dd_dev_err(dd, in set_link_state()
10943 dd_dev_err(dd, in set_link_state()
10948 dc_shutdown(dd); in set_link_state()
10954 dc_start(dd); in set_link_state()
10971 ret1 = set_physical_link_state(dd, PLS_LINKUP); in set_link_state()
10973 dd_dev_err(dd, in set_link_state()
10985 dd_dev_info(dd, "%s: state 0x%x: not supported\n", in set_link_state()
10994 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", in set_link_state()
11025 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); in hfi1_set_ib_cfg()
11083 dd_dev_info(ppd->dd, in hfi1_set_ib_cfg()
11158 struct hfi1_devdata *dd = ppd->dd; in set_vl_weights() local
11168 drain = !is_ax(dd) && is_up; in set_vl_weights()
11177 ret = stop_drain_data_vls(dd); in set_vl_weights()
11181 dd, in set_vl_weights()
11197 write_csr(dd, target + (i * 8), reg); in set_vl_weights()
11199 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); in set_vl_weights()
11202 open_fill_data_vls(dd); /* reopen all VLs */ in set_vl_weights()
11213 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, in read_one_cm_vl() argument
11216 u64 reg = read_csr(dd, csr); in read_one_cm_vl()
11229 static int get_buffer_control(struct hfi1_devdata *dd, in get_buffer_control() argument
11240 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); in get_buffer_control()
11243 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); in get_buffer_control()
11245 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in get_buffer_control()
11256 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) in get_sc2vlnt() argument
11262 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0); in get_sc2vlnt()
11270 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16); in get_sc2vlnt()
11280 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, in get_vlarb_preempt() argument
11291 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) in set_sc2vlnt() argument
11293 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, in set_sc2vlnt()
11311 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, in set_sc2vlnt()
11331 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, in nonzero_msg() argument
11335 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n", in nonzero_msg()
11340 static void set_global_shared(struct hfi1_devdata *dd, u16 limit) in set_global_shared() argument
11344 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_global_shared()
11347 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_global_shared()
11351 static void set_global_limit(struct hfi1_devdata *dd, u16 limit) in set_global_limit() argument
11355 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); in set_global_limit()
11358 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); in set_global_limit()
11362 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) in set_vl_shared() argument
11372 reg = read_csr(dd, addr); in set_vl_shared()
11375 write_csr(dd, addr, reg); in set_vl_shared()
11379 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) in set_vl_dedicated() argument
11389 reg = read_csr(dd, addr); in set_vl_dedicated()
11392 write_csr(dd, addr, reg); in set_vl_dedicated()
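set_global_shared(), set_global_limit(), set_vl_shared() and set_vl_dedicated() all follow the same read-modify-write shape on the SEND_CM_* credit CSRs: read the register, replace one field, write it back. A sketch of the field update, where mask and shift are placeholders rather than the real register encodings:

    #include <stdint.h>

    /* Replace one field of a credit CSR value: clear the old contents,
     * then insert the new limit at the field's position. */
    static uint64_t set_credit_field(uint64_t reg, uint64_t mask, int shift,
                                     uint16_t limit)
    {
        reg &= ~mask;
        reg |= ((uint64_t)limit << shift) & mask;
        return reg;
    }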
11396 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, in wait_for_vl_status_clear() argument
11404 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; in wait_for_vl_status_clear()
11413 dd_dev_err(dd, in wait_for_vl_status_clear()
11420 dd_dev_err(dd, in wait_for_vl_status_clear()
11451 struct hfi1_devdata *dd = ppd->dd; in set_buffer_control() local
11487 nonzero_msg(dd, i, "dedicated", in set_buffer_control()
11489 nonzero_msg(dd, i, "shared", in set_buffer_control()
11497 get_buffer_control(dd, &cur_bc, &cur_total); in set_buffer_control()
11536 set_global_limit(dd, new_total); in set_buffer_control()
11544 (is_ax(dd) && any_shared_limit_changing)) { in set_buffer_control()
11545 set_global_shared(dd, 0); in set_buffer_control()
11555 set_vl_shared(dd, i, 0); in set_buffer_control()
11560 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask, in set_buffer_control()
11569 set_vl_dedicated(dd, i, in set_buffer_control()
11577 wait_for_vl_status_clear(dd, ld_mask, "dedicated"); in set_buffer_control()
11586 set_vl_dedicated(dd, i, in set_buffer_control()
11599 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11605 set_global_shared(dd, in set_buffer_control()
11610 set_global_limit(dd, new_total); in set_buffer_control()
11622 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11627 ret = pio_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11669 size = get_buffer_control(ppd->dd, t, NULL); in fm_get_table()
11672 size = get_sc2vlnt(ppd->dd, t); in fm_get_table()
11677 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); in fm_get_table()
11727 set_sc2vlnt(ppd->dd, t); in fm_set_table()
11740 static int disable_data_vls(struct hfi1_devdata *dd) in disable_data_vls() argument
11742 if (is_ax(dd)) in disable_data_vls()
11745 pio_send_control(dd, PSC_DATA_VL_DISABLE); in disable_data_vls()
11758 int open_fill_data_vls(struct hfi1_devdata *dd) in open_fill_data_vls() argument
11760 if (is_ax(dd)) in open_fill_data_vls()
11763 pio_send_control(dd, PSC_DATA_VL_ENABLE); in open_fill_data_vls()
11773 static void drain_data_vls(struct hfi1_devdata *dd) in drain_data_vls() argument
11775 sc_wait(dd); in drain_data_vls()
11776 sdma_wait(dd); in drain_data_vls()
11777 pause_for_credit_return(dd); in drain_data_vls()
11790 int stop_drain_data_vls(struct hfi1_devdata *dd) in stop_drain_data_vls() argument
11794 ret = disable_data_vls(dd); in stop_drain_data_vls()
11796 drain_data_vls(dd); in stop_drain_data_vls()
11805 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns) in ns_to_cclock() argument
11809 if (dd->icode == ICODE_FPGA_EMULATION) in ns_to_cclock()
11822 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks) in cclock_to_ns() argument
11826 if (dd->icode == ICODE_FPGA_EMULATION) in cclock_to_ns()
11843 struct hfi1_devdata *dd = rcd->dd; in adjust_rcv_timeout() local
11868 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ in adjust_rcv_timeout()
11870 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); in adjust_rcv_timeout()
11878 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout()
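The adjust_rcv_timeout() lines above grow the interrupt-coalescing timeout geometrically: timeout << 1, saturating at dd->rcv_intr_timeout_csr once the maximum is reached. The same arithmetic as a standalone helper:

    #include <stdint.h>

    /* Double the receive-available timeout, capped at the CSR maximum. */
    static uint32_t grow_rcv_timeout(uint32_t timeout, uint32_t max_csr)
    {
        uint32_t next = timeout << 1;

        return next > max_csr ? max_csr : next;
    }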
11886 struct hfi1_devdata *dd = rcd->dd; in update_usrhead() local
11899 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); in update_usrhead()
11904 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); in update_usrhead()
11911 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty()
11917 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty()
11983 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) in hfi1_validate_rcvhdrcnt() argument
11986 dd_dev_err(dd, "Receive header queue count too small\n"); in hfi1_validate_rcvhdrcnt()
11991 dd_dev_err(dd, in hfi1_validate_rcvhdrcnt()
11998 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", in hfi1_validate_rcvhdrcnt()
12013 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) in set_hdrq_regs() argument
12019 write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); in set_hdrq_regs()
12023 write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); in set_hdrq_regs()
12026 write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); in set_hdrq_regs()
12032 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in set_hdrq_regs()
12033 dd->rcvhdrtail_dummy_dma); in set_hdrq_regs()
12036 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, in hfi1_rcvctrl() argument
12050 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); in hfi1_rcvctrl()
12055 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, in hfi1_rcvctrl()
12058 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12074 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; in hfi1_rcvctrl()
12086 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); in hfi1_rcvctrl()
12090 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); in hfi1_rcvctrl()
12099 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); in hfi1_rcvctrl()
12113 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); in hfi1_rcvctrl()
12115 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); in hfi1_rcvctrl()
12118 write_csr(dd, RCV_VL15, 0); in hfi1_rcvctrl()
12124 if (dd->rcvhdrtail_dummy_dma) { in hfi1_rcvctrl()
12125 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12126 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12134 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12139 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12173 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12176 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12180 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); in hfi1_rcvctrl()
12185 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); in hfi1_rcvctrl()
12187 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", in hfi1_rcvctrl()
12189 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); in hfi1_rcvctrl()
12190 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); in hfi1_rcvctrl()
12191 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); in hfi1_rcvctrl()
12192 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); in hfi1_rcvctrl()
12193 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); in hfi1_rcvctrl()
12194 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", in hfi1_rcvctrl()
12205 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, in hfi1_rcvctrl()
12211 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); in hfi1_rcvctrl()
12220 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, in hfi1_rcvctrl()
12221 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12224 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) in hfi1_read_cntrs() argument
12230 ret = dd->cntrnameslen; in hfi1_read_cntrs()
12231 *namep = dd->cntrnames; in hfi1_read_cntrs()
12236 ret = (dd->ndevcntrs) * sizeof(u64); in hfi1_read_cntrs()
12239 *cntrp = dd->cntrs; in hfi1_read_cntrs()
12255 dd, j, in hfi1_read_cntrs()
12262 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12268 for (j = 0; j < chip_sdma_engines(dd); in hfi1_read_cntrs()
12271 entry->rw_cntr(entry, dd, j, in hfi1_read_cntrs()
12276 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12280 val = entry->rw_cntr(entry, dd, in hfi1_read_cntrs()
12283 dd->cntrs[entry->offset] = val; in hfi1_read_cntrs()
12301 ret = ppd->dd->portcntrnameslen; in hfi1_read_portcntrs()
12302 *namep = ppd->dd->portcntrnames; in hfi1_read_portcntrs()
12307 ret = ppd->dd->nportcntrs * sizeof(u64); in hfi1_read_portcntrs()
12344 static void free_cntrs(struct hfi1_devdata *dd) in free_cntrs() argument
12349 if (dd->synth_stats_timer.function) in free_cntrs()
12350 del_timer_sync(&dd->synth_stats_timer); in free_cntrs()
12351 ppd = (struct hfi1_pportdata *)(dd + 1); in free_cntrs()
12352 for (i = 0; i < dd->num_pports; i++, ppd++) { in free_cntrs()
12364 kfree(dd->portcntrnames); in free_cntrs()
12365 dd->portcntrnames = NULL; in free_cntrs()
12366 kfree(dd->cntrs); in free_cntrs()
12367 dd->cntrs = NULL; in free_cntrs()
12368 kfree(dd->scntrs); in free_cntrs()
12369 dd->scntrs = NULL; in free_cntrs()
12370 kfree(dd->cntrnames); in free_cntrs()
12371 dd->cntrnames = NULL; in free_cntrs()
12372 if (dd->update_cntr_wq) { in free_cntrs()
12373 destroy_workqueue(dd->update_cntr_wq); in free_cntrs()
12374 dd->update_cntr_wq = NULL; in free_cntrs()
12378 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry, in read_dev_port_cntr() argument
12385 dd_dev_err(dd, "Counter %s not enabled\n", entry->name); in read_dev_port_cntr()
12429 static u64 write_dev_port_cntr(struct hfi1_devdata *dd, in write_dev_port_cntr() argument
12436 dd_dev_err(dd, "Counter %s not enabled\n", entry->name); in write_dev_port_cntr()
12463 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) in read_dev_cntr() argument
12469 sval = dd->scntrs + entry->offset; in read_dev_cntr()
12474 return read_dev_port_cntr(dd, entry, sval, dd, vl); in read_dev_cntr()
12477 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) in write_dev_cntr() argument
12483 sval = dd->scntrs + entry->offset; in write_dev_cntr()
12488 return write_dev_port_cntr(dd, entry, sval, dd, vl, data); in write_dev_cntr()
12502 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in read_port_cntr()
12508 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); in read_port_cntr()
12522 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in write_port_cntr()
12528 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); in write_port_cntr()
12540 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, in do_update_synth_timer() local
12550 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12553 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12558 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12560 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in do_update_synth_timer()
12567 dd->unit); in do_update_synth_timer()
12569 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in do_update_synth_timer()
12571 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, in do_update_synth_timer()
12575 dd->unit); in do_update_synth_timer()
12581 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); in do_update_synth_timer()
12586 read_dev_cntr(dd, i, vl); in do_update_synth_timer()
12588 read_dev_cntr(dd, i, CNTR_INVALID_VL); in do_update_synth_timer()
12591 ppd = (struct hfi1_pportdata *)(dd + 1); in do_update_synth_timer()
12592 for (i = 0; i < dd->num_pports; i++, ppd++) { in do_update_synth_timer()
12611 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12615 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12619 dd->unit, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12622 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); in do_update_synth_timer()
12628 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); in update_synth_timer() local
12630 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); in update_synth_timer()
12631 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in update_synth_timer()
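update_synth_timer() shows the standard split between atomic and process context: the timer callback only queues work and re-arms itself, while do_update_synth_timer() does the slow counter reads from the workqueue. A self-contained sketch of the pattern; schedule_work() stands in for the driver's dedicated update_cntr_wq, and the 10-second period is illustrative rather than SYNTH_CNT_TIME:

    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct synth_stats {
        struct timer_list timer;
        struct work_struct work;
    };

    /* Timer callbacks run in atomic context and must not block, so the
     * heavy lifting is deferred to the work item before re-arming. */
    static void synth_timer_fn(struct timer_list *t)
    {
        struct synth_stats *s = from_timer(s, t, timer);

        schedule_work(&s->work);
        mod_timer(&s->timer, jiffies + 10 * HZ);
    }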
12635 static int init_cntrs(struct hfi1_devdata *dd) in init_cntrs() argument
12644 u32 sdma_engines = chip_sdma_engines(dd); in init_cntrs()
12647 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); in init_cntrs()
12654 dd->ndevcntrs = 0; in init_cntrs()
12664 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12673 dd->ndevcntrs++; in init_cntrs()
12676 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12685 dd->ndevcntrs++; in init_cntrs()
12693 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12694 dd->ndevcntrs++; in init_cntrs()
12699 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), in init_cntrs()
12701 if (!dd->cntrs) in init_cntrs()
12704 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12705 if (!dd->scntrs) in init_cntrs()
12709 dd->cntrnameslen = sz; in init_cntrs()
12710 dd->cntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12711 if (!dd->cntrnames) in init_cntrs()
12715 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { in init_cntrs()
12772 rcv_ctxts = dd->num_rcv_contexts; in init_cntrs()
12780 dd->nportcntrs = 0; in init_cntrs()
12788 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12797 dd->nportcntrs++; in init_cntrs()
12805 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12806 dd->nportcntrs++; in init_cntrs()
12811 dd->portcntrnameslen = sz; in init_cntrs()
12812 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12813 if (!dd->portcntrnames) in init_cntrs()
12817 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { in init_cntrs()
12852 ppd = (struct hfi1_pportdata *)(dd + 1); in init_cntrs()
12853 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cntrs()
12854 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12858 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12864 if (init_cpu_counters(dd)) in init_cntrs()
12867 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", in init_cntrs()
12868 WQ_MEM_RECLAIM, dd->unit); in init_cntrs()
12869 if (!dd->update_cntr_wq) in init_cntrs()
12872 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); in init_cntrs()
12874 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in init_cntrs()
12877 free_cntrs(dd); in init_cntrs()
12881 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) in chip_to_opa_lstate() argument
12893 dd_dev_err(dd, in chip_to_opa_lstate()
12900 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) in chip_to_opa_pstate() argument
12917 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", in chip_to_opa_pstate()
12997 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", in update_statusp()
13019 new_state = chip_to_opa_lstate(ppd->dd, in wait_logical_linkstate()
13020 read_logical_state(ppd->dd)); in wait_logical_linkstate()
13024 dd_dev_err(ppd->dd, in wait_logical_linkstate()
13037 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); in log_state_transition()
13039 dd_dev_info(ppd->dd, in log_state_transition()
13050 u32 read_state = read_physical_state(ppd->dd); in log_physical_state()
13055 dd_dev_err(ppd->dd, in log_physical_state()
13078 read_state = read_physical_state(ppd->dd); in wait_physical_linkstate()
13082 dd_dev_err(ppd->dd, in wait_physical_linkstate()
13111 read_state = read_physical_state(ppd->dd); in wait_phys_link_offline_substates()
13115 dd_dev_err(ppd->dd, in wait_phys_link_offline_substates()
13144 read_state = read_physical_state(ppd->dd); in wait_phys_link_out_of_offline()
13148 dd_dev_err(ppd->dd, in wait_phys_link_out_of_offline()
13169 struct hfi1_devdata *dd = sc->dd; in hfi1_init_ctxt() local
13174 reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13180 write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13185 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) in hfi1_tempsense_rd() argument
13190 if (dd->icode != ICODE_RTL_SILICON) { in hfi1_tempsense_rd()
13192 dd_dev_info(dd, "%s: tempsense not supported by HW\n", in hfi1_tempsense_rd()
13196 reg = read_csr(dd, ASIC_STS_THERM); in hfi1_tempsense_rd()
13221 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, in read_mod_write() argument
13227 spin_lock(&dd->irq_src_lock); in read_mod_write()
13228 reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); in read_mod_write()
13233 write_csr(dd, CCE_INT_MASK + (8 * idx), reg); in read_mod_write()
13234 spin_unlock(&dd->irq_src_lock); in read_mod_write()
13246 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) in set_intr_bits() argument
13262 read_mod_write(dd, src - 1, bits, set); in set_intr_bits()
13267 read_mod_write(dd, last, bits, set); in set_intr_bits()
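read_mod_write() and set_intr_bits() together set or clear a range of interrupt sources packed 64 per CCE_INT_MASK register (hence the 8 * idx byte offset), serialized by irq_src_lock. Simplified to a single source per call, with the locking left as comments:

    #include <stdint.h>
    #include <stdbool.h>

    #define SRCS_PER_REG 64

    /* Set or clear one interrupt source in an array of 64-bit mask
     * words: source s lives in word s / 64, bit s % 64. */
    static void mask_update(uint64_t *mask_regs, unsigned int src, bool set)
    {
        unsigned int idx = src / SRCS_PER_REG;
        uint64_t bit = 1ull << (src % SRCS_PER_REG);

        /* spin_lock(&irq_src_lock) in the driver */
        if (set)
            mask_regs[idx] |= bit;
        else
            mask_regs[idx] &= ~bit;
        /* spin_unlock(&irq_src_lock) */
    }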
13275 void clear_all_interrupts(struct hfi1_devdata *dd) in clear_all_interrupts() argument
13280 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); in clear_all_interrupts()
13282 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13283 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13284 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13285 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13286 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13287 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13288 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13289 for (i = 0; i < chip_send_contexts(dd); i++) in clear_all_interrupts()
13290 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13291 for (i = 0; i < chip_sdma_engines(dd); i++) in clear_all_interrupts()
13292 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); in clear_all_interrupts()
13294 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); in clear_all_interrupts()
13295 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); in clear_all_interrupts()
13296 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); in clear_all_interrupts()
13303 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) in remap_intr() argument
13312 dd->gi_mask[m] &= ~((u64)1 << n); in remap_intr()
13314 dd_dev_err(dd, "remap interrupt err\n"); in remap_intr()
13321 reg = read_csr(dd, CCE_INT_MAP + (8 * m)); in remap_intr()
13324 write_csr(dd, CCE_INT_MAP + (8 * m), reg); in remap_intr()
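remap_intr() reroutes one interrupt source to an MSI-X vector by rewriting its entry in CCE_INT_MAP. Assuming each 64-bit map CSR carries eight 8-bit entries, one per source (the field width is inferred, not shown in these fragments), the update looks like:

    #include <stdint.h>

    /* Replace the 8-bit map entry in byte lane `lane` (0..7) of one
     * CCE_INT_MAP qword with the new MSI-X vector number. */
    static uint64_t remap_entry(uint64_t map_reg, unsigned int lane,
                                uint8_t msix_vec)
    {
        map_reg &= ~(0xffull << (8 * lane));
        map_reg |= (uint64_t)msix_vec << (8 * lane);
        return map_reg;
    }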
13327 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) in remap_sdma_interrupts() argument
13336 remap_intr(dd, IS_SDMA_START + engine, msix_intr); in remap_sdma_interrupts()
13337 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); in remap_sdma_interrupts()
13338 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); in remap_sdma_interrupts()
13345 void reset_interrupts(struct hfi1_devdata *dd) in reset_interrupts() argument
13351 dd->gi_mask[i] = ~(u64)0; in reset_interrupts()
13355 write_csr(dd, CCE_INT_MAP + (8 * i), 0); in reset_interrupts()
13363 static int set_up_interrupts(struct hfi1_devdata *dd) in set_up_interrupts() argument
13368 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); in set_up_interrupts()
13371 clear_all_interrupts(dd); in set_up_interrupts()
13374 reset_interrupts(dd); in set_up_interrupts()
13377 ret = msix_initialize(dd); in set_up_interrupts()
13381 ret = msix_request_irqs(dd); in set_up_interrupts()
13383 msix_clean_up_interrupts(dd); in set_up_interrupts()
13399 static int set_up_context_variables(struct hfi1_devdata *dd) in set_up_context_variables() argument
13408 u32 send_contexts = chip_send_contexts(dd); in set_up_context_variables()
13409 u32 rcv_contexts = chip_rcv_contexts(dd); in set_up_context_variables()
13432 dd_dev_err(dd, in set_up_context_variables()
13452 dd_dev_err(dd, in set_up_context_variables()
13461 hfi1_num_netdev_contexts(dd, rcv_contexts - in set_up_context_variables()
13476 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2); in set_up_context_variables()
13481 dd_dev_err(dd, in set_up_context_variables()
13490 dd->num_rcv_contexts = in set_up_context_variables()
13492 dd->n_krcv_queues = num_kernel_contexts; in set_up_context_variables()
13493 dd->first_dyn_alloc_ctxt = num_kernel_contexts; in set_up_context_variables()
13494 dd->num_netdev_contexts = num_netdev_contexts; in set_up_context_variables()
13495 dd->num_user_contexts = n_usr_ctxts; in set_up_context_variables()
13496 dd->freectxts = n_usr_ctxts; in set_up_context_variables()
13497 dd_dev_info(dd, in set_up_context_variables()
13500 (int)dd->num_rcv_contexts, in set_up_context_variables()
13501 (int)dd->n_krcv_queues, in set_up_context_variables()
13502 dd->num_netdev_contexts, in set_up_context_variables()
13503 dd->num_user_contexts); in set_up_context_variables()
13516 dd->rcv_entries.group_size = RCV_INCREMENT; in set_up_context_variables()
13517 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; in set_up_context_variables()
13518 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; in set_up_context_variables()
13519 dd->rcv_entries.nctxt_extra = ngroups - in set_up_context_variables()
13520 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); in set_up_context_variables()
13521 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n", in set_up_context_variables()
13522 dd->rcv_entries.ngroups, in set_up_context_variables()
13523 dd->rcv_entries.nctxt_extra); in set_up_context_variables()
13524 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > in set_up_context_variables()
13526 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / in set_up_context_variables()
13527 dd->rcv_entries.group_size; in set_up_context_variables()
13528 dd_dev_info(dd, in set_up_context_variables()
13530 dd->rcv_entries.ngroups); in set_up_context_variables()
13531 dd->rcv_entries.nctxt_extra = 0; in set_up_context_variables()
13536 ret = init_sc_pools_and_sizes(dd); in set_up_context_variables()
13538 dd->num_send_contexts = ret; in set_up_context_variables()
13540 dd, in set_up_context_variables()
13543 dd->num_send_contexts, in set_up_context_variables()
13544 dd->sc_sizes[SC_KERNEL].count, in set_up_context_variables()
13545 dd->sc_sizes[SC_ACK].count, in set_up_context_variables()
13546 dd->sc_sizes[SC_USER].count, in set_up_context_variables()
13547 dd->sc_sizes[SC_VL15].count); in set_up_context_variables()
13561 struct hfi1_devdata *dd = ppd->dd; in set_partition_keys() local
13565 dd_dev_info(dd, "Setting partition keys\n"); in set_partition_keys()
13566 for (i = 0; i < hfi1_get_npkeys(dd); i++) { in set_partition_keys()
13573 write_csr(dd, RCV_PARTITION_KEY + in set_partition_keys()
13580 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); in set_partition_keys()
13591 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) in write_uninitialized_csrs_and_memories() argument
13597 write_csr(dd, CCE_INT_MAP + (8 * i), 0); in write_uninitialized_csrs_and_memories()
13600 for (i = 0; i < chip_send_contexts(dd); i++) in write_uninitialized_csrs_and_memories()
13601 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); in write_uninitialized_csrs_and_memories()
13613 for (i = 0; i < chip_rcv_contexts(dd); i++) { in write_uninitialized_csrs_and_memories()
13614 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); in write_uninitialized_csrs_and_memories()
13615 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); in write_uninitialized_csrs_and_memories()
13617 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); in write_uninitialized_csrs_and_memories()
13621 for (i = 0; i < chip_rcv_array_count(dd); i++) in write_uninitialized_csrs_and_memories()
13622 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0); in write_uninitialized_csrs_and_memories()
13626 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); in write_uninitialized_csrs_and_memories()
13632 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, in clear_cce_status() argument
13639 reg = read_csr(dd, CCE_STATUS); in clear_cce_status()
13644 write_csr(dd, CCE_CTRL, ctrl_bits); in clear_cce_status()
13649 reg = read_csr(dd, CCE_STATUS); in clear_cce_status()
13653 dd_dev_err(dd, in clear_cce_status()
13663 static void reset_cce_csrs(struct hfi1_devdata *dd) in reset_cce_csrs() argument
13671 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); in reset_cce_csrs()
13672 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); in reset_cce_csrs()
13673 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); in reset_cce_csrs()
13675 write_csr(dd, CCE_SCRATCH + (8 * i), 0); in reset_cce_csrs()
13677 write_csr(dd, CCE_ERR_MASK, 0); in reset_cce_csrs()
13678 write_csr(dd, CCE_ERR_CLEAR, ~0ull); in reset_cce_csrs()
13681 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0); in reset_cce_csrs()
13682 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR); in reset_cce_csrs()
13685 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); in reset_cce_csrs()
13686 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), in reset_cce_csrs()
13691 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull); in reset_cce_csrs()
13692 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull); in reset_cce_csrs()
13695 write_csr(dd, CCE_INT_MAP, 0); in reset_cce_csrs()
13698 write_csr(dd, CCE_INT_MASK + (8 * i), 0); in reset_cce_csrs()
13699 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull); in reset_cce_csrs()
13704 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); in reset_cce_csrs()
13708 static void reset_misc_csrs(struct hfi1_devdata *dd) in reset_misc_csrs() argument
13713 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0); in reset_misc_csrs()
13714 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); in reset_misc_csrs()
13715 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); in reset_misc_csrs()
13722 write_csr(dd, MISC_CFG_RSA_CMD, 1); in reset_misc_csrs()
13723 write_csr(dd, MISC_CFG_RSA_MU, 0); in reset_misc_csrs()
13724 write_csr(dd, MISC_CFG_FW_CTRL, 0); in reset_misc_csrs()
13730 write_csr(dd, MISC_ERR_MASK, 0); in reset_misc_csrs()
13731 write_csr(dd, MISC_ERR_CLEAR, ~0ull); in reset_misc_csrs()
13736 static void reset_txe_csrs(struct hfi1_devdata *dd) in reset_txe_csrs() argument
13743 write_csr(dd, SEND_CTRL, 0); in reset_txe_csrs()
13744 __cm_reset(dd, 0); /* reset CM internal state */ in reset_txe_csrs()
13749 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); in reset_txe_csrs()
13750 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ in reset_txe_csrs()
13752 write_csr(dd, SEND_PIO_ERR_MASK, 0); in reset_txe_csrs()
13753 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13756 write_csr(dd, SEND_DMA_ERR_MASK, 0); in reset_txe_csrs()
13757 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13760 write_csr(dd, SEND_EGRESS_ERR_MASK, 0); in reset_txe_csrs()
13761 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13763 write_csr(dd, SEND_BTH_QP, 0); in reset_txe_csrs()
13764 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); in reset_txe_csrs()
13765 write_csr(dd, SEND_SC2VLT0, 0); in reset_txe_csrs()
13766 write_csr(dd, SEND_SC2VLT1, 0); in reset_txe_csrs()
13767 write_csr(dd, SEND_SC2VLT2, 0); in reset_txe_csrs()
13768 write_csr(dd, SEND_SC2VLT3, 0); in reset_txe_csrs()
13769 write_csr(dd, SEND_LEN_CHECK0, 0); in reset_txe_csrs()
13770 write_csr(dd, SEND_LEN_CHECK1, 0); in reset_txe_csrs()
13772 write_csr(dd, SEND_ERR_MASK, 0); in reset_txe_csrs()
13773 write_csr(dd, SEND_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13776 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); in reset_txe_csrs()
13778 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); in reset_txe_csrs()
13779 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) in reset_txe_csrs()
13780 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); in reset_txe_csrs()
13782 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); in reset_txe_csrs()
13784 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); in reset_txe_csrs()
13785 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); in reset_txe_csrs()
13786 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); in reset_txe_csrs()
13788 write_csr(dd, SEND_CM_TIMER_CTRL, 0); in reset_txe_csrs()
13789 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); in reset_txe_csrs()
13790 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); in reset_txe_csrs()
13791 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); in reset_txe_csrs()
13792 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); in reset_txe_csrs()
13794 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); in reset_txe_csrs()
13795 write_csr(dd, SEND_CM_CREDIT_VL15, 0); in reset_txe_csrs()
13800 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); in reset_txe_csrs()
13807 for (i = 0; i < chip_send_contexts(dd); i++) { in reset_txe_csrs()
13808 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); in reset_txe_csrs()
13809 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); in reset_txe_csrs()
13810 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); in reset_txe_csrs()
13811 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); in reset_txe_csrs()
13812 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); in reset_txe_csrs()
13813 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13814 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0); in reset_txe_csrs()
13815 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); in reset_txe_csrs()
13816 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0); in reset_txe_csrs()
13817 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); in reset_txe_csrs()
13818 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); in reset_txe_csrs()
13819 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); in reset_txe_csrs()
13825 for (i = 0; i < chip_sdma_engines(dd); i++) { in reset_txe_csrs()
13826 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); in reset_txe_csrs()
13828 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); in reset_txe_csrs()
13829 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); in reset_txe_csrs()
13830 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); in reset_txe_csrs()
13832 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); in reset_txe_csrs()
13833 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); in reset_txe_csrs()
13835 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0); in reset_txe_csrs()
13836 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0); in reset_txe_csrs()
13839 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0); in reset_txe_csrs()
13840 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull); in reset_txe_csrs()
13842 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0); in reset_txe_csrs()
13843 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0); in reset_txe_csrs()
13844 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0); in reset_txe_csrs()
13845 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0); in reset_txe_csrs()
13846 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0); in reset_txe_csrs()
13847 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0); in reset_txe_csrs()
13848 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0); in reset_txe_csrs()
13856 static void init_rbufs(struct hfi1_devdata *dd) in init_rbufs() argument
13867 reg = read_csr(dd, RCV_STATUS); in init_rbufs()
13879 dd_dev_err(dd, in init_rbufs()
13888 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); in init_rbufs()
13896 read_csr(dd, RCV_CTRL); in init_rbufs()
13903 reg = read_csr(dd, RCV_STATUS); in init_rbufs()
13909 dd_dev_err(dd, in init_rbufs()
13918 static void reset_rxe_csrs(struct hfi1_devdata *dd) in reset_rxe_csrs() argument
13925 write_csr(dd, RCV_CTRL, 0); in reset_rxe_csrs()
13926 init_rbufs(dd); in reset_rxe_csrs()
13931 write_csr(dd, RCV_BTH_QP, 0); in reset_rxe_csrs()
13932 write_csr(dd, RCV_MULTICAST, 0); in reset_rxe_csrs()
13933 write_csr(dd, RCV_BYPASS, 0); in reset_rxe_csrs()
13934 write_csr(dd, RCV_VL15, 0); in reset_rxe_csrs()
13936 write_csr(dd, RCV_ERR_INFO, in reset_rxe_csrs()
13939 write_csr(dd, RCV_ERR_MASK, 0); in reset_rxe_csrs()
13940 write_csr(dd, RCV_ERR_CLEAR, ~0ull); in reset_rxe_csrs()
13943 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); in reset_rxe_csrs()
13945 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0); in reset_rxe_csrs()
13947 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0); in reset_rxe_csrs()
13949 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0); in reset_rxe_csrs()
13951 clear_rsm_rule(dd, i); in reset_rxe_csrs()
13953 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0); in reset_rxe_csrs()
13958 for (i = 0; i < chip_rcv_contexts(dd); i++) { in reset_rxe_csrs()
13960 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); in reset_rxe_csrs()
13962 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0); in reset_rxe_csrs()
13963 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0); in reset_rxe_csrs()
13964 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0); in reset_rxe_csrs()
13965 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); in reset_rxe_csrs()
13966 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0); in reset_rxe_csrs()
13967 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0); in reset_rxe_csrs()
13968 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0); in reset_rxe_csrs()
13969 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); in reset_rxe_csrs()
13970 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0); in reset_rxe_csrs()
13971 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0); in reset_rxe_csrs()
13975 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0); in reset_rxe_csrs()
13977 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); in reset_rxe_csrs()
13980 write_uctxt_csr(dd, i, in reset_rxe_csrs()
13997 static void init_sc2vl_tables(struct hfi1_devdata *dd) in init_sc2vl_tables() argument
14003 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( in init_sc2vl_tables()
14009 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( in init_sc2vl_tables()
14015 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( in init_sc2vl_tables()
14021 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( in init_sc2vl_tables()
14029 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( in init_sc2vl_tables()
14033 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( in init_sc2vl_tables()
14041 *((u8 *)(dd->sc2vl) + i) = (u8)i; in init_sc2vl_tables()
14043 *((u8 *)(dd->sc2vl) + i) = 0; in init_sc2vl_tables()
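The tail of init_sc2vl_tables() fills the driver's cached copy of the table: the valid SCs get an identity SC-to-VL mapping, and everything past them is forced to VL 0, exactly as the two assignments above show. As a sketch, with nvalid/total as placeholders for the driver's table bounds:

    #include <stdint.h>
    #include <string.h>

    static void init_sc2vl_cache(uint8_t *sc2vl, size_t nvalid, size_t total)
    {
        for (size_t i = 0; i < nvalid; i++)
            sc2vl[i] = (uint8_t)i;                 /* identity mapping */
        memset(sc2vl + nvalid, 0, total - nvalid); /* invalid SCs -> VL 0 */
    }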
14056 static int init_chip(struct hfi1_devdata *dd) in init_chip() argument
14073 write_csr(dd, SEND_CTRL, 0); in init_chip()
14074 for (i = 0; i < chip_send_contexts(dd); i++) in init_chip()
14075 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); in init_chip()
14076 for (i = 0; i < chip_sdma_engines(dd); i++) in init_chip()
14077 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); in init_chip()
14079 write_csr(dd, RCV_CTRL, 0); in init_chip()
14080 for (i = 0; i < chip_rcv_contexts(dd); i++) in init_chip()
14081 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); in init_chip()
14084 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); in init_chip()
14092 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); in init_chip()
14093 (void)read_csr(dd, CCE_DC_CTRL); in init_chip()
14101 dd_dev_info(dd, "Resetting CSRs with FLR\n"); in init_chip()
14104 pcie_flr(dd->pcidev); in init_chip()
14107 ret = restore_pci_variables(dd); in init_chip()
14109 dd_dev_err(dd, "%s: Could not restore PCI variables\n", in init_chip()
14114 if (is_ax(dd)) { in init_chip()
14115 dd_dev_info(dd, "Resetting CSRs with FLR\n"); in init_chip()
14116 pcie_flr(dd->pcidev); in init_chip()
14117 ret = restore_pci_variables(dd); in init_chip()
14119 dd_dev_err(dd, "%s: Could not restore PCI variables\n", in init_chip()
14125 dd_dev_info(dd, "Resetting CSRs with writes\n"); in init_chip()
14126 reset_cce_csrs(dd); in init_chip()
14127 reset_txe_csrs(dd); in init_chip()
14128 reset_rxe_csrs(dd); in init_chip()
14129 reset_misc_csrs(dd); in init_chip()
14132 write_csr(dd, CCE_DC_CTRL, 0); in init_chip()
14135 setextled(dd, 0); in init_chip()
14147 write_csr(dd, ASIC_QSFP1_OUT, 0x1f); in init_chip()
14148 write_csr(dd, ASIC_QSFP2_OUT, 0x1f); in init_chip()
14149 init_chip_resources(dd); in init_chip()
14153 static void init_early_variables(struct hfi1_devdata *dd) in init_early_variables() argument
14158 dd->vau = CM_VAU; in init_early_variables()
14159 dd->link_credits = CM_GLOBAL_CREDITS; in init_early_variables()
14160 if (is_ax(dd)) in init_early_variables()
14161 dd->link_credits--; in init_early_variables()
14162 dd->vcu = cu_to_vcu(hfi1_cu); in init_early_variables()
14164 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); in init_early_variables()
14165 if (dd->vl15_init > dd->link_credits) in init_early_variables()
14166 dd->vl15_init = dd->link_credits; in init_early_variables()
14168 write_uninitialized_csrs_and_memories(dd); in init_early_variables()
14171 for (i = 0; i < dd->num_pports; i++) { in init_early_variables()
14172 struct hfi1_pportdata *ppd = &dd->pport[i]; in init_early_variables()
14176 init_sc2vl_tables(dd); in init_early_variables()
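The vl15_init formula a few lines up reserves credit for eight (2048 + 128)-byte packets, converted into allocation units. A worked example under the assumption that vau_to_au(vau) = 8 << vau, so a vau of 3 yields a 64-byte AU; neither the AU encoding nor the value of CM_VAU appears in this listing:

    #include <stdio.h>

    static unsigned int vau_to_au(unsigned int vau)
    {
        return 8u << vau;   /* assumed AU encoding */
    }

    int main(void)
    {
        unsigned int vau = 3;  /* illustrative CM_VAU */

        /* 8 * (2048 + 128) = 17408 bytes; 17408 / 64 = 272 credits */
        printf("vl15_init = %u\n", (8 * (2048 + 128)) / vau_to_au(vau));
        return 0;
    }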
14179 static void init_kdeth_qp(struct hfi1_devdata *dd) in init_kdeth_qp() argument
14181 write_csr(dd, SEND_BTH_QP, in init_kdeth_qp()
14185 write_csr(dd, RCV_BTH_QP, in init_kdeth_qp()
14195 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) in hfi1_get_qp_map() argument
14197 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); in hfi1_get_qp_map()
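hfi1_get_qp_map() addresses the table as one 64-bit CSR per eight entries (the (idx / 8) * 8 byte offset above). The byte extraction that follows the read is inferred from that addressing, not shown in the fragment:

    #include <stdint.h>

    /* Pull entry idx out of the qword that read_csr() returned: one
     * 8-bit entry per byte, entry idx % 8 within its qword. */
    static uint8_t qp_map_entry(uint64_t qword, unsigned int idx)
    {
        return (qword >> ((idx % 8) * 8)) & 0xff;
    }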
14220 static void init_qpmap_table(struct hfi1_devdata *dd, in init_qpmap_table() argument
14235 write_csr(dd, regno, reg); in init_qpmap_table()
14241 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK in init_qpmap_table()
14269 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) in alloc_rsm_map_table() argument
14272 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */ in alloc_rsm_map_table()
14287 static void complete_rsm_map_table(struct hfi1_devdata *dd, in complete_rsm_map_table() argument
14295 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); in complete_rsm_map_table()
14298 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); in complete_rsm_map_table()
14303 static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) in has_rsm_rule() argument
14305 return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; in has_rsm_rule()
14311 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, in add_rsm_rule() argument
14314 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), in add_rsm_rule()
14318 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), in add_rsm_rule()
14325 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), in add_rsm_rule()
14335 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) in clear_rsm_rule() argument
14337 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0); in clear_rsm_rule()
14338 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0); in clear_rsm_rule()
14339 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0); in clear_rsm_rule()
14343 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, in qos_rmt_entries() argument
14351 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || in qos_rmt_entries()
14400 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) in init_qos() argument
14409 rmt_entries = qos_rmt_entries(dd, &m, &n); in init_qos()
14457 add_rsm_rule(dd, RSM_INS_VERBS, &rrd); in init_qos()
14462 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT); in init_qos()
14463 dd->qos_shift = n + 1; in init_qos()
14466 dd->qos_shift = 1; in init_qos()
14467 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); in init_qos()
14470 static void init_fecn_handling(struct hfi1_devdata *dd, in init_fecn_handling() argument
14483 start = dd->first_dyn_alloc_ctxt; in init_fecn_handling()
14485 total_cnt = dd->num_rcv_contexts - start; in init_fecn_handling()
14489 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); in init_fecn_handling()
14505 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; in init_fecn_handling()
14539 add_rsm_rule(dd, RSM_INS_FECN, &rrd); in init_fecn_handling()
14549 static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) in hfi1_netdev_update_rmt() argument
14555 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_netdev_update_rmt()
14556 int ctxt_count = hfi1_netdev_ctxt_count(dd); in hfi1_netdev_update_rmt()
14559 if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { in hfi1_netdev_update_rmt()
14560 dd_dev_info(dd, "Contexts are already mapped in RMT\n"); in hfi1_netdev_update_rmt()
14565 dd_dev_err(dd, "Not enough RMT entries used = %d\n", in hfi1_netdev_update_rmt()
14570 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", in hfi1_netdev_update_rmt()
14576 reg = read_csr(dd, regoff); in hfi1_netdev_update_rmt()
14581 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); in hfi1_netdev_update_rmt()
14586 dev_dbg(&(dd)->pcidev->dev, in hfi1_netdev_update_rmt()
14590 write_csr(dd, regoff, reg); in hfi1_netdev_update_rmt()
14593 reg = read_csr(dd, regoff); in hfi1_netdev_update_rmt()
14600 static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, in hfi1_enable_rsm_rule() argument
14603 if (!hfi1_netdev_update_rmt(dd)) { in hfi1_enable_rsm_rule()
14604 dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule); in hfi1_enable_rsm_rule()
14608 add_rsm_rule(dd, rule, rrd); in hfi1_enable_rsm_rule()
14609 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); in hfi1_enable_rsm_rule()
14612 void hfi1_init_aip_rsm(struct hfi1_devdata *dd) in hfi1_init_aip_rsm() argument
14618 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { in hfi1_init_aip_rsm()
14619 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_init_aip_rsm()
14636 hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd); in hfi1_init_aip_rsm()
14641 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) in hfi1_init_vnic_rsm() argument
14643 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); in hfi1_init_vnic_rsm()
14663 hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd); in hfi1_init_vnic_rsm()
14666 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) in hfi1_deinit_vnic_rsm() argument
14668 clear_rsm_rule(dd, RSM_INS_VNIC); in hfi1_deinit_vnic_rsm()
14671 void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) in hfi1_deinit_aip_rsm() argument
14674 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) in hfi1_deinit_aip_rsm()
14675 clear_rsm_rule(dd, RSM_INS_AIP); in hfi1_deinit_aip_rsm()
14678 static int init_rxe(struct hfi1_devdata *dd) in init_rxe() argument
14684 write_csr(dd, RCV_ERR_MASK, ~0ull); in init_rxe()
14686 rmt = alloc_rsm_map_table(dd); in init_rxe()
14691 init_qos(dd, rmt); in init_rxe()
14692 init_fecn_handling(dd, rmt); in init_rxe()
14693 complete_rsm_map_table(dd, rmt); in init_rxe()
14695 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); in init_rxe()
14711 val = read_csr(dd, RCV_BYPASS); in init_rxe()
14715 write_csr(dd, RCV_BYPASS, val); in init_rxe()
14719 static void init_other(struct hfi1_devdata *dd) in init_other() argument
14722 write_csr(dd, CCE_ERR_MASK, ~0ull); in init_other()
14724 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK); in init_other()
14726 write_csr(dd, DCC_ERR_FLG_EN, ~0ull); in init_other()
14727 write_csr(dd, DC_DC8051_ERR_EN, ~0ull); in init_other()
14738 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, in assign_cm_au_table() argument
14741 write_csr(dd, csr0to3, in assign_cm_au_table()
14748 write_csr(dd, csr4to7, in assign_cm_au_table()
14759 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) in assign_local_cm_au_table() argument
14761 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, in assign_local_cm_au_table()
14765 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) in assign_remote_cm_au_table() argument
14767 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, in assign_remote_cm_au_table()
14771 static void init_txe(struct hfi1_devdata *dd) in init_txe() argument
14776 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull); in init_txe()
14777 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull); in init_txe()
14778 write_csr(dd, SEND_ERR_MASK, ~0ull); in init_txe()
14779 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull); in init_txe()
14782 for (i = 0; i < chip_send_contexts(dd); i++) in init_txe()
14783 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull); in init_txe()
14784 for (i = 0; i < chip_sdma_engines(dd); i++) in init_txe()
14785 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull); in init_txe()
14788 assign_local_cm_au_table(dd, dd->vcu); in init_txe()
14794 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) in init_txe()
14795 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE); in init_txe()
14798 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, in hfi1_set_ctxt_jkey() argument
14814 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg); in hfi1_set_ctxt_jkey()
14818 if (!is_ax(dd)) { in hfi1_set_ctxt_jkey()
14819 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_set_ctxt_jkey()
14821 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_set_ctxt_jkey()
14828 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); in hfi1_set_ctxt_jkey()
14833 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) in hfi1_clear_ctxt_jkey() argument
14842 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0); in hfi1_clear_ctxt_jkey()
14848 if (!is_ax(dd)) { in hfi1_clear_ctxt_jkey()
14849 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_clear_ctxt_jkey()
14851 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_clear_ctxt_jkey()
14854 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); in hfi1_clear_ctxt_jkey()
14859 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, in hfi1_set_ctxt_pkey() argument
14871 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg); in hfi1_set_ctxt_pkey()
14872 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_set_ctxt_pkey()
14875 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_set_ctxt_pkey()
14880 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) in hfi1_clear_ctxt_pkey() argument
14889 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); in hfi1_clear_ctxt_pkey()
14891 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); in hfi1_clear_ctxt_pkey()
14892 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); in hfi1_clear_ctxt_pkey()
14901 void hfi1_start_cleanup(struct hfi1_devdata *dd) in hfi1_start_cleanup() argument
14903 aspm_exit(dd); in hfi1_start_cleanup()
14904 free_cntrs(dd); in hfi1_start_cleanup()
14905 free_rcverr(dd); in hfi1_start_cleanup()
14906 finish_chip_resources(dd); in hfi1_start_cleanup()
14917 static int init_asic_data(struct hfi1_devdata *dd) in init_asic_data() argument
14925 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); in init_asic_data()
14932 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && in init_asic_data()
14933 dd->unit != peer->unit) in init_asic_data()
14939 dd->asic_data = peer->asic_data; in init_asic_data()
14942 dd->asic_data = asic_data; in init_asic_data()
14943 mutex_init(&dd->asic_data->asic_resource_mutex); in init_asic_data()
14945 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ in init_asic_data()
14950 ret = set_up_i2c(dd, dd->asic_data); in init_asic_data()
14961 static int obtain_boardname(struct hfi1_devdata *dd) in obtain_boardname() argument
14969 ret = read_hfi1_efi_var(dd, "description", &size, in obtain_boardname()
14970 (void **)&dd->boardname); in obtain_boardname()
14972 dd_dev_info(dd, "Board description not found\n"); in obtain_boardname()
14974 dd->boardname = kstrdup(generic, GFP_KERNEL); in obtain_boardname()
14975 if (!dd->boardname) in obtain_boardname()
14989 static int check_int_registers(struct hfi1_devdata *dd) in check_int_registers() argument
14996 mask = read_csr(dd, CCE_INT_MASK); in check_int_registers()
14997 write_csr(dd, CCE_INT_MASK, 0ull); in check_int_registers()
14998 reg = read_csr(dd, CCE_INT_MASK); in check_int_registers()
15003 write_csr(dd, CCE_INT_CLEAR, all_bits); in check_int_registers()
15004 reg = read_csr(dd, CCE_INT_STATUS); in check_int_registers()
15009 write_csr(dd, CCE_INT_FORCE, all_bits); in check_int_registers()
15010 reg = read_csr(dd, CCE_INT_STATUS); in check_int_registers()
15015 write_csr(dd, CCE_INT_CLEAR, all_bits); in check_int_registers()
15016 write_csr(dd, CCE_INT_MASK, mask); in check_int_registers()
15020 write_csr(dd, CCE_INT_MASK, mask); in check_int_registers()
15021 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); in check_int_registers()
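check_int_registers() defends against hypervisors that trap but do not faithfully emulate the interrupt CSRs: save the mask, clear everything, confirm the status reads back zero, force every source, confirm the status reads back all ones, then restore the mask. The probe's shape as a sketch; csr_read/csr_write stand in for the driver's accessors, and the exact pass/fail comparisons are inferred from the error path shown above:

    #include <stdint.h>
    #include <stdbool.h>

    static bool int_csrs_are_mapped(uint64_t (*csr_read)(uint32_t),
                                    void (*csr_write)(uint32_t, uint64_t),
                                    uint32_t clear, uint32_t force,
                                    uint32_t status)
    {
        const uint64_t all_bits = ~(uint64_t)0;
        bool ok;

        csr_write(clear, all_bits);          /* no interrupts pending */
        if (csr_read(status) != 0)
            return false;
        csr_write(force, all_bits);          /* assert every source */
        ok = (csr_read(status) == all_bits);
        csr_write(clear, all_bits);          /* leave things clean */
        return ok;
    }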
15033 int hfi1_init_dd(struct hfi1_devdata *dd) in hfi1_init_dd() argument
15035 struct pci_dev *pdev = dd->pcidev; in hfi1_init_dd()
15046 u32 sdma_engines = chip_sdma_engines(dd); in hfi1_init_dd()
15048 ppd = dd->pport; in hfi1_init_dd()
15049 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_init_dd()
15052 hfi1_init_pportdata(pdev, ppd, dd, 0, 1); in hfi1_init_dd()
15068 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n", in hfi1_init_dd()
15076 dd->vld[vl].mtu = hfi1_max_mtu; in hfi1_init_dd()
15077 dd->vld[15].mtu = MAX_MAD_PACKET; in hfi1_init_dd()
15099 ret = hfi1_pcie_ddinit(dd, pdev); in hfi1_init_dd()
15104 ret = save_pci_variables(dd); in hfi1_init_dd()
15108 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) in hfi1_init_dd()
15110 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) in hfi1_init_dd()
15119 ret = check_int_registers(dd); in hfi1_init_dd()
15128 reg = read_csr(dd, CCE_REVISION2); in hfi1_init_dd()
15129 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) in hfi1_init_dd()
15132 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; in hfi1_init_dd()
15133 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; in hfi1_init_dd()
15134 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n", in hfi1_init_dd()
15135 dd->icode < ARRAY_SIZE(inames) ? in hfi1_init_dd()
15136 inames[dd->icode] : "unknown", (int)dd->irev); in hfi1_init_dd()
15139 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15141 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; in hfi1_init_dd()
15143 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15146 ppd = dd->pport; in hfi1_init_dd()
15147 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { in hfi1_init_dd()
15156 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n", in hfi1_init_dd()
15171 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; in hfi1_init_dd()
15172 if (dd->rcv_intr_timeout_csr > in hfi1_init_dd()
15174 dd->rcv_intr_timeout_csr = in hfi1_init_dd()
15176 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) in hfi1_init_dd()
15177 dd->rcv_intr_timeout_csr = 1; in hfi1_init_dd()
15180 read_guid(dd); in hfi1_init_dd()
15183 ret = init_asic_data(dd); in hfi1_init_dd()
15188 ret = init_chip(dd); in hfi1_init_dd()
15193 ret = pcie_speeds(dd); in hfi1_init_dd()
15198 ret = eprom_init(dd); in hfi1_init_dd()
15203 get_platform_config(dd); in hfi1_init_dd()
15206 ret = hfi1_firmware_init(dd); in hfi1_init_dd()
15222 ret = do_pcie_gen3_transition(dd); in hfi1_init_dd()
15230 tune_pcie_caps(dd); in hfi1_init_dd()
15233 init_early_variables(dd); in hfi1_init_dd()
15235 parse_platform_config(dd); in hfi1_init_dd()
15237 ret = obtain_boardname(dd); in hfi1_init_dd()
15241 snprintf(dd->boardversion, BOARD_VERS_MAX, in hfi1_init_dd()
15244 (u32)dd->majrev, in hfi1_init_dd()
15245 (u32)dd->minrev, in hfi1_init_dd()
15246 (dd->revision >> CCE_REVISION_SW_SHIFT) in hfi1_init_dd()
15250 ret = hfi1_netdev_alloc(dd); in hfi1_init_dd()
15254 ret = set_up_context_variables(dd); in hfi1_init_dd()
15259 ret = init_rxe(dd); in hfi1_init_dd()
15264 init_txe(dd); in hfi1_init_dd()
15266 init_other(dd); in hfi1_init_dd()
15268 init_kdeth_qp(dd); in hfi1_init_dd()
15270 ret = hfi1_dev_affinity_init(dd); in hfi1_init_dd()
15275 ret = init_send_contexts(dd); in hfi1_init_dd()
15279 ret = hfi1_create_kctxts(dd); in hfi1_init_dd()
15287 aspm_init(dd); in hfi1_init_dd()
15289 ret = init_pervl_scs(dd); in hfi1_init_dd()
15294 for (i = 0; i < dd->num_pports; ++i) { in hfi1_init_dd()
15295 ret = sdma_init(dd, i); in hfi1_init_dd()
15301 ret = set_up_interrupts(dd); in hfi1_init_dd()
15305 ret = hfi1_comp_vectors_set_up(dd); in hfi1_init_dd()
15310 init_lcb_access(dd); in hfi1_init_dd()
15317 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", in hfi1_init_dd()
15318 (dd->base_guid & 0xFFFFFF) | in hfi1_init_dd()
15319 ((dd->base_guid >> 11) & 0xF000000)); in hfi1_init_dd()
15321 dd->oui1 = dd->base_guid >> 56 & 0xFF; in hfi1_init_dd()
15322 dd->oui2 = dd->base_guid >> 48 & 0xFF; in hfi1_init_dd()
15323 dd->oui3 = dd->base_guid >> 40 & 0xFF; in hfi1_init_dd()
15325 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */ in hfi1_init_dd()
15329 thermal_init(dd); in hfi1_init_dd()
15331 ret = init_cntrs(dd); in hfi1_init_dd()
15335 ret = init_rcverr(dd); in hfi1_init_dd()
15339 init_completion(&dd->user_comp); in hfi1_init_dd()
15342 atomic_set(&dd->user_refcount, 1); in hfi1_init_dd()
15347 free_rcverr(dd); in hfi1_init_dd()
15349 free_cntrs(dd); in hfi1_init_dd()
15351 hfi1_comp_vectors_clean_up(dd); in hfi1_init_dd()
15352 msix_clean_up_interrupts(dd); in hfi1_init_dd()
15354 hfi1_netdev_free(dd); in hfi1_init_dd()
15355 hfi1_pcie_ddcleanup(dd); in hfi1_init_dd()
15357 hfi1_free_devdata(dd); in hfi1_init_dd()
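A few lines up, hfi1_init_dd() derives the serial number and OUI from the 64-bit base GUID; the three OUI bytes are simply the top three bytes of the GUID. The unpacking on its own:

    #include <stdint.h>

    /* Extract the IEEE OUI from the top 24 bits of the base GUID,
     * exactly as the oui1/oui2/oui3 assignments above do. */
    static void guid_to_oui(uint64_t guid, uint8_t oui[3])
    {
        oui[0] = (guid >> 56) & 0xff;
        oui[1] = (guid >> 48) & 0xff;
        oui[2] = (guid >> 40) & 0xff;
    }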
15417 dd_dev_err((dd), \
15431 static int thermal_init(struct hfi1_devdata *dd) in thermal_init() argument
15435 if (dd->icode != ICODE_RTL_SILICON || in thermal_init()
15436 check_chip_resource(dd, CR_THERM_INIT, NULL)) in thermal_init()
15439 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); in thermal_init()
15441 THERM_FAILURE(dd, ret, "Acquire SBus"); in thermal_init()
15445 dd_dev_info(dd, "Initializing thermal sensor\n"); in thermal_init()
15447 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); in thermal_init()
15451 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15454 THERM_FAILURE(dd, ret, "Bus Reset"); in thermal_init()
15458 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15461 THERM_FAILURE(dd, ret, "Therm Block Reset"); in thermal_init()
15465 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, in thermal_init()
15468 THERM_FAILURE(dd, ret, "Write Clock Div"); in thermal_init()
15472 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, in thermal_init()
15476 THERM_FAILURE(dd, ret, "Write Mode Sel"); in thermal_init()
15480 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, in thermal_init()
15483 THERM_FAILURE(dd, ret, "Write Reset Deassert"); in thermal_init()
15490 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); in thermal_init()
15493 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); in thermal_init()
15495 THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); in thermal_init()
15498 release_chip_resource(dd, CR_SBUS); in thermal_init()
15502 static void handle_temp_err(struct hfi1_devdata *dd) in handle_temp_err() argument
15504 struct hfi1_pportdata *ppd = &dd->pport[0]; in handle_temp_err()
15510 dd_dev_emerg(dd, in handle_temp_err()
15512 dd->flags |= HFI1_FORCED_FREEZE; in handle_temp_err()
15527 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | in handle_temp_err()
15533 dc_shutdown(dd); in handle_temp_err()