Lines Matching refs:macsec
330 static bool macsec_is_offloaded(struct macsec_dev *macsec) in macsec_is_offloaded() argument
332 if (macsec->offload == MACSEC_OFFLOAD_MAC || in macsec_is_offloaded()
333 macsec->offload == MACSEC_OFFLOAD_PHY) in macsec_is_offloaded()
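
The two matches above cover essentially the whole helper. A minimal reconstruction of how macsec_is_offloaded() likely reads, with the elided return statements filled in as an assumption:

static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
        /* A SecY counts as offloaded when either the MAC or the PHY of
         * the underlying real device handles MACsec in hardware.
         */
        if (macsec->offload == MACSEC_OFFLOAD_MAC ||
            macsec->offload == MACSEC_OFFLOAD_PHY)
                return true;

        return false;
}
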
341 struct macsec_dev *macsec) in macsec_check_offload() argument
343 if (!macsec || !macsec->real_dev) in macsec_check_offload()
347 return macsec->real_dev->phydev && in macsec_check_offload()
348 macsec->real_dev->phydev->macsec_ops; in macsec_check_offload()
350 return macsec->real_dev->features & NETIF_F_HW_MACSEC && in macsec_check_offload()
351 macsec->real_dev->macsec_ops; in macsec_check_offload()
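
These matches belong to macsec_check_offload(), which verifies that a requested offload mode is actually backed by the lower device. A hedged sketch of the full function; the branch structure on MACSEC_OFFLOAD_PHY / MACSEC_OFFLOAD_MAC is an assumption, only the individual conditions appear in the listing:

static bool macsec_check_offload(enum macsec_offload offload,
                                 struct macsec_dev *macsec)
{
        if (!macsec || !macsec->real_dev)
                return false;

        /* PHY offload needs a phydev that exposes macsec_ops; MAC offload
         * needs the real device to advertise NETIF_F_HW_MACSEC and provide
         * its own macsec_ops.
         */
        if (offload == MACSEC_OFFLOAD_PHY)
                return macsec->real_dev->phydev &&
                       macsec->real_dev->phydev->macsec_ops;
        else if (offload == MACSEC_OFFLOAD_MAC)
                return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
                       macsec->real_dev->macsec_ops;

        return false;
}
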
357 struct macsec_dev *macsec, in __macsec_get_ops() argument
365 ctx->phydev = macsec->real_dev->phydev; in __macsec_get_ops()
367 ctx->netdev = macsec->real_dev; in __macsec_get_ops()
371 return macsec->real_dev->phydev->macsec_ops; in __macsec_get_ops()
373 return macsec->real_dev->macsec_ops; in __macsec_get_ops()
379 static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, in macsec_get_ops() argument
382 if (!macsec_check_offload(macsec->offload, macsec)) in macsec_get_ops()
385 return __macsec_get_ops(macsec->offload, macsec, ctx); in macsec_get_ops()
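
The next two groups of matches form the ops lookup pair. A sketch of how they plausibly fit together; the memset()/ctx->offload initialisation is an assumption based on the usual pattern, only the ctx->phydev / ctx->netdev assignments and the return expressions come from the listing:

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
                                                 struct macsec_dev *macsec,
                                                 struct macsec_context *ctx)
{
        if (ctx) {
                memset(ctx, 0, sizeof(*ctx));
                ctx->offload = offload;

                if (offload == MACSEC_OFFLOAD_PHY)
                        ctx->phydev = macsec->real_dev->phydev;
                else if (offload == MACSEC_OFFLOAD_MAC)
                        ctx->netdev = macsec->real_dev;
        }

        if (offload == MACSEC_OFFLOAD_PHY)
                return macsec->real_dev->phydev->macsec_ops;
        else
                return macsec->real_dev->macsec_ops;
}

/* Convenience wrapper: validate the current offload mode, then resolve it. */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
                                               struct macsec_context *ctx)
{
        if (!macsec_check_offload(macsec->offload, macsec))
                return NULL;

        return __macsec_get_ops(macsec->offload, macsec, ctx);
}
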
493 struct macsec_dev *macsec = netdev_priv(dev); in macsec_encrypt_finish() local
495 skb->dev = macsec->real_dev; in macsec_encrypt_finish()
534 struct macsec_dev *macsec = macsec_priv(dev); in macsec_encrypt_done() local
542 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); in macsec_encrypt_done()
596 struct macsec_dev *macsec = macsec_priv(dev); in macsec_encrypt() local
600 secy = &macsec->secy; in macsec_encrypt()
651 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); in macsec_encrypt()
835 struct macsec_dev *macsec = macsec_priv(dev); in macsec_decrypt_done() local
848 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { in macsec_decrypt_done()
854 macsec_finalize_skb(skb, macsec->secy.icv_len, in macsec_decrypt_done()
856 macsec_reset_skb(skb, macsec->secy.netdev); in macsec_decrypt_done()
859 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) in macsec_decrypt_done()
1001 struct macsec_dev *macsec; in handle_not_macsec() local
1006 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in handle_not_macsec()
1008 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); in handle_not_macsec()
1009 struct net_device *ndev = macsec->secy.netdev; in handle_not_macsec()
1014 if (macsec_is_offloaded(macsec) && netif_running(ndev)) { in handle_not_macsec()
1045 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { in handle_not_macsec()
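
The handle_not_macsec() matches show how frames without a SecTAG are matched against every SecY registered on the real device. A condensed, hypothetical skeleton of that loop; the offloaded-port delivery and the counter/clone details are assumptions, only the list walk and the two checks come from the listing:

        list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
                struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
                struct net_device *ndev = macsec->secy.netdev;
                struct sk_buff *nskb;

                /* With hardware offload the SecTAG is already stripped, so a
                 * frame without a tag may still belong to this SecY.
                 */
                if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
                        /* deliver to the offloaded port (details elided) */
                        continue;
                }

                /* Strict validation: account the untagged frame and drop it. */
                if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
                        u64_stats_update_begin(&secy_stats->syncp);
                        secy_stats->stats.InPktsNoTag++;
                        u64_stats_update_end(&secy_stats->syncp);
                        continue;
                }

                /* Otherwise hand a clone to this port's uncontrolled side. */
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        break;
                nskb->dev = ndev;
                netif_rx(nskb);
        }
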
1080 struct macsec_dev *macsec; in macsec_handle_frame() local
1134 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in macsec_handle_frame()
1135 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); in macsec_handle_frame()
1140 secy = &macsec->secy; in macsec_handle_frame()
1150 macsec = macsec_priv(dev); in macsec_handle_frame()
1151 secy_stats = this_cpu_ptr(macsec->stats); in macsec_handle_frame()
1238 ret = gro_cells_receive(&macsec->gro_cells, skb); in macsec_handle_frame()
1242 macsec->secy.netdev->stats.rx_dropped++; in macsec_handle_frame()
1266 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in macsec_handle_frame()
1269 secy_stats = this_cpu_ptr(macsec->stats); in macsec_handle_frame()
1275 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { in macsec_handle_frame()
1289 macsec_reset_skb(nskb, macsec->secy.netdev); in macsec_handle_frame()
1297 macsec->secy.netdev->stats.rx_dropped++; in macsec_handle_frame()
1397 struct macsec_dev *macsec; in create_rx_sc() local
1402 list_for_each_entry(macsec, &rxd->secys, secys) { in create_rx_sc()
1403 if (find_rx_sc_rtnl(&macsec->secy, sci)) in create_rx_sc()
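
The create_rx_sc() matches are its duplicate-SCI guard: before a new RX SC is allocated, every SecY bound to the same real device is scanned. A short sketch, assuming dev and sci are the function's parameters and the usual ERR_PTR(-EEXIST) rejection:

        struct net_device *real_dev = macsec_priv(dev)->real_dev;
        struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
        struct macsec_dev *macsec;

        list_for_each_entry(macsec, &rxd->secys, secys) {
                if (find_rx_sc_rtnl(&macsec->secy, sci))
                        return ERR_PTR(-EEXIST);
        }
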
2561 static bool macsec_is_configured(struct macsec_dev *macsec) in macsec_is_configured() argument
2563 struct macsec_secy *secy = &macsec->secy; in macsec_is_configured()
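
macsec_is_configured() is used by the offload-toggle path below to tell whether the SecY already carries state. A sketch under the assumption that it checks for any installed RX SC and any TX SA:

static bool macsec_is_configured(struct macsec_dev *macsec)
{
        struct macsec_secy *secy = &macsec->secy;
        struct macsec_tx_sc *tx_sc = &secy->tx_sc;
        int i;

        /* Some kernel versions test the rx_sc list pointer instead of the
         * n_rx_sc counter; either way the intent is "any RX SC present?".
         */
        if (secy->n_rx_sc > 0)
                return true;

        for (i = 0; i < MACSEC_NUM_AN; i++)
                if (tx_sc->sa[i])
                        return true;

        return false;
}
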
2586 struct macsec_dev *macsec; in macsec_upd_offload() local
2603 macsec = macsec_priv(dev); in macsec_upd_offload()
2609 if (macsec->offload == offload) in macsec_upd_offload()
2614 !macsec_check_offload(offload, macsec)) in macsec_upd_offload()
2623 prev_offload = macsec->offload; in macsec_upd_offload()
2624 macsec->offload = offload; in macsec_upd_offload()
2629 if (macsec_is_configured(macsec)) { in macsec_upd_offload()
2635 macsec, &ctx); in macsec_upd_offload()
2646 ctx.secy = &macsec->secy; in macsec_upd_offload()
2655 macsec->offload = prev_offload; in macsec_upd_offload()
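
The macsec_upd_offload() matches outline the netlink handler that switches a live device between software and hardware mode. A condensed, hypothetical sketch of only the steps the listing confirms: reject a no-op change, validate the new mode, and roll the field back if the hand-over fails. The error codes and the hand_over_to_hw() helper are assumptions, not names from the driver:

        if (macsec->offload == offload)
                goto out;                       /* nothing to change */

        if (offload != MACSEC_OFFLOAD_OFF &&
            !macsec_check_offload(offload, macsec)) {
                ret = -EOPNOTSUPP;              /* mode not backed by the lower device */
                goto out;
        }

        prev_offload = macsec->offload;
        macsec->offload = offload;

        /* Hand existing configuration over to (or back from) the hardware;
         * macsec_is_configured() decides whether there is anything to move.
         */
        ret = hand_over_to_hw(macsec, &ctx);    /* hypothetical helper */
        if (ret) {
                macsec->offload = prev_offload; /* roll back on failure */
                goto out;
        }
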
2665 struct macsec_dev *macsec = macsec_priv(dev); in get_tx_sa_stats() local
2669 if (macsec_is_offloaded(macsec)) { in get_tx_sa_stats()
2673 ops = macsec_get_ops(macsec, &ctx); in get_tx_sa_stats()
2709 struct macsec_dev *macsec = macsec_priv(dev); in get_rx_sa_stats() local
2713 if (macsec_is_offloaded(macsec)) { in get_rx_sa_stats()
2717 ops = macsec_get_ops(macsec, &ctx); in get_rx_sa_stats()
2762 struct macsec_dev *macsec = macsec_priv(dev); in get_rx_sc_stats() local
2766 if (macsec_is_offloaded(macsec)) { in get_rx_sc_stats()
2770 ops = macsec_get_ops(macsec, &ctx); in get_rx_sc_stats()
2844 struct macsec_dev *macsec = macsec_priv(dev); in get_tx_sc_stats() local
2848 if (macsec_is_offloaded(macsec)) { in get_tx_sc_stats()
2852 ops = macsec_get_ops(macsec, &ctx); in get_tx_sc_stats()
2900 struct macsec_dev *macsec = macsec_priv(dev); in get_secy_stats() local
2904 if (macsec_is_offloaded(macsec)) { in get_secy_stats()
2908 ops = macsec_get_ops(macsec, &ctx); in get_secy_stats()
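
get_tx_sa_stats() through get_secy_stats() all follow the same dispatch pattern: if the SecY is offloaded, resolve the device's macsec_ops and let the hardware driver fill the counters, otherwise sum the software per-CPU statistics. A sketch of that pattern using get_tx_sa_stats() as the example; the ctx field names and the mdo_get_tx_sa_stats callback reflect the common shape of these helpers and should be treated as assumptions:

static void get_tx_sa_stats(struct net_device *dev, int an,
                            struct macsec_tx_sa *tx_sa,
                            struct macsec_tx_sa_stats *sum)
{
        struct macsec_dev *macsec = macsec_priv(dev);
        int cpu;

        /* If h/w offloading is available, ask the device for the counters. */
        if (macsec_is_offloaded(macsec)) {
                const struct macsec_ops *ops;
                struct macsec_context ctx;

                ops = macsec_get_ops(macsec, &ctx);
                if (ops) {
                        ctx.sa.assoc_num = an;
                        ctx.sa.tx_sa = tx_sa;
                        ctx.stats.tx_sa_stats = sum;
                        ctx.secy = &macsec->secy;
                        ops->mdo_get_tx_sa_stats(&ctx);
                }
                return;
        }

        /* Software path: fold the per-CPU counters into the sum. */
        for_each_possible_cpu(cpu) {
                const struct macsec_tx_sa_stats *stats =
                        per_cpu_ptr(tx_sa->stats, cpu);

                sum->OutPktsProtected += stats->OutPktsProtected;
                sum->OutPktsEncrypted += stats->OutPktsEncrypted;
        }
}
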
3028 struct macsec_dev *macsec = netdev_priv(dev); in dump_secy() local
3050 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) in dump_secy()
3373 struct macsec_dev *macsec = netdev_priv(dev); in macsec_start_xmit() local
3374 struct macsec_secy *secy = &macsec->secy; in macsec_start_xmit()
3379 skb->dev = macsec->real_dev; in macsec_start_xmit()
3385 secy_stats = this_cpu_ptr(macsec->stats); in macsec_start_xmit()
3389 skb->dev = macsec->real_dev; in macsec_start_xmit()
3409 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); in macsec_start_xmit()
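
In macsec_start_xmit() the offloaded case is a pure pass-through, while the software case runs the frame through macsec_encrypt() and accounts it with macsec_count_tx(). A sketch of the offloaded fast path; the return handling is an assumption:

        if (macsec_is_offloaded(macsec)) {
                /* Hardware adds the SecTAG/ICV: just retarget and transmit. */
                skb->dev = macsec->real_dev;
                return dev_queue_xmit(skb);
        }
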
3423 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_init() local
3424 struct net_device *real_dev = macsec->real_dev; in macsec_dev_init()
3431 err = gro_cells_init(&macsec->gro_cells, dev); in macsec_dev_init()
3455 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_uninit() local
3457 gro_cells_destroy(&macsec->gro_cells); in macsec_dev_uninit()
3464 struct macsec_dev *macsec = macsec_priv(dev); in macsec_fix_features() local
3465 struct net_device *real_dev = macsec->real_dev; in macsec_fix_features()
3476 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_open() local
3477 struct net_device *real_dev = macsec->real_dev; in macsec_dev_open()
3497 if (macsec_is_offloaded(macsec)) { in macsec_dev_open()
3507 ctx.secy = &macsec->secy; in macsec_dev_open()
3528 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_stop() local
3529 struct net_device *real_dev = macsec->real_dev; in macsec_dev_stop()
3534 if (macsec_is_offloaded(macsec)) { in macsec_dev_stop()
3538 ops = macsec_get_ops(macsec, &ctx); in macsec_dev_stop()
3540 ctx.secy = &macsec->secy; in macsec_dev_stop()
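
macsec_dev_open() and macsec_dev_stop() show the second recurring offload pattern: resolve the ops, point ctx.secy at this SecY, and invoke the matching mdo_* callback on the hardware driver. A sketch of the stop side; mdo_dev_stop is the expected callback name but is an assumption here:

        if (macsec_is_offloaded(macsec)) {
                const struct macsec_ops *ops;
                struct macsec_context ctx;

                ops = macsec_get_ops(macsec, &ctx);
                if (ops) {
                        ctx.secy = &macsec->secy;
                        ops->mdo_dev_stop(&ctx);
                }
        }
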
3584 struct macsec_dev *macsec = macsec_priv(dev); in macsec_set_mac_address() local
3585 struct net_device *real_dev = macsec->real_dev; in macsec_set_mac_address()
3603 macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES); in macsec_set_mac_address()
3606 if (macsec_is_offloaded(macsec)) { in macsec_set_mac_address()
3610 ops = macsec_get_ops(macsec, &ctx); in macsec_set_mac_address()
3612 ctx.secy = &macsec->secy; in macsec_set_mac_address()
3622 struct macsec_dev *macsec = macsec_priv(dev); in macsec_change_mtu() local
3623 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); in macsec_change_mtu()
3625 if (macsec->real_dev->mtu - extra < new_mtu) in macsec_change_mtu()
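
macsec_change_mtu() needs only the two matched lines plus a return path: the SecTAG and ICV consume part of the real device's MTU, so the requested value is rejected when it would not fit. A minimal sketch of the remainder:

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
        struct macsec_dev *macsec = macsec_priv(dev);
        unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

        /* The SecTAG + ICV overhead must still fit into the lower MTU. */
        if (macsec->real_dev->mtu - extra < new_mtu)
                return -ERANGE;

        dev->mtu = new_mtu;

        return 0;
}
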
3688 struct macsec_dev *macsec = macsec_priv(dev); in macsec_free_netdev() local
3690 free_percpu(macsec->stats); in macsec_free_netdev()
3691 free_percpu(macsec->secy.tx_sc.stats); in macsec_free_netdev()
3789 struct macsec_dev *macsec = macsec_priv(dev); in macsec_changelink() local
3806 memcpy(&secy, &macsec->secy, sizeof(secy)); in macsec_changelink()
3807 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); in macsec_changelink()
3814 if (macsec_is_offloaded(macsec)) { in macsec_changelink()
3824 ctx.secy = &macsec->secy; in macsec_changelink()
3833 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); in macsec_changelink()
3834 memcpy(&macsec->secy, &secy, sizeof(secy)); in macsec_changelink()
3839 static void macsec_del_dev(struct macsec_dev *macsec) in macsec_del_dev() argument
3843 while (macsec->secy.rx_sc) { in macsec_del_dev()
3844 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); in macsec_del_dev()
3846 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); in macsec_del_dev()
3851 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); in macsec_del_dev()
3854 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); in macsec_del_dev()
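
macsec_del_dev() tears the SecY down under RTNL: it unlinks and frees every RX SC, then clears each TX SA slot. A sketch assuming the usual free_rx_sc()/clear_tx_sa() helpers:

static void macsec_del_dev(struct macsec_dev *macsec)
{
        int i;

        while (macsec->secy.rx_sc) {
                struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

                rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
                free_rx_sc(rx_sc);
        }

        for (i = 0; i < MACSEC_NUM_AN; i++) {
                struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

                if (sa) {
                        RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
                        clear_tx_sa(sa);
                }
        }
}
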
3862 struct macsec_dev *macsec = macsec_priv(dev); in macsec_common_dellink() local
3863 struct net_device *real_dev = macsec->real_dev; in macsec_common_dellink()
3866 if (macsec_is_offloaded(macsec)) { in macsec_common_dellink()
3872 ctx.secy = &macsec->secy; in macsec_common_dellink()
3878 list_del_rcu(&macsec->secys); in macsec_common_dellink()
3879 macsec_del_dev(macsec); in macsec_common_dellink()
3887 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dellink() local
3888 struct net_device *real_dev = macsec->real_dev; in macsec_dellink()
3902 struct macsec_dev *macsec = macsec_priv(dev); in register_macsec_dev() local
3922 list_add_tail_rcu(&macsec->secys, &rxd->secys); in register_macsec_dev()
3929 struct macsec_dev *macsec; in sci_exists() local
3931 list_for_each_entry(macsec, &rxd->secys, secys) { in sci_exists()
3932 if (macsec->secy.sci == sci) in sci_exists()
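
sci_exists() is a simple uniqueness check over the SecYs registered on the real device; it needs only the matched loop plus a return. A minimal reconstruction:

static bool sci_exists(struct net_device *dev, sci_t sci)
{
        struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
        struct macsec_dev *macsec;

        list_for_each_entry(macsec, &rxd->secys, secys) {
                if (macsec->secy.sci == sci)
                        return true;
        }

        return false;
}
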
3941 struct macsec_dev *macsec = macsec_priv(dev); in macsec_add_dev() local
3942 struct macsec_secy *secy = &macsec->secy; in macsec_add_dev()
3944 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); in macsec_add_dev()
3945 if (!macsec->stats) in macsec_add_dev()
3950 free_percpu(macsec->stats); in macsec_add_dev()
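
The macsec_add_dev() matches are its allocation path: per-SecY statistics first, then the TX SC statistics, with the first allocation freed when the second one fails. A sketch of that error handling; the pcpu_tx_sc_stats type for the second allocation is an assumption:

        macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
        if (!macsec->stats)
                return -ENOMEM;

        secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
        if (!secy->tx_sc.stats) {
                free_percpu(macsec->stats);     /* undo the first allocation */
                return -ENOMEM;
        }
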
3983 struct macsec_dev *macsec = macsec_priv(dev); in macsec_newlink() local
4000 macsec->real_dev = real_dev; in macsec_newlink()
4003 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); in macsec_newlink()
4006 macsec->offload = MACSEC_OFFLOAD_OFF; in macsec_newlink()
4009 if (macsec->offload != MACSEC_OFFLOAD_OFF && in macsec_newlink()
4010 !macsec_check_offload(macsec->offload, macsec)) in macsec_newlink()
4072 if (macsec_is_offloaded(macsec)) { in macsec_newlink()
4076 ops = macsec_get_ops(macsec, &ctx); in macsec_newlink()
4078 ctx.secy = &macsec->secy; in macsec_newlink()
4097 macsec_del_dev(macsec); in macsec_newlink()
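
Finally, the macsec_newlink() matches show how the offload mode is chosen at link creation: taken from the IFLA_MACSEC_OFFLOAD attribute when present, off by default, and validated against the lower device before setup continues. A sketch of that fragment; the -EOPNOTSUPP return is an assumption:

        macsec->real_dev = real_dev;

        if (data && data[IFLA_MACSEC_OFFLOAD])
                macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
        else
                /* MACsec offloading is off by default */
                macsec->offload = MACSEC_OFFLOAD_OFF;

        /* Reject a mode the underlying MAC/PHY cannot actually provide. */
        if (macsec->offload != MACSEC_OFFLOAD_OFF &&
            !macsec_check_offload(macsec->offload, macsec))
                return -EOPNOTSUPP;
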