[PATCH net-next v2] macsec: Add support for IFLA_MACSEC_OFFLOAD in the netlink layer

From: ehakim
Date: Tue Dec 06 2022 - 03:58:42 EST


From: Emeel Hakim <ehakim@xxxxxxxxxx>

This adds support for configuring MACsec offload through the
netlink layer by:
- Considering IFLA_MACSEC_OFFLOAD in macsec_fill_info.
- Handling IFLA_MACSEC_OFFLOAD in macsec_changelink.
- Adding IFLA_MACSEC_OFFLOAD to the netlink policy.
- Adjusting macsec_get_size.
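
For illustration only (not part of this patch): a minimal, untested
userspace sketch of the rtnetlink request that ends up in
macsec_changelink(), written against libmnl with error handling
omitted. The device name "macsec0" and the MACSEC_OFFLOAD_MAC mode
are example choices; the offload value travels as IFLA_MACSEC_OFFLOAD
nested inside IFLA_LINKINFO/IFLA_INFO_DATA, which is also what
"ip link set ... type macsec offload ..." sends.

/* cc macsec_offload.c $(pkg-config --cflags --libs libmnl) */
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlattr *linkinfo, *infodata;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;

	/* RTM_NEWLINK on an existing ifindex changes the link ("changelink") */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;
	ifm->ifi_index = if_nametoindex("macsec0");	/* example device */

	/* IFLA_LINKINFO -> IFLA_INFO_KIND("macsec") + IFLA_INFO_DATA(IFLA_MACSEC_OFFLOAD) */
	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
	mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "macsec");
	infodata = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
	mnl_attr_put_u8(nlh, IFLA_MACSEC_OFFLOAD, MACSEC_OFFLOAD_MAC);
	mnl_attr_nest_end(nlh, infodata);
	mnl_attr_nest_end(nlh, linkinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	/* a real program would read back and check the NLMSG_ERROR ack here */
	mnl_socket_close(nl);
	return 0;
}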

The handling in macsec_changelink is similar to that in
macsec_upd_offload, so update macsec_upd_offload to use a
common helper function and avoid code duplication.

Example of setting MACsec offload on a macsec device:
ip link set macsec0 type macsec offload mac
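
A fuller (illustrative) sequence, assuming eth0 as the underlying
device and an iproute2 build that prints the macsec offload state;
the last command reads the mode back through the IFLA_MACSEC_OFFLOAD
attribute now reported by macsec_fill_info:
ip link add link eth0 macsec0 type macsec encrypt on
ip link set macsec0 type macsec offload mac
ip -d link show macsec0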

Reviewed-by: Raed Salem <raeds@xxxxxxxxxx>
Signed-off-by: Emeel Hakim <ehakim@xxxxxxxxxx>
---
V1 -> V2: Add common helper to avoid duplicating code
drivers/net/macsec.c | 114 ++++++++++++++++++++++++++++---------------
1 file changed, 74 insertions(+), 40 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d73b9d535b7a..afd6ff47ba56 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2583,16 +2583,45 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
 	return false;
 }
 
+static int macsec_update_offload(struct macsec_dev *macsec, enum macsec_offload offload)
+{
+	enum macsec_offload prev_offload;
+	const struct macsec_ops *ops;
+	struct macsec_context ctx;
+	int ret = 0;
+
+	prev_offload = macsec->offload;
+
+	/* Check if the device already has rules configured: we do not support
+	 * rules migration.
+	 */
+	if (macsec_is_configured(macsec))
+		return -EBUSY;
+
+	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+			       macsec, &ctx);
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	macsec->offload = offload;
+
+	ctx.secy = &macsec->secy;
+	ret = (offload == MACSEC_OFFLOAD_OFF) ? macsec_offload(ops->mdo_del_secy, &ctx) :
+						macsec_offload(ops->mdo_add_secy, &ctx);
+
+	if (ret)
+		macsec->offload = prev_offload;
+
+	return ret;
+}
+
 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
-	enum macsec_offload offload, prev_offload;
-	int (*func)(struct macsec_context *ctx);
 	struct nlattr **attrs = info->attrs;
-	struct net_device *dev;
-	const struct macsec_ops *ops;
-	struct macsec_context ctx;
+	enum macsec_offload offload;
 	struct macsec_dev *macsec;
+	struct net_device *dev;
 	int ret;
 
 	if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2629,39 +2658,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
 
 	rtnl_lock();
 
-	prev_offload = macsec->offload;
-	macsec->offload = offload;
-
-	/* Check if the device already has rules configured: we do not support
-	 * rules migration.
-	 */
-	if (macsec_is_configured(macsec)) {
-		ret = -EBUSY;
-		goto rollback;
-	}
-
-	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
-			       macsec, &ctx);
-	if (!ops) {
-		ret = -EOPNOTSUPP;
-		goto rollback;
-	}
-
-	if (prev_offload == MACSEC_OFFLOAD_OFF)
-		func = ops->mdo_add_secy;
-	else
-		func = ops->mdo_del_secy;
-
-	ctx.secy = &macsec->secy;
-	ret = macsec_offload(func, &ctx);
-	if (ret)
-		goto rollback;
-
-	rtnl_unlock();
-	return 0;
-
-rollback:
-	macsec->offload = prev_offload;
+	ret = macsec_update_offload(macsec, offload);
 
 	rtnl_unlock();
 	return ret;
@@ -3698,6 +3695,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
+	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
 };
 
 static void macsec_free_netdev(struct net_device *dev)
@@ -3803,6 +3801,29 @@ static int macsec_changelink_common(struct net_device *dev,
 	return 0;
 }
 
+static int macsec_changelink_upd_offload(struct net_device *dev, struct nlattr *data[])
+{
+	enum macsec_offload offload;
+	struct macsec_dev *macsec;
+
+	macsec = macsec_priv(dev);
+	offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+
+	if (macsec->offload == offload)
+		return 0;
+
+	/* Check if the offloading mode is supported by the underlying layers */
+	if (offload != MACSEC_OFFLOAD_OFF &&
+	    !macsec_check_offload(offload, macsec))
+		return -EOPNOTSUPP;
+
+	/* Check if the net device is busy. */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	return macsec_update_offload(macsec, offload);
+}
+
 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 			     struct nlattr *data[],
 			     struct netlink_ext_ack *extack)
@@ -3831,6 +3852,12 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (ret)
 		goto cleanup;
 
+	if (data[IFLA_MACSEC_OFFLOAD]) {
+		ret = macsec_changelink_upd_offload(dev, data);
+		if (ret)
+			goto cleanup;
+	}
+
 	/* If h/w offloading is available, propagate to the device */
 	if (macsec_is_offloaded(macsec)) {
 		const struct macsec_ops *ops;
@@ -4231,16 +4258,22 @@ static size_t macsec_get_size(const struct net_device *dev)
 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
 		0;
 }
 
 static int macsec_fill_info(struct sk_buff *skb,
 			    const struct net_device *dev)
 {
-	struct macsec_secy *secy = &macsec_priv(dev)->secy;
-	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	struct macsec_tx_sc *tx_sc;
+	struct macsec_dev *macsec;
+	struct macsec_secy *secy;
 	u64 csid;
 
+	macsec = macsec_priv(dev);
+	secy = &macsec->secy;
+	tx_sc = &secy->tx_sc;
+
 	switch (secy->key_len) {
 	case MACSEC_GCM_AES_128_SAK_LEN:
 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4265,6 +4298,7 @@ static int macsec_fill_info(struct sk_buff *skb,
 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
 	    0)
 		goto nla_put_failure;
 
--
2.21.3