Re: [PATCH v6 net-next 5/7] net: ethernet: ti: am65-cpsw: add mqprio qdisc offload in channel mode

From: Vladimir Oltean
Date: Mon Nov 20 2023 - 18:03:24 EST


On Mon, Nov 20, 2023 at 04:01:45PM +0200, Roger Quadros wrote:
> +static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
> +{
> +	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
> +	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
> +	struct tc_mqprio_qopt_offload *mqprio = type_data;
> +	struct am65_cpsw_common *common = port->common;
> +	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
> +	int i, tc, offset, count, prio, ret;
> +	u8 num_tc = qopt->num_tc;
> +	u32 tx_prio_map = 0;
> +
> +	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
> +
> +	ret = pm_runtime_get_sync(common->dev);
> +	if (ret < 0) {
> +		pm_runtime_put_noidle(common->dev);
> +		return ret;
> +	}
> +
> +	if (!num_tc) {
> +		am65_cpsw_reset_tc_mqprio(ndev);
> +		ret = -EINVAL;

num_tc == 0 is what signals the deletion of the mqprio qdisc.
Why return -EINVAL?
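Deleting the qdisc should succeed: reset the port back to its default
configuration and return 0, i.e. something along these lines (untested
sketch, only to illustrate the semantics I would expect):

	if (!num_tc) {
		am65_cpsw_reset_tc_mqprio(ndev);
		ret = 0;
		goto exit_put;
	}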

> +		goto exit_put;
> +	}
> +
> +	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
> +	if (ret)
> +		goto exit_put;
> +
> +	netdev_set_num_tc(ndev, num_tc);
> +
> +	/* Multiple Linux priorities can map to a Traffic Class.
> +	 * A Traffic Class can have multiple contiguous Queues,
> +	 * and Queues get mapped to Channels (thread_id).
> +	 * If not VLAN tagged, thread_id is used as packet_priority;
> +	 * if VLAN tagged, VLAN priority is used as packet_priority.
> +	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
> +	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
> +	 * As p0_rx_pri_map is left at its default (0x76543210), we can
> +	 * assume that Queue_n gets mapped to header_priority_n. We can then
> +	 * set the switch priority in pn_tx_pri_map.
> +	 */
> +
> +	for (tc = 0; tc < num_tc; tc++) {
> +		prio = tc;
> +
> +		/* For simplicity we assign the same priority (TCn) to
> +		 * all queues of a Traffic Class.
> +		 */
> +		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
> +			tx_prio_map |= prio << (4 * i);
> +
> +		count = qopt->count[tc];
> +		offset = qopt->offset[tc];
> +		netdev_set_tc_queue(ndev, tc, count, offset);
> +	}
> +
> +	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
> +
> +	am65_cpsw_tx_pn_shaper_apply(port);
> +
> +exit_put:
> +	pm_runtime_put(common->dev);
> +
> +	return ret;
> +}
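
To spell out the mapping that this loop programs, here is a small
user-space sketch of the tx_prio_map computation with a made-up mqprio
config (the num_tc/count[]/offset[] values below are only an example,
not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical "queues 2@0 1@2 1@3" mqprio layout */
		const int num_tc = 3;
		const int count[]  = { 2, 1, 1 };
		const int offset[] = { 0, 2, 3 };
		unsigned int tx_prio_map = 0;
		int tc, i;

		for (tc = 0; tc < num_tc; tc++)
			for (i = offset[tc]; i < offset[tc] + count[tc]; i++)
				tx_prio_map |= tc << (4 * i);

		/* queues 0-1 -> TC0, queue 2 -> TC1, queue 3 -> TC2 */
		printf("tx_prio_map = 0x%x\n", tx_prio_map); /* prints 0x2100 */
		return 0;
	}

i.e. every queue of a traffic class gets that TC's number as its 4-bit
switch priority field, which matches the comment above.
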
> +
>  static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
>  {
>  	return port->qos.est_oper || port->qos.est_admin;
> @@ -737,16 +989,6 @@ static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_blo
>  					  port, port, true);
>  }
>
> -static u32
> -am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
> -{
> -	u32 ir;
> -
> -	bus_freq /= 1000000;
> -	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
> -	return ir;
> -}
> -

Insufficient code movement in the previous patch?