Re: [PATCH 10/13] scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c

From: John Garry
Date: Wed Nov 08 2023 - 08:59:15 EST



@@ -392,7 +394,7 @@ static int fnic_notify_set(struct fnic *fnic)
err = vnic_dev_notify_set(fnic->vdev, -1);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
+ err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->cpy_wq_base);
break;
default:
shost_printk(KERN_ERR, fnic->lport->host,
@@ -565,11 +567,6 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
host->max_cmd_len = FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;

Please be aware of the comment on nr_hw_queues in scsi_host.h - maybe it is relevant to this adapter:

"the total queue depth per host is nr_hw_queues * can_queue"

Also, since you seem to be using blk_mq_unique_tag() as the per-IO tag, I assume that you don't need to set shost.host_tagset for that reason.
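
For reference, the unique tag packs the hwq index into the upper 16
bits, so the per-IO tag stays unique host-wide without a shared
tagset. Something like (sc being the scsi_cmnd):

	u32 utag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(utag);
	u16 tag = blk_mq_unique_tag_to_tag(utag);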

- if (host->nr_hw_queues > 1)
- shost_printk(KERN_ERR, host,
- "fnic: blk-mq is not supported");
-
- host->nr_hw_queues = fnic->wq_copy_count = 1;
shost_printk(KERN_INFO, host,
"fnic: can_queue: %d max_lun: %llu",
@@ -582,15 +579,71 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
return 0;
}
+void fnic_mq_map_queues_cpus(struct Scsi_Host *host)

This function looks to do effectively the same as blk_mq_pci_map_queues(), right? (untested sketch of what I mean at the end of this mail)

+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+ int irq_num;
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct pci_dev *l_pdev = fnic->pdev;
+ struct blk_mq_tag_set *set = &host->tag_set;
+ int intr_mode = fnic->config.intr_mode;
+
+ if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: intr_mode is not msix\n",
+ fnic->fnic_num, __func__, __LINE__);
+ return;
+ }
+
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "fnic<%d>: %s: %d: set->nr_hw_queues: %d\n",
+ fnic->fnic_num, __func__, __LINE__, set->nr_hw_queues);
+
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ if (l_pdev == NULL) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: l_pdev is null\n",
+ fnic->fnic_num, __func__, __LINE__);
+ return;
+ }
+
+ irq_num = pci_irq_vector(l_pdev, queue + 2);
+ if (irq_num < 0) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: irq_num less than zero: %d\n",
+ fnic->fnic_num, __func__, __LINE__, irq_num);
+ continue;
+ }
+
+ mask = irq_get_effective_affinity_mask(irq_num);
+ if (!mask) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic<%d>: %s: %d: failed to get irq_affinity map for queue: %d\n",
+ fnic->fnic_num, __func__, __LINE__, queue);
+ continue;
+ }
+
+ for_each_cpu(cpu, mask) {
+ set->map[HCTX_TYPE_DEFAULT].mq_map[cpu] = queue;
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "fnic<%d>: %s: %d: cpu: %d <=> queue: %d\n",
+ fnic->fnic_num, __func__, __LINE__, cpu, queue);
+ }
+ }
+}
+
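
To illustrate the blk_mq_pci_map_queues() point above: the whole
function could reduce to something like this untested sketch. The
offset of 2 assumes the queue vectors start after the two non-queue
MSI-X vectors, matching the pci_irq_vector(l_pdev, queue + 2) call:

	#include <linux/blk-mq-pci.h>

	static void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
	{
		struct fc_lport *lp = shost_priv(host);
		struct fnic *fnic = lport_priv(lp);

		if (fnic->config.intr_mode != VNIC_DEV_INTR_MODE_MSIX)
			return;

		/* derive the cpu <-> queue map from the MSI-X affinity */
		blk_mq_pci_map_queues(&host->tag_set.map[HCTX_TYPE_DEFAULT],
				      fnic->pdev, 2);
	}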

Thanks,
John