Re: [PATCH v3 3/3] remoteproc: xilinx: add mailbox channels for rpmsg

From: Tanmay Shah
Date: Wed Feb 22 2023 - 15:49:45 EST

On 2/22/23 11:06 AM, Mathieu Poirier wrote:
On Mon, Feb 13, 2023 at 01:18:26PM -0800, Tanmay Shah wrote:
[ ... ]
+
/*
* zynqmp_r5_set_mode()
*
@@ -617,7 +819,7 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
return 0;
}
-static const struct rproc_ops zynqmp_r5_rproc_ops = {
+static struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
.start = zynqmp_r5_rproc_start,
@@ -642,6 +844,7 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
struct zynqmp_r5_core *r5_core;
struct rproc *r5_rproc;
+ struct mbox_info *ipi;
int ret;
/* Set up DMA mask */
@@ -649,12 +852,23 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
if (ret)
return ERR_PTR(ret);
+ /*
+ * If mailbox nodes are disabled using the "status" property, then setting
+ * up mailbox channels will fail. In that case we don't really need the
+ * kick() operation. Include .kick() only if mbox channels are acquired
+ * successfully.
+ */
+ ipi = zynqmp_r5_setup_mbox(cdev);
+ if (ipi)
+ zynqmp_r5_rproc_ops.kick = zynqmp_r5_rproc_kick;
+
/* Allocate remoteproc instance */
r5_rproc = rproc_alloc(cdev, dev_name(cdev),
&zynqmp_r5_rproc_ops,
NULL, sizeof(struct zynqmp_r5_core));
if (!r5_rproc) {
dev_err(cdev, "failed to allocate memory for rproc instance\n");
+ zynqmp_r5_free_mbox(ipi);
return ERR_PTR(-ENOMEM);
}
@@ -665,6 +879,7 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
if (!r5_core->np) {
dev_err(cdev, "can't get device node for r5 core\n");
ret = -EINVAL;
+ zynqmp_r5_free_mbox(ipi);
goto free_rproc;
}
@@ -672,10 +887,17 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
ret = rproc_add(r5_rproc);
if (ret) {
dev_err(cdev, "failed to add r5 remoteproc\n");
+ zynqmp_r5_free_mbox(ipi);
goto free_rproc;
}
+ if (ipi) {
+ r5_core->ipi = ipi;
+ ipi->r5_core = r5_core;
+ }
+
r5_core->rproc = r5_rproc;
+
return r5_core;
free_rproc:
@@ -918,6 +1140,7 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
while (i >= 0) {
put_device(child_devs[i]);
if (r5_cores[i]) {
+ zynqmp_r5_free_mbox(r5_cores[i]->ipi);
The mailboxes are initialized in zynqmp_r5_add_rproc_core() but freed here in
case of trouble, which introduces coupling between the two functions. I suggest
moving zynqmp_r5_setup_mbox() to zynqmp_r5_cluster_init() and initializing both
mailboxes there.

I am done reviewing this set.


Ack. Yes that makes sense.
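
Roughly the direction I have in mind (untested sketch, not the actual next
revision): a cluster-level helper that acquires the mailboxes right after the
cores are created, so that zynqmp_r5_setup_mbox() and the zynqmp_r5_free_mbox()
calls on the unwind path both live in zynqmp_r5_cluster_init(). The helper name
and the core_count / child_devs[] / r5_cores[] parameters are placeholders for
whatever cluster_init already has in scope; everything else is taken from the
hunks above.

/* hypothetical helper, called from zynqmp_r5_cluster_init() */
static void zynqmp_r5_cluster_setup_mboxes(int core_count,
                                           struct device **child_devs,
                                           struct zynqmp_r5_core **r5_cores)
{
        struct mbox_info *ipi;
        int i;

        for (i = 0; i < core_count; i++) {
                /* a NULL return just means the mailbox nodes are disabled */
                ipi = zynqmp_r5_setup_mbox(child_devs[i]);
                if (!ipi)
                        continue;

                r5_cores[i]->ipi = ipi;
                ipi->r5_core = r5_cores[i];
        }
}

The one detail to double-check is the conditional .kick assignment, which in
this version happens in zynqmp_r5_add_rproc_core() before rproc_alloc(), so it
would have to be handled per core once the mailbox setup moves.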

Thanks,

Tanmay

Thanks,
Mathieu

of_reserved_mem_device_release(r5_cores[i]->dev);
rproc_del(r5_cores[i]->rproc);
rproc_free(r5_cores[i]->rproc);
@@ -942,6 +1165,7 @@ static void zynqmp_r5_cluster_exit(void *data)
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
+ zynqmp_r5_free_mbox(r5_core->ipi);
of_reserved_mem_device_release(r5_core->dev);
put_device(r5_core->dev);
rproc_del(r5_core->rproc);
--
2.25.1