[PATCH v1 1/1] fpga: dfl: fix kernel warning when releasing/assigning ports for SR-IOV

From: Russ Weight
Date: Mon Apr 25 2022 - 13:48:41 EST


From: Xu Yilun <yilun.xu@xxxxxxxxx>

The dfl ports are registered as platform devices in PF mode. The port
device should be removed from the host when the user wants to configure
the port as a VF and pass it through to a VM. The FME dev ioctls
DFL_FPGA_FME_PORT_RELEASE/ASSIGN are designed for this purpose.

In the previous implementation, the port platform device is not
completely destroyed on port release. It is removed from the system by
platform_device_del(), but the platform device instance is retained.
When the port assign ioctl is called, it is added back by
platform_device_add(). This conflicts with the documentation of
device_add(): "Do not call this routine more than once for any device
structure", and causes a kernel warning at runtime.
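
For illustration only, the problematic sequence looks roughly like the
sketch below; old_port_release()/old_port_assign() and the static
port_pdev are hypothetical names used to show the pattern, not functions
in the driver:

	#include <linux/platform_device.h>

	static struct platform_device *port_pdev;

	static void old_port_release(void)
	{
		/* removes the device from the system... */
		platform_device_del(port_pdev);
		/* ...but the platform_device instance is kept around */
	}

	static int old_port_assign(void)
	{
		/* device_add() runs again on the same struct device -> WARN */
		return platform_device_add(port_pdev);
	}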

This patch completely unregisters the port platform device on release
and registers a new one on assign. The main work, however, is to remove
the dependency on struct dfl_feature_platform_data from many internal
DFL APIs. That structure holds the DFL enumeration info for the feature
devices, and many DFL APIs are expected to work with this info even
after the port platform device is unregistered, but with this change the
platform data would be freed in that case. So this patch introduces a
new structure, dfl_feature_dev_data, for these APIs. It acts much like
the previous dfl_feature_platform_data, and dfl_feature_platform_data
now only needs a pointer to the dfl_feature_dev_data to keep the feature
device drivers working.
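
An abbreviated sketch of the split is shown below; the field list is
illustrative only, taken from the usage visible in the diff rather than
from the complete definitions:

	struct dfl_feature_dev_data {
		struct platform_device *dev;	/* feature dev, may be re-created */
		struct mutex lock;		/* serializes access, as pdata->lock did */
		int disable_count;
		void *private;			/* dfl_fpga_fdata_{get,set}_private() */
		/* ... enumeration info that must outlive the platform device ... */
	};

	struct dfl_feature_platform_data {
		struct dfl_feature_dev_data *fdata;	/* all the driver-side data now needs */
	};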

Signed-off-by: Xu Yilun <yilun.xu@xxxxxxxxx>
Signed-off-by: Russ Weight <russell.h.weight@xxxxxxxxx>
---
drivers/fpga/dfl-afu-dma-region.c | 119 +++++----
drivers/fpga/dfl-afu-error.c | 59 +++--
drivers/fpga/dfl-afu-main.c | 254 +++++++++---------
drivers/fpga/dfl-afu-region.c | 50 ++--
drivers/fpga/dfl-afu.h | 26 +-
drivers/fpga/dfl-fme-br.c | 24 +-
drivers/fpga/dfl-fme-error.c | 98 +++----
drivers/fpga/dfl-fme-main.c | 70 +++--
drivers/fpga/dfl-fme-pr.c | 84 +++---
drivers/fpga/dfl.c | 427 +++++++++++++++---------------
drivers/fpga/dfl.h | 138 ++++++----
11 files changed, 696 insertions(+), 653 deletions(-)

diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..d85c41435b39 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@

#include "dfl-afu.h"

-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);

afu->dma_regions = RB_ROOT;
}

/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;

ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,

/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
+ struct device *dev = &fdata->dev->dev;
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;

unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,21 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,

/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
+ struct device *dev = &fdata->dev->dev;
struct rb_node **new, *parent = NULL;

- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);

new = &afu->dma_regions.rb_node;
@@ -177,50 +178,51 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,

/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu *afu;

- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);

- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}

/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;

while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);

- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);

rb_erase(node, &afu->dma_regions);

if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);

if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);

node = rb_next(node);
kfree(region);
@@ -229,7 +231,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)

/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +241,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;

while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +278,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)

/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}

/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +300,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;

@@ -323,47 +326,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;

/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}

/* Only accept continuous pages, return error else */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}

/* As pages are continuous then start to do DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}

*iova = region->iova;

- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}

return 0;

unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +374,34 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,

/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;

- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}

if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}

- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);

- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);

return 0;
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)

/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);

writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}

static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);

- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}

/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;

- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);

/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}

/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;

/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);

/* Clear errors if err input matches with current port errors.*/
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}

/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);

/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);

done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}

static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;

+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;

return attr->mode;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 7f621e96d3b8..eccbb407082d 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@

/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;

- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);

- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;

- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}

@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)

/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, it puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;

- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;

- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}

@@ -118,34 +118,36 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempts on MMIO access to AFU while in reset, will
* result errors reported via port error reporting sub feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;

- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;

- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}

static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata;
int ret;

- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
+
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);

return ret;
}

-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
@@ -153,7 +155,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);

return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +165,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +182,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +190,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -203,15 +206,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +223,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;

if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -241,15 +244,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +261,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;

if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -278,15 +281,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +299,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;

if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -317,18 +320,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;

if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -338,15 +341,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +359,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +391,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +461,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;

- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);

- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}

guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +490,15 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+
+ fdata = to_dfl_feature_dev_data(dev);

/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;

return attr->mode;
@@ -504,9 +512,11 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata =
+ to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];

- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +535,11 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata =
+ to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];

- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -596,21 +608,19 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret;

- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
+ fdata = to_dfl_feature_dev_data(&fdev->dev);

- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return ret;
}
@@ -618,24 +628,24 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;

dev_dbg(&pdev->dev, "Device File Release\n");

- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);

- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);

- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return 0;
}
@@ -650,6 +660,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
unsigned long minsz;
@@ -662,12 +673,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;

- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -691,7 +702,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;

- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(pdata->fdata, rinfo.index, &region);
if (ret)
return ret;

@@ -708,6 +719,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
long ret;
@@ -720,16 +732,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;

- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;

if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}

- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -751,7 +763,7 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;

- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(pdata->fdata, unmap.iova);
}

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -785,7 +797,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 and other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(pdata->fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -817,7 +829,8 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
pdata = dev_get_platdata(&pdev->dev);

offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(pdata->fdata, offset, size,
+ &region);
if (ret)
return ret;

@@ -852,6 +865,7 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_afu *afu;

afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
@@ -860,11 +874,11 @@ static int afu_dev_init(struct platform_device *pdev)

afu->pdata = pdata;

- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);

return 0;
}
@@ -872,27 +886,27 @@ static int afu_dev_init(struct platform_device *pdev)
static int afu_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;

- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);

return 0;
}

-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);

return ret;
}
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 0804b7a0c298..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@

/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);

INIT_LIST_HEAD(&afu->regions);
}
@@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
+ * @fdata: afu feature dev data
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
@@ -46,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;

- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;

@@ -62,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);

- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);

/* check if @index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -79,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,

afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return 0;

exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}

/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;

list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}

/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -117,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;

- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -126,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}

/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -143,7 +145,7 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -151,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;

- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -161,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index e5020e2b1f3d..a712cf6d6a04 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -79,27 +79,27 @@ struct dfl_afu {
struct dfl_feature_platform_data *pdata;
};

-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when call __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);

-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);

extern const struct dfl_feature_ops port_err_ops;
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 808d1f4d76df..fed9bc903f7a 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};

static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;

- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;

- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}

- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;

priv->port_ops = ops;
}

- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}

static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -85,8 +85,6 @@ static int fme_br_remove(struct platform_device *pdev)

fpga_bridge_unregister(br);

- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);

diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;

writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;

writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;

+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;

return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {

static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);

/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}

static int fme_global_err_init(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 77ea04d4edbe..967c40debfdb 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -27,10 +27,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_CAP);

@@ -46,10 +47,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_BITSTREAM_ID);

@@ -64,10 +66,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_BITSTREAM_MD);

@@ -78,10 +81,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_CAP);

@@ -93,10 +97,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_CAP);

@@ -108,10 +113,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_CAP);

@@ -137,7 +143,7 @@ static const struct attribute_group fme_hdr_group = {
static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = pdata->fdata->dfl_cdev;
int port_id;

if (get_user(port_id, (int __user *)arg))
@@ -149,7 +155,7 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = pdata->fdata->dfl_cdev;
int port_id;

if (get_user(port_id, (int __user *)arg))
@@ -410,14 +416,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;

val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);

switch (attr) {
case hwmon_power_max:
@@ -437,7 +443,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}

- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return ret;
}
@@ -599,19 +605,21 @@ static int fme_open(struct inode *inode, struct file *filp)
{
struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata;
int ret;

if (WARN_ON(!pdata))
return -ENODEV;

- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ fdata = pdata->fdata;
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = pdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return ret;
}
@@ -619,19 +627,20 @@ static int fme_open(struct inode *inode, struct file *filp)
static int fme_release(struct inode *inode, struct file *filp)
{
struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;

dev_dbg(&pdev->dev, "Device File Release\n");

- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);

- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return 0;
}
@@ -639,7 +648,8 @@ static int fme_release(struct inode *inode, struct file *filp)
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;

@@ -657,7 +667,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* handled in this sub feature, and returns 0 or other
* error code if cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -672,6 +682,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_fme *fme;

fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
@@ -680,9 +691,9 @@ static int fme_dev_init(struct platform_device *pdev)

fme->pdata = pdata;

- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);

return 0;
}
@@ -690,10 +701,11 @@ static int fme_dev_init(struct platform_device *pdev)
static void fme_dev_destroy(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;

- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}

static const struct file_operations fme_fops = {
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index d61ce9a18879..1cfcb06cf0d1 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)

static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;

/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)

info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)

put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -170,10 +169,10 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
* Return: mgr platform device if successful, and error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;

@@ -211,9 +210,9 @@ dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);

platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);

- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;

fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -274,11 +273,11 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)

/**
- * dfl_fme_destroy_bridge - destroy all fpga bridge platform device
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;

list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, and error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;

@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)

/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;

list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,8 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata =
+ to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +378,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;

- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);

/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);

/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +404,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;

/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +413,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);

/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +422,31 @@ static int pr_mgmt_init(struct platform_device *pdev,

list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

return 0;

destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}

static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata =
+ to_dfl_feature_dev_data(&pdev->dev);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);

- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}

static long fme_pr_ioctl(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 599bb21d86af..4f36152f4ab6 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -118,17 +118,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}

-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -160,7 +149,8 @@ static LIST_HEAD(dfl_port_ops_list);
*
- * Please note that must dfl_fpga_port_ops_put after use the port_ops.
+ * Note that dfl_fpga_port_ops_put() must be called after using the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *
+dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;

@@ -170,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)

list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -226,22 +216,21 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
*
* Return: 1 if port device matches with given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;

- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;

- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;

- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);

- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);

@@ -348,10 +337,10 @@ static void release_dfl_dev(struct device *dev)
}

static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -377,10 +366,10 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;

- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->cdev = fdata->dfl_cdev;

/* add mmio resource */
parent_res = &pdev->resource[feature->resource_index];
@@ -423,11 +412,11 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
return ERR_PTR(ret);
}

-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;

- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -435,13 +424,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}

-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;

- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;

@@ -450,7 +439,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}

- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -462,7 +451,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;

err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}

@@ -493,11 +482,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_feature *feature;

- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);

- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -568,12 +558,13 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;

while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
ret = dfl_feature_instance_init(pdev, pdata,
feature, drv);
@@ -584,7 +575,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}

- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;

@@ -683,7 +674,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -696,7 +687,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;

- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -724,50 +715,51 @@ struct dfl_feature_info {
unsigned int nr_irqs;
};

-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}

-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;

- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);

- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->type = type;

- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);

/*
@@ -776,25 +768,15 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for port device.
* and it should always be 0 for fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);

/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;

/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;

@@ -810,19 +792,22 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
feature->ioaddr =
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
- if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ if (IS_ERR(feature->ioaddr)) {
+ ret = PTR_ERR(feature->ioaddr);
+ goto err_free_id;
+ }
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}

if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err_free_id;
+ }
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
binfo->irq_table[finfo->irq_base + i];
@@ -835,55 +820,90 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}

- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;

- return ret;
+ return fdata;
+
+err_free_id:
+ dfl_id_free(type, fdata->pdev_id);
+
+ return ERR_PTR(ret);
}

-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * register current feature device, it is called when we need to switch to
+ * another feature parsing or we have parsed all features on given device
+ * feature list.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = { 0 };
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;

- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;

- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;

- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type,
+ fdev->id);
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;

- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
+
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;

- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+ fdata->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ platform_device_unregister(fdata->dev);
+ fdata->dev = NULL;
+}
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
+
+ /* reset the binfo for next FIU */
+ binfo->type = DFL_ID_MAX;

return 0;
}
@@ -892,22 +912,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;

- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}

- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}

@@ -1068,7 +1077,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}

-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)

static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1078,12 +1087,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}

- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}

return 0;
@@ -1124,6 +1132,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1145,10 +1154,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);

- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);

ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1366,13 +1378,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);

static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);

- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
+ dfl_id_free(fdata->type, fdata->pdev_id);

return 0;
}
@@ -1424,6 +1433,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}

+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;

@@ -1465,25 +1475,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);

- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);

remove_feature_devs(cdev);
@@ -1507,23 +1502,21 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
* NOTE: you will need to drop the device reference with put_device() after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *,
+ void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;

- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}

return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);

static int __init dfl_fpga_init(void)
{
@@ -1557,33 +1550,29 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;

mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;

- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}

- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;

- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
+
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1603,34 +1592,30 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;

mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;

- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}

- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;

- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);

cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
+
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1640,10 +1625,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;

- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

v = readq(base + FME_HDR_PORT_OFST(port_id));

@@ -1667,14 +1653,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;

mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;

- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1693,7 +1679,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;

mutex_lock(&cdev->lock);
@@ -1707,11 +1693,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}

- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;

- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1845,6 +1831,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
unsigned long arg)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = pdata->fdata;
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -1864,9 +1851,9 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
if (IS_ERR(fds))
return PTR_ERR(fds);

- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);

kfree(fds);
return ret;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 53572c7aced0..f8ba28079fc3 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -169,6 +170,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */

+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -182,15 +185,16 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};

void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *
+ dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);

/**
* struct dfl_feature_id - dfl private feature id
@@ -256,26 +260,32 @@ struct dfl_feature {
#define FEATURE_DEV_ID_UNUSED (-1)

/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -283,55 +293,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @fdata: dfl enumeration data for the dfl feature device.
+ * @cdev: cdev of feature dev.
+ */
+struct dfl_feature_platform_data {
+ struct dfl_feature_dev_data *fdata;
+ struct cdev cdev;
};

static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;

if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;

- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;

return 0;
}

static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;

- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;

- pdata->open_count--;
+ fdata->open_count--;
}

static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}

static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}

static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}

struct dfl_feature_ops {
@@ -361,30 +384,29 @@ struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)

pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata->dev;
}

-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)

-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;

- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;

return NULL;
}

-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);

if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -393,15 +415,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}

-static inline bool is_dfl_feature_present(struct device *dev, u16 id)
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
{
- return !!dfl_get_feature_ioaddr_by_id(dev, id);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
}

static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}

static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -483,26 +508,23 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);

-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *,
+ void *));

-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *,
+ void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;

mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);

- return pdev;
+ return fdata;
}

int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
--
2.25.1