[RFC v2 5/6] dmabuf: Add gpu cgroup charge transfer function

From: T.J. Mercier
Date: Fri Feb 11 2022 - 11:19:20 EST


The dma_buf_charge_transfer function provides a way to transfer the charge
for a buffer from one process's GPU cgroup to another's. This is essential
for cases where a central allocator process performs allocations on behalf of
various subsystems, hands the fd over to the client that requested the
memory, and then drops all of its references to the allocated memory.

From: Hridya Valsaraju <hridya@xxxxxxxxxx>
Signed-off-by: Hridya Valsaraju <hridya@xxxxxxxxxx>
Co-developed-by: T.J. Mercier <tjmercier@xxxxxxxxxx>
Signed-off-by: T.J. Mercier <tjmercier@xxxxxxxxxx>
---
Changes in v2:
- Move dma-buf cgroup charge transfer from a dma_buf_op defined by every
heap to a single dma-buf function for all heaps per Daniel Vetter and
Christian König.
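
As an illustration of the handover described in the commit message (not part
of this patch), a kernel component acting on behalf of the central allocator
could move a buffer's charge into the receiving task's GPU cgroup roughly as
follows. example_handover_charge() and client_task are hypothetical names
used only for this sketch, and gpucg reference handling is elided:

/*
 * Sketch only: called from the allocating process's context once the fd
 * has been handed over to client_task.
 */
static int example_handover_charge(struct dma_buf *dmabuf,
				   struct task_struct *client_task)
{
	/* Look up the destination cgroup of the task receiving the buffer. */
	struct gpucg *dest = gpucg_get(client_task);

	/*
	 * Succeeds only if the caller belongs to the cgroup the buffer is
	 * currently charged to; on success the charge moves to 'dest'.
	 * Reference handling for 'dest' follows the gpucg_get()/gpucg_put()
	 * rules from earlier patches in this series and is omitted here.
	 */
	return dma_buf_charge_transfer(dmabuf, dest);
}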

drivers/dma-buf/dma-buf.c | 48 +++++++++++++++++++++++++++++++++++++++
include/linux/dma-buf.h | 2 ++
2 files changed, 50 insertions(+)

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 83d0d1b91547..55e1b982f840 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1374,6 +1374,54 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);

+/**
+ * dma_buf_charge_transfer - Change the GPU cgroup to which the provided dma_buf
+ * is charged.
+ * @dmabuf: [in] buffer whose charge will be migrated to a different GPU
+ * cgroup
+ * @gpucg: [in] the destination GPU cgroup for dmabuf's charge
+ *
+ * Only tasks that belong to the same cgroup that the buffer is currently
+ * charged to may call this function; otherwise it will return -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_charge_transfer(struct dma_buf *dmabuf, struct gpucg *gpucg)
+{
+#ifdef CONFIG_CGROUP_GPU
+	struct gpucg *current_gpucg;
+	int ret = 0;
+
+	/*
+	 * Verify that the cgroup of the process requesting the transfer is the
+	 * same as the one the buffer is currently charged to.
+	 */
+	current_gpucg = gpucg_get(current);
+	mutex_lock(&dmabuf->lock);
+	if (current_gpucg != dmabuf->gpucg) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	ret = gpucg_try_charge(gpucg, dmabuf->gpucg_dev, dmabuf->size);
+	if (ret)
+		goto out;
+
+	dmabuf->gpucg = gpucg;
+
+	/* Uncharge the buffer from the cgroup it was previously charged to. */
+	gpucg_uncharge(current_gpucg, dmabuf->gpucg_dev, dmabuf->size);
+
+out:
+	mutex_unlock(&dmabuf->lock);
+	gpucg_put(current_gpucg);
+	return ret;
+#else
+	return 0;
+#endif /* CONFIG_CGROUP_GPU */
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_charge_transfer, DMA_BUF);
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 742f29c3daaf..85c940c08867 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -646,4 +646,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+
+int dma_buf_charge_transfer(struct dma_buf *dmabuf, struct gpucg *gpucg);
#endif /* __DMA_BUF_H__ */
--
2.35.1.265.g69c8d7142f-goog