Re: [PATCH] hv_balloon: Update the balloon driver to use the SBRM API

From: Boqun Feng
Date: Mon Jul 31 2023 - 17:26:11 EST


Hi Mitchell,

On Wed, Jul 26, 2023 at 12:23:31AM +0000, Mitchell Levy via B4 Relay wrote:
> From: Mitchell Levy <levymitchell0@xxxxxxxxx>
>
>
>
> ---

I don't know whether it's a tooling issue or something else, but
everything after the "---" line in the email gets discarded when the
patch is applied, so your whole description below would be lost from
the commit log. You can try applying this patch yourself and see the
result:

b4 shazam 20230726-master-v1-1-b2ce6a4538db@xxxxxxxxx
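
For reference, this is roughly how git-am (and therefore b4) carves up
a patch mail -- only the text above the "---" marker becomes the commit
log (illustrative layout, not your actual patch):

	Subject: [PATCH] subsystem: short summary

	Commit message body -- this part is kept in git log.

	Signed-off-by: Author <author@example.com>
	---
	Review-only notes and version changelogs go here; they are
	dropped on apply. Your description currently sits in this
	region, which is why it would vanish.

	 drivers/.../file.c | 10 +++++-----
	 1 file changed, 5 insertions(+), 5 deletions(-)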

> This patch is intended as a proof-of-concept for the new SBRM
> machinery[1]. For some brief background, the idea behind SBRM is using
> the __cleanup__ attribute to automatically unlock locks (or otherwise
> release resources) when they go out of scope, similar to C++ style RAII.
> This promises some benefits such as making code simpler (particularly
> where you have lots of goto fail; type constructs) as well as reducing
> the surface area for certain kinds of bugs.
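
[ For other readers following along: the machinery bottoms out in the
compiler's cleanup attribute, which runs a designated function when a
variable leaves scope. A minimal userspace sketch of just that
mechanism -- hypothetical names, not the actual <linux/cleanup.h>
helpers:

	#include <stdio.h>

	typedef struct { int locked; } fake_lock_t;

	/* receives a pointer to the variable being cleaned up */
	static void fake_unlock(fake_lock_t **l)
	{
		(*l)->locked = 0;
		printf("auto-unlocked\n");
	}

	void critical_section(fake_lock_t *lock)
	{
		lock->locked = 1;
		fake_lock_t *token __attribute__((__cleanup__(fake_unlock))) = lock;

		/* ... work under the lock; any return below still unlocks ... */
	}	/* fake_unlock(&token) runs here */
]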
>
> The changes in this patch should not result in any difference in how the
> code actually runs (i.e., it's purely an exercise in this new syntax
> sugar). In one instance SBRM was not appropriate, so I left that part
> alone, but all other locking/unlocking is handled automatically in this
> patch.
>
> Link: https://lore.kernel.org/all/20230626125726.GU4253@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/ [1]
>
> Suggested-by: Boqun Feng <boqun.feng@xxxxxxxxx>
> Signed-off-by: "Mitchell Levy (Microsoft)" <levymitchell0@xxxxxxxxx>

Besides the above format issue, the code looks good to me, nice job!

Feel free to add:

Reviewed-by: Boqun Feng <boqun.feng@xxxxxxxxx>

Regards,
Boqun

> ---
>  drivers/hv/hv_balloon.c | 82 +++++++++++++++++++++++--------------------------
>  1 file changed, 38 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
> index dffcc894f117..2812601e84da 100644
> --- a/drivers/hv/hv_balloon.c
> +++ b/drivers/hv/hv_balloon.c
> @@ -8,6 +8,7 @@
>
>  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>
> +#include <linux/cleanup.h>
>  #include <linux/kernel.h>
>  #include <linux/jiffies.h>
>  #include <linux/mman.h>
> @@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
>  			      void *v)
>  {
>  	struct memory_notify *mem = (struct memory_notify *)v;
> -	unsigned long flags, pfn_count;
> +	unsigned long pfn_count;
>
>  	switch (val) {
>  	case MEM_ONLINE:
> @@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
>  		break;
>
>  	case MEM_OFFLINE:
> -		spin_lock_irqsave(&dm_device.ha_lock, flags);
> -		pfn_count = hv_page_offline_check(mem->start_pfn,
> -						  mem->nr_pages);
> -		if (pfn_count <= dm_device.num_pages_onlined) {
> -			dm_device.num_pages_onlined -= pfn_count;
> -		} else {
> -			/*
> -			 * We're offlining more pages than we managed to online.
> -			 * This is unexpected. In any case don't let
> -			 * num_pages_onlined wrap around zero.
> -			 */
> -			WARN_ON_ONCE(1);
> -			dm_device.num_pages_onlined = 0;
> +		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
> +			pfn_count = hv_page_offline_check(mem->start_pfn,
> +							  mem->nr_pages);
> +			if (pfn_count <= dm_device.num_pages_onlined) {
> +				dm_device.num_pages_onlined -= pfn_count;
> +			} else {
> +				/*
> +				 * We're offlining more pages than we
> +				 * managed to online. This is
> +				 * unexpected. In any case don't let
> +				 * num_pages_onlined wrap around zero.
> +				 */
> +				WARN_ON_ONCE(1);
> +				dm_device.num_pages_onlined = 0;
> +			}
>  		}
> -		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
>  		break;
>  	case MEM_GOING_ONLINE:
>  	case MEM_GOING_OFFLINE:
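
[ Side note for anyone reading along who hasn't met the new API yet:
scoped_guard() holds the lock for exactly the braced block. Conceptually
it is a one-trip for loop whose "loop variable" is a guard object,
roughly like this (simplified; the real macro lives in
<linux/cleanup.h>):

	#define scoped_guard(_name, args...)				\
		for (CLASS(_name, scope)(args),				\
		     *done = NULL; !done; done = (void *)1)

The guard's destructor -- here the unlock -- runs when the object goes
out of scope at the end of the block. ]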
> @@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
>  	unsigned long start_pfn;
>  	unsigned long processed_pfn;
>  	unsigned long total_pfn = pfn_count;
> -	unsigned long flags;
>
>  	for (i = 0; i < (size/HA_CHUNK); i++) {
>  		start_pfn = start + (i * HA_CHUNK);
>
> -		spin_lock_irqsave(&dm_device.ha_lock, flags);
> -		has->ha_end_pfn += HA_CHUNK;
> +		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
> +			has->ha_end_pfn += HA_CHUNK;
>
> -		if (total_pfn > HA_CHUNK) {
> -			processed_pfn = HA_CHUNK;
> -			total_pfn -= HA_CHUNK;
> -		} else {
> -			processed_pfn = total_pfn;
> -			total_pfn = 0;
> -		}
> +			if (total_pfn > HA_CHUNK) {
> +				processed_pfn = HA_CHUNK;
> +				total_pfn -= HA_CHUNK;
> +			} else {
> +				processed_pfn = total_pfn;
> +				total_pfn = 0;
> +			}
>
> -		has->covered_end_pfn += processed_pfn;
> -		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
> +			has->covered_end_pfn += processed_pfn;
> +		}
>
>  		reinit_completion(&dm_device.ol_waitevent);
>
> @@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
>  			 */
>  			do_hot_add = false;
>  		}
> -		spin_lock_irqsave(&dm_device.ha_lock, flags);
> -		has->ha_end_pfn -= HA_CHUNK;
> -		has->covered_end_pfn -= processed_pfn;
> -		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
> +		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
> +			has->ha_end_pfn -= HA_CHUNK;
> +			has->covered_end_pfn -= processed_pfn;
> +		}
>  		break;
>  	}
>
> @@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
>  static void hv_online_page(struct page *pg, unsigned int order)
>  {
>  	struct hv_hotadd_state *has;
> -	unsigned long flags;
>  	unsigned long pfn = page_to_pfn(pg);
>
> -	spin_lock_irqsave(&dm_device.ha_lock, flags);
> +	guard(spinlock_irqsave)(&dm_device.ha_lock);
>  	list_for_each_entry(has, &dm_device.ha_region_list, list) {
>  		/* The page belongs to a different HAS. */
>  		if ((pfn < has->start_pfn) ||
> @@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
>  		hv_bring_pgs_online(has, pfn, 1UL << order);
>  		break;
>  	}
> -	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
>  }
>
>  static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
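
[ And for completeness: unlike scoped_guard(), a bare guard() holds the
lock from its declaration to the end of the enclosing scope -- the
function body here -- so every exit path, including the break out of the
list walk above, drops ha_lock exactly once. A sketch with made-up
names (some_lock/bail_early/do_work are illustrative only):

	void f(void)
	{
		guard(spinlock_irqsave)(&some_lock);	/* acquired here */

		if (bail_early)
			return;				/* released here */

		do_work();
	}						/* ... or here */
]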
> @@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
>  	struct hv_hotadd_gap *gap;
>  	unsigned long residual, new_inc;
>  	int ret = 0;
> -	unsigned long flags;
>
> -	spin_lock_irqsave(&dm_device.ha_lock, flags);
> +	guard(spinlock_irqsave)(&dm_device.ha_lock);
>  	list_for_each_entry(has, &dm_device.ha_region_list, list) {
>  		/*
>  		 * If the pfn range we are dealing with is not in the current
> @@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
>  		ret = 1;
>  		break;
>  	}
> -	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
>
>  	return ret;
>  }
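
[ A nice side effect worth pointing out, even though this patch rightly
keeps the diff minimal: the unlock no longer forces a single exit point,
so the ret/break funneling above could in principle become a direct
return from inside the loop, e.g.:

	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/* ... range checks ... */
		return 1;	/* guard drops ha_lock on the way out */
	}
	return 0;

Just an observation, not a request to respin. ]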
> @@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
>  {
>  	struct hv_hotadd_state *ha_region = NULL;
>  	int covered;
> -	unsigned long flags;
>
>  	if (pfn_cnt == 0)
>  		return 0;
> @@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
>  		ha_region->covered_end_pfn = pg_start;
>  		ha_region->end_pfn = rg_start + rg_size;
>
> -		spin_lock_irqsave(&dm_device.ha_lock, flags);
> -		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
> -		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
> +		scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
> +			list_add_tail(&ha_region->list, &dm_device.ha_region_list);
> +		}
>  	}
>
>  do_pg_range:
> @@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
>  	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
>  	struct hv_hotadd_state *has, *tmp;
>  	struct hv_hotadd_gap *gap, *tmp_gap;
> -	unsigned long flags;
>
>  	if (dm->num_pages_ballooned != 0)
>  		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
> @@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
>  #endif
>  	}
>
> -	spin_lock_irqsave(&dm_device.ha_lock, flags);
> +	guard(spinlock_irqsave)(&dm_device.ha_lock);
>  	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
>  		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
>  			list_del(&gap->list);
> @@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
>  		list_del(&has->list);
>  		kfree(has);
>  	}
> -	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
>  }
>
>  static int balloon_suspend(struct hv_device *hv_dev)
>
> ---
> base-commit: 3f01e9fed8454dcd89727016c3e5b2fbb8f8e50c
> change-id: 20230725-master-bbcd9205758b
>
> Best regards,
> --
> Mitchell Levy <levymitchell0@xxxxxxxxx>
>