Re: [PATCH 4/4] locking: Add kselftests for ww_mutex stress

From: Maarten Lankhorst
Date: Wed Nov 30 2016 - 07:29:53 EST


On 30-11-16 at 01:35, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Cc: Maarten Lankhorst <dev@xxxxxxxxxxxxxx>
> Cc: Nicolai Hähnle <nhaehnle@xxxxxxxxx>
> ---
> kernel/locking/test-ww_mutex.c | 134 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 134 insertions(+)
>
> diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
> index 63a5031de138..c367014f62dc 100644
> --- a/kernel/locking/test-ww_mutex.c
> +++ b/kernel/locking/test-ww_mutex.c
> @@ -21,6 +21,9 @@
> #include <linux/kthread.h>
> #include <linux/ww_mutex.h>
> #include <linux/completion.h>
> +#include <linux/random.h>
> +#include <linux/slab.h>
> +#include <linux/delay.h>
>
> MODULE_LICENSE("GPL");
> MODULE_AUTHOR("Intel Corporation");
> @@ -224,6 +227,129 @@ static int test_abba(void)
> return ret;
> }
>
> +struct stress {
> +	struct work_struct work;
> +	struct ww_mutex *locks;
> +	int nlocks;
> +};
> +
> +static int *get_random_order(int count)
> +{
> +	int *order;
> +	int n, r, tmp;
> +
> +	order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
> +	if (!order)
> +		return order;
> +
> +	for (n = 0; n < count; n++)
> +		order[n] = n;
> +
> +	for (n = count - 1; n > 1; n--) {
> +		r = get_random_int() % (n + 1);
> +		if (r != n) {
> +			tmp = order[n];
> +			order[n] = order[r];
> +			order[r] = tmp;
> +		}
> +	}
> +
> +	return order;
> +}
> +
> +static void stress_work(struct work_struct *work)
> +{
> +	struct stress *stress = container_of(work, typeof(*stress), work);
> +	const int nlocks = stress->nlocks;
> +	struct ww_mutex *locks = stress->locks;
> +	struct ww_acquire_ctx ctx;
> +	int contended = -1;
> +	int *order;
> +	int n, ret;
> +
> +	order = get_random_order(nlocks);
> +	if (!order)
> +		return;
> +
> +	ww_acquire_init(&ctx, &ww_class);
> +
> +retry:
> +	ret = 0;
> +	for (n = 0; n < nlocks; n++) {
> +		if (n == contended)
> +			continue;
> +
> +		ret = ww_mutex_lock(&locks[order[n]], &ctx);
> +		if (ret < 0)
> +			break;
> +	}
What's wrong with also attempting to lock the contended lock here, instead of skipping it?
Who knows, that might shake out a few more bugs than the functional tests already do.
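Something along these lines, perhaps (a rough, untested sketch on top of the
loop above, assuming ww_mutex_lock() reports -EALREADY when the lock is
already held in the same acquire context):

	for (n = 0; n < nlocks; n++) {
		ret = ww_mutex_lock(&locks[order[n]], &ctx);
		/* The lock we re-acquired via ww_mutex_lock_slow() on the
		 * previous pass is still held, so it should come back as
		 * -EALREADY here rather than as a new deadlock.
		 */
		if (ret == -EALREADY && n == contended)
			ret = 0;
		if (ret < 0)
			break;
	}

That way the retry pass also exercises the already-held path instead of
special-casing it, and anything other than -EALREADY on the contended lock
still shows up as a failure.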
> +	if (!ret)
> +		usleep_range(1000, 2000); /* dummy load */
> +
> +	if (contended > n)
> +		ww_mutex_unlock(&locks[order[contended]]);
> +	contended = n;
> +	while (n--)
> +		ww_mutex_unlock(&locks[order[n]]);
> +
> +	if (ret == -EDEADLK) {
> +		ww_mutex_lock_slow(&locks[order[contended]], &ctx);
> +		goto retry;
> +	}
> +
> +	if (ret)
> +		pr_err_once("ww_mutex stress test failed with %d\n", ret);
> +
> +	ww_acquire_fini(&ctx);
> +
> +	kfree(order);
> +	kfree(stress);
> +}
> +
> +static int stress(int nlocks, int count)
> +{
> +	struct ww_mutex *locks;
> +	struct workqueue_struct *wq;
> +	int ret = -ENOMEM;
> +	int n;
> +
> +	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
> +	if (!wq)
> +		return -ENOMEM;
> +
> +	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
> +	if (!locks)
> +		goto err;
> +
> +	for (n = 0; n < nlocks; n++)
> +		ww_mutex_init(&locks[n], &ww_class);
> +
> +	for (n = 0; n < count; n++) {
> +		struct stress *stress;
> +
> +		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
> +		if (!stress)
> +			break;
> +
> +		INIT_WORK(&stress->work, stress_work);
> +		stress->locks = locks;
> +		stress->nlocks = nlocks;
> +
> +		queue_work(wq, &stress->work);
> +	}
> +
> +	flush_workqueue(wq);
> +
> +	for (n = 0; n < nlocks; n++)
> +		ww_mutex_destroy(&locks[n]);
> +	kfree(locks);
> +
> +	ret = 0;
> +err:
> +	destroy_workqueue(wq);
> +	return ret;
> +}
> +
> static int __init test_ww_mutex_init(void)
> {
> int ret;
> @@ -240,6 +366,14 @@ static int __init test_ww_mutex_init(void)
> if (ret)
> return ret;
>
> +	ret = stress(16, 1024);
> +	if (ret)
> +		return ret;
> +
> +	ret = stress(4096, 1024);
> +	if (ret)
> +		return ret;
> +
> return 0;
> }
>