Re: [PATCH 2/3] mm/zsmalloc: remove migrate_write_lock_nested()

From: Chengming Zhou
Date: Mon Feb 19 2024 - 23:52:37 EST


On 2024/2/20 12:48, Sergey Senozhatsky wrote:
> On (24/02/19 13:33), Chengming Zhou wrote:
>> static void migrate_write_unlock(struct zspage *zspage)
>> {
>> write_unlock(&zspage->lock);
>> @@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>> dst_zspage = isolate_dst_zspage(class);
>> if (!dst_zspage)
>> break;
>> - migrate_write_lock(dst_zspage);
>> }
>>
>> src_zspage = isolate_src_zspage(class);
>> if (!src_zspage)
>> break;
>>
>> - migrate_write_lock_nested(src_zspage);
>> -
>> + migrate_write_lock(src_zspage);
>> migrate_zspage(pool, src_zspage, dst_zspage);
>> - fg = putback_zspage(class, src_zspage);
>> migrate_write_unlock(src_zspage);
>>
>> + fg = putback_zspage(class, src_zspage);
>
> Hmm. Lockless putback doesn't look right to me. We modify critical
> zspage fields in putback_zspage().

Which I think is protected by pool->lock, right? We already hold it.

>
>> if (fg == ZS_INUSE_RATIO_0) {
>> free_zspage(pool, class, src_zspage);
>> pages_freed += class->pages_per_zspage;
>> @@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>> if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
>> || spin_is_contended(&pool->lock)) {
>> putback_zspage(class, dst_zspage);
>> - migrate_write_unlock(dst_zspage);
>> dst_zspage = NULL;
>>
>> spin_unlock(&pool->lock);
>> @@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct zs_pool *pool,
>> }
>> }
>>
>> - if (src_zspage) {
>> + if (src_zspage)
>> putback_zspage(class, src_zspage);
>> - migrate_write_unlock(src_zspage);
>> - }
>>
>> - if (dst_zspage) {
>> + if (dst_zspage)
>> putback_zspage(class, dst_zspage);
>> - migrate_write_unlock(dst_zspage);
>> - }