Re: [PATCH v2 06/10] maple_tree: Add mas_wr_new_end() to calculate new_end accurately

From: Liam R. Howlett
Date: Wed May 17 2023 - 15:22:45 EST


* Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx> [230517 04:59]:
> The previous new_end calculation is inaccurate because it assumes that
> two new pivots must always be added, even when they are not needed, so
> some writes miss the fast path and take the slow path unnecessarily.
> Add mas_wr_new_end() to calculate new_end accurately, making the
> condition for entering the fast path precise.
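
A worked example makes the gain concrete (numbers are mine, not from the
patch; assuming a maple_range_64 node with 16 slots): take node_end = 14
and a store that lands within a single slot (offset == offset_end),
starting exactly at r_min but ending before end_piv.  The old
calculation

	node_size = node_end - offset_end + offset + 2;

yields 16, so the write is sent to the slow path.  mas_wr_new_end()
subtracts one because r_min == index and returns 15: the store splits
one range into two, the node still fits, and the fast path applies.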
>
> Signed-off-by: Peng Zhang <zhangpeng.00@xxxxxxxxxxxxx>
> ---
> lib/maple_tree.c | 33 ++++++++++++++++++++++-----------
> 1 file changed, 22 insertions(+), 11 deletions(-)
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index f881bce1a9f6..3b9d227f3d7d 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4294,6 +4294,20 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
>  	}
>  }
>
> +static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
> +{
> +	struct ma_state *mas = wr_mas->mas;
> +	unsigned char new_end = wr_mas->node_end + 2;
> +
> +	new_end -= wr_mas->offset_end - mas->offset;
> +	if (wr_mas->r_min == mas->index)
> +		new_end--;

nit: missing blank line here

> +	if (wr_mas->end_piv == mas->last)
> +		new_end--;
> +
> +	return new_end;
> +}
> +
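
It might also be worth a short comment on the counting; a rough sketch
(my wording, adjust as you see fit):

	/*
	 * Start from node_end + 2, assuming the store adds a pivot at
	 * each end (one before index, one after last).  Subtract one
	 * slot for every existing range the write consumes beyond the
	 * first (offset_end - offset), and one more for each end of
	 * the write that already lines up with an existing boundary
	 * (r_min == index, end_piv == last), since no new pivot is
	 * needed there.
	 */
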
>  static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
>  {
>  	unsigned char end = wr_mas->node_end;
> @@ -4349,9 +4363,8 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
>
>  static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>  {
> -	unsigned char node_slots;
> -	unsigned char node_size;
>  	struct ma_state *mas = wr_mas->mas;
> +	unsigned char new_end;
>
>  	/* Direct replacement */
>  	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
> @@ -4361,17 +4374,15 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>  		return;
>  	}
>
> -	/* Attempt to append */
> -	node_slots = mt_slots[wr_mas->type];
> -	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
> -	if (mas->max == ULONG_MAX)
> -		node_size++;
> -
> -	/* slot and node store will not fit, go to the slow path */
> -	if (unlikely(node_size >= node_slots))
> +	/*
> +	 * If new_end does not fit in a node of this type, the write
> +	 * cannot take the fast path.
> +	 */
> +	new_end = mas_wr_new_end(wr_mas);
> +	if (new_end >= mt_slots[wr_mas->type])
> +		goto slow_path;
>
> -	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
> +	if (wr_mas->entry && (wr_mas->node_end < mt_slots[wr_mas->type] - 1) &&
>  	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
>  		if (!wr_mas->content || !wr_mas->entry)
>  			mas_update_gap(mas);
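
One more hypothetical case in the other direction: node_end = 15 on a
full 16-slot node, with a store spanning offset = 3 to offset_end = 6
and both ends aligned (r_min == index, end_piv == last).  The old
formula gives 15 - 6 + 3 + 2 = 14 and the helper gives
15 + 2 - 3 - 1 - 1 = 12; both stay on the fast path, but only the
helper reflects that four ranges collapse into one and the node
shrinks by three slots.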
> --
> 2.20.1
>