Re: [PATCH v5 bpf-next 2/3] bpf: introduce helper bpf_get_branch_snapshot

From: Peter Zijlstra
Date: Sat Sep 04 2021 - 06:56:09 EST


On Sat, Sep 04, 2021 at 12:24:30PM +0200, Peter Zijlstra wrote:
> On Fri, Sep 03, 2021 at 10:10:16AM -0700, Andrii Nakryiko wrote:
> > > I suppose you have to have this helper function because the JIT cannot
> > > emit static_call()... although in this case one could cheat and simply
> > > emit a call to static_call_query() and not bother with dynamic updates
> > > (because there aren't any).
> >
> > If that's safe, let's do it.
>
> I'll try and remember to look into static_call_lock(), a means of
> forever denying future static_call_update() calls. That should make this
> more obvious.

A little something like so I suppose.... we don't really have spare
bits in the !INLINE case :/


---
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 3e56a9751c06..b0feccd56d37 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -174,6 +174,10 @@ struct static_call_tramp_key {
s32 key;
};

+extern void __static_call_lock(struct static_call_key *key);
+
+#define static_call_lock(name) __static_call_lock(&STATIC_CALL_KEY(name))
+
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);
@@ -215,6 +219,8 @@ extern long __static_call_return0(void);

#elif defined(CONFIG_HAVE_STATIC_CALL)

+#define static_call_lock(name)
+
static inline int static_call_init(void) { return 0; }

#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
@@ -268,6 +274,8 @@ static inline long __static_call_return0(void)

#else /* Generic implementation */

+#define static_call_lock(name)
+
static inline int static_call_init(void) { return 0; }

static inline long __static_call_return0(void)
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
index 5a00b8b2cf9f..e40a3b595c4a 100644
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -62,6 +62,7 @@ struct static_call_key {
void *func;
union {
/* bit 0: 0 = mods, 1 = sites */
+ /* bit 1: locked */
unsigned long type;
struct static_call_mod *mods;
struct static_call_site *sites;
diff --git a/kernel/static_call.c b/kernel/static_call.c
index 43ba0b1e0edb..a1ba93fbad29 100644
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -104,6 +104,11 @@ static inline bool static_call_key_has_mods(struct static_call_key *key)
return !(key->type & 1);
}

+static inline bool static_call_key_is_locked(struct static_call_key *key)
+{
+ return !!(key->type & 2);
+}
+
static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
{
if (!static_call_key_has_mods(key))
@@ -117,7 +122,7 @@ static inline struct static_call_site *static_call_key_sites(struct static_call_
if (static_call_key_has_mods(key))
return NULL;

- return (struct static_call_site *)(key->type & ~1);
+ return (struct static_call_site *)(key->type & ~3);
}

void __static_call_update(struct static_call_key *key, void *tramp, void *func)
@@ -125,6 +130,9 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
struct static_call_site *site, *stop;
struct static_call_mod *site_mod, first;

+ if (WARN_ON_ONCE(static_call_key_is_locked(key)))
+ return;
+
cpus_read_lock();
static_call_lock();

@@ -418,6 +426,18 @@ static void static_call_del_module(struct module *mod)
}
}

+void __static_call_lock(struct static_call_key *key)
+{
+ cpus_read_lock();
+ static_call_lock();
+
+ WARN_ON_ONCE(static_call_key_is_locked(key));
+ key->type |= 2;
+
+ static_call_unlock();
+ cpus_read_unlock();
+}
+
static int static_call_module_notify(struct notifier_block *nb,
unsigned long val, void *data)
{