* attached task, once for the dependent filter, and if
* requested for the user notifier. When @refs reaches zero,
* the filter can be freed.
+ * @users: A filter's @users count is incremented for each directly
+ * attached task (filter installation, fork(), thread_sync),
+ * and once for the dependent filter (tracked in filter->prev).
+ * When it reaches zero, it indicates that no direct or indirect
+ * users of that filter exist. No new tasks can get associated with
+ * this filter after reaching 0. The @users count is always less than
+ * or equal to @refs. Hence, reaching 0 for @users does not mean
+ * the filter can be freed.
* @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
* @prev: points to a previously installed, or inherited, filter
* @prog: the BPF program to evaluate
*/
struct seccomp_filter {
refcount_t refs;
+ refcount_t users;
bool log;
struct seccomp_filter *prev;
struct bpf_prog *prog;
};
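To make the @refs/@users split above concrete, here is a minimal user-space model. This is not kernel code; struct and function names (toy_filter, toy_task_attach, toy_listener_open, toy_task_exit) are made up for illustration. The point it demonstrates: tasks (and a dependent filter) pin both counters, while a user-notification listener fd pins only @refs, so @users can drop to zero and signal EPOLLHUP while the filter's memory stays allocated.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_filter {
	int refs;	/* memory lifetime: tasks + dependent filter + notify fd */
	int users;	/* liveness: tasks + dependent filter only */
};

static void toy_task_attach(struct toy_filter *f)
{
	f->refs++;		/* mirrors __get_seccomp_filter() */
	f->users++;		/* mirrors refcount_inc(&orig->users) */
}

static void toy_listener_open(struct toy_filter *f)
{
	f->refs++;		/* the user notifier pins memory only */
}

/* Returns true when the filter has just become unused (would EPOLLHUP). */
static bool toy_task_exit(struct toy_filter *f)
{
	f->users--;		/* mirrors __seccomp_filter_orphan() */
	f->refs--;		/* mirrors __put_seccomp_filter() */
	return f->users == 0;
}

int main(void)
{
	/* Filter installation: the installing task is the first user. */
	struct toy_filter f = { .refs = 1, .users = 1 };

	toy_listener_open(&f);		/* supervisor keeps the notify fd open */
	toy_task_attach(&f);		/* fork(): child inherits the filter */

	assert(f.users <= f.refs);	/* invariant from the kernel-doc above */

	toy_task_exit(&f);		/* child exits */
	if (toy_task_exit(&f))		/* installer exits too ... */
		printf("no users left: pollers would see EPOLLHUP\n");

	/* ... yet refs == 1: the open notify fd still pins the memory. */
	assert(f.refs == 1 && f.users == 0);
	return 0;
}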
+static void __seccomp_filter_orphan(struct seccomp_filter *orig)
+{
+ while (orig && refcount_dec_and_test(&orig->users)) {
+ if (waitqueue_active(&orig->wqh))
+ wake_up_poll(&orig->wqh, EPOLLHUP);
+ orig = orig->prev;
+ }
+}
+
static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->refs)) {
		struct seccomp_filter *freeme = orig;

		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}
+static void __seccomp_filter_release(struct seccomp_filter *orig)
+{
+ /* Notify about any unused filters in the task's former filter tree. */
+ __seccomp_filter_orphan(orig);
+ /* Finally drop all references to the task's former tree. */
+ __put_seccomp_filter(orig);
+}
+
/**
- * seccomp_filter_release - Detach the task from its filter tree
- * and drop its reference count during
- * exit.
+ * seccomp_filter_release - Detach the task from its filter tree,
+ * drop its reference count, and notify
+ * about unused filters
*
 * This function should only be called when the task is exiting as
 * it detaches it from its filter tree. As such, READ_ONCE() and
 * barriers are not needed here, as would normally be needed.
 */
void seccomp_filter_release(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

/* Detach task from its filter tree. */
tsk->seccomp.filter = NULL;
- __put_seccomp_filter(orig);
+ __seccomp_filter_release(orig);
}
/* Get a task reference for the new leaf node. */
get_seccomp_filter(caller);
+
/*
* Drop the task reference to the shared ancestor since
* current's path will hold a reference. (This also
* allows a put before the assignment.)
*/
- __put_seccomp_filter(thread->seccomp.filter);
+ __seccomp_filter_release(thread->seccomp.filter);
+
+ /* Make our new filter tree visible. */
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
	atomic_set(&thread->seccomp.filter_count,
		   atomic_read(&caller->seccomp.filter_count));
}
refcount_set(&sfilter->refs, 1);
+ refcount_set(&sfilter->users, 1);
init_waitqueue_head(&sfilter->wqh);
return sfilter;
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	if (!orig)
return;
__get_seccomp_filter(orig);
+ refcount_inc(&orig->users);
}
static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
mutex_unlock(&filter->notify_lock);
+ if (refcount_read(&filter->users) == 0)
+ ret |= EPOLLHUP;
+
return ret;
}
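The user-visible effect of the poll change is that a supervisor blocking in poll() on the notification fd learns when the filter it services has no users left. A rough user-space sketch follows; it assumes a listener fd obtained elsewhere (e.g. via SECCOMP_FILTER_FLAG_NEW_LISTENER and handed over a unix socket), and supervise() is a hypothetical helper, not part of any API.

#include <poll.h>
#include <stdio.h>

/* notify_fd: a seccomp user-notification listener fd (obtained elsewhere). */
static void supervise(int notify_fd)
{
	struct pollfd pfd = { .fd = notify_fd, .events = POLLIN };

	for (;;) {
		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			return;
		}
		if (pfd.revents & POLLHUP) {
			/* With this change: no task uses the filter anymore. */
			printf("filter orphaned, supervisor can exit\n");
			return;
		}
		if (pfd.revents & POLLIN) {
			/*
			 * A task hit SECCOMP_RET_USER_NOTIF; service the
			 * request via SECCOMP_IOCTL_NOTIF_RECV/_SEND here.
			 */
		}
	}
}

POLLHUP is reported in revents regardless of the requested events, so the supervisor does not need to ask for it explicitly.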