Commit d6a788a

Merge pull request torvalds#68 from sched-ext/scx-cleanups
Misc example scheduler cleanups
2 parents (41728bb + 58e2a66), commit d6a788a

File tree

5 files changed: +118 -200 lines


tools/sched_ext/scx_common.bpf.h  (-104 lines)
@@ -235,108 +235,4 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
 
-/* BPF core iterators from tools/testing/selftests/bpf/progs/bpf_misc.h */
-struct bpf_iter_num;
-
-extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
-extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
-extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
-
-#ifndef bpf_for_each
-/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
- * using BPF open-coded iterators without having to write mundane explicit
- * low-level loop logic. Instead, it provides for()-like generic construct
- * that can be used pretty naturally. E.g., for some hypothetical cgroup
- * iterator, you'd write:
- *
- *	struct cgroup *cg, *parent_cg = <...>;
- *
- *	bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
- *		bpf_printk("Child cgroup id = %d", cg->cgroup_id);
- *		if (cg->cgroup_id == 123)
- *			break;
- *	}
- *
- * I.e., it looks almost like high-level for each loop in other languages,
- * supports continue/break, and is verifiable by BPF verifier.
- *
- * For iterating integers, the difference betwen bpf_for_each(num, i, N, M)
- * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
- * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
- * *`, not just `int`. So for integers bpf_for() is more convenient.
- *
- * Note: this macro relies on C99 feature of allowing to declare variables
- * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
- * extension: __attribute__((cleanup(<func>))), supported by both GCC and
- * Clang.
- */
-#define bpf_for_each(type, cur, args...) for (					\
-	/* initialize and define destructor */					\
-	struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */ \
-						    cleanup(bpf_iter_##type##_destroy))), \
-	/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
-			*___p __attribute__((unused)) = (			\
-				bpf_iter_##type##_new(&___it, ##args),		\
-	/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-	/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
-				(void)bpf_iter_##type##_destroy, (void *)0);	\
-	/* iteration and termination check */					\
-	(((cur) = bpf_iter_##type##_next(&___it)));				\
-)
-#endif /* bpf_for_each */
-
-#ifndef bpf_for
-/* bpf_for(i, start, end) implements a for()-like looping construct that sets
- * provided integer variable *i* to values starting from *start* through,
- * but not including, *end*. It also proves to BPF verifier that *i* belongs
- * to range [start, end), so this can be used for accessing arrays without
- * extra checks.
- *
- * Note: *start* and *end* are assumed to be expressions with no side effects
- * and whose values do not change throughout bpf_for() loop execution. They do
- * not have to be statically known or constant, though.
- *
- * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
- * loop bound variables and cleanup attribute, supported by GCC and Clang.
- */
-#define bpf_for(i, start, end) for (						\
-	/* initialize and define destructor */					\
-	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
-						 cleanup(bpf_iter_num_destroy))), \
-	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
-			*___p __attribute__((unused)) = (			\
-				bpf_iter_num_new(&___it, (start), (end)),	\
-	/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-	/* for bpf_iter_num_destroy() when used from cleanup() attribute */	\
-				(void)bpf_iter_num_destroy, (void *)0);		\
-	({									\
-		/* iteration step */						\
-		int *___t = bpf_iter_num_next(&___it);				\
-		/* termination and bounds check */				\
-		(___t && ((i) = *___t, (i) >= (start) && (i) < (end)));	\
-	});									\
-)
-#endif /* bpf_for */
-
-#ifndef bpf_repeat
-/* bpf_repeat(N) performs N iterations without exposing iteration number
- *
- * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
- * loop bound variables and cleanup attribute, supported by GCC and Clang.
- */
-#define bpf_repeat(N) for (							\
-	/* initialize and define destructor */					\
-	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
-						 cleanup(bpf_iter_num_destroy))), \
-	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
-			*___p __attribute__((unused)) = (			\
-				bpf_iter_num_new(&___it, 0, (N)),		\
-	/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
-	/* for bpf_iter_num_destroy() when used from cleanup() attribute */	\
-				(void)bpf_iter_num_destroy, (void *)0);		\
-	bpf_iter_num_next(&___it);						\
-	/* nothing here */							\
-)
-#endif /* bpf_repeat */
-
 #endif /* __SCHED_EXT_COMMON_BPF_H */
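
The block removed above is documentation plus three looping macros: bpf_for_each() for arbitrary open-coded BPF iterators, bpf_for() for verifier-checked integer ranges, and bpf_repeat() for a fixed iteration count. For reference, here is a minimal hypothetical sketch of how a scheduler BPF program could use them while scx_common.bpf.h still provided them; the scratch array, its size, and the helper name are made up for illustration and are not part of this commit:

	#include "scx_common.bpf.h"

	/* illustrative scratch array; any fixed-size array works the same way */
	static u64 example_sums[16];

	static void example_iter_usage(void)
	{
		int i, *cur;

		/* bpf_for() proves to the verifier that i stays in [0, 16),
		 * so indexing example_sums needs no extra bounds check.
		 */
		bpf_for(i, 0, 16)
			example_sums[i] += i;

		/* bpf_for_each(num, ...) yields an `int *` cursor and gives the
		 * verifier no range proof, so it is only dereferenced here.
		 */
		bpf_for_each(num, cur, 0, 16)
			bpf_printk("cur = %d", *cur);

		/* bpf_repeat() runs a fixed number of iterations without
		 * exposing the iteration counter at all.
		 */
		bpf_repeat(4)
			bpf_printk("tick");
	}

As the removed comments note, continue and break work as in a regular for() loop, and the cleanup attribute guarantees the iterator destructor runs on every exit path.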

tools/sched_ext/scx_flatcg.bpf.c  (+9 -1 lines)
@@ -510,7 +510,15 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
 	struct cgroup *cgrp;
 	struct fcg_cgrp_ctx *cgc;
 
-	/* scale the execution time by the inverse of the weight and charge */
+	/*
+	 * Scale the execution time by the inverse of the weight and charge.
+	 *
+	 * Note that the default yield implementation yields by setting
+	 * @p->scx.slice to zero and the following would treat the yielding task
+	 * as if it has consumed all its slice. If this penalizes yielding tasks
+	 * too much, determine the execution time by taking explicit timestamps
+	 * instead of depending on @p->scx.slice.
+	 */
 	if (!fifo_sched)
 		p->scx.dsq_vtime +=
 			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;