diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf.h b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
index 782f36ed027e79..a289746666dd5b 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf.h
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf.h
@@ -6,6 +6,7 @@
 #include <vmlinux.h>
 
 #define MPTCP_SUBFLOWS_MAX 8
+#define MPTCP_SCHED_FLAG_RESCHEDULE (1 << 0)
 
 extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 					bool scheduled) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
index 638ea6aa63b7db..42c11fa483b134 100644
--- a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -69,10 +69,28 @@ int BPF_PROG(bpf_rr_get_subflow, struct mptcp_sock *msk,
 	return 0;
 }
 
+SEC("struct_ops")
+void BPF_PROG(bpf_rr_push, struct mptcp_sock *msk,
+	      struct mptcp_subflow_context *subflow,
+	      struct mptcp_sched_chunk *chunk)
+{
+	struct tcp_sock *tp = bpf_skc_to_tcp_sock(mptcp_subflow_tcp_sock(subflow));
+
+	if (!tp) {
+		/* Should not happen, in that case let default behavior. */
+		return;
+	}
+
+	/* Make sure to reschedule for each MSS. */
+	chunk->limit = tp->mss_cache;
+	chunk->flags |= MPTCP_SCHED_FLAG_RESCHEDULE;
+}
+
 SEC(".struct_ops")
 struct mptcp_sched_ops rr = {
 	.init = (void *)mptcp_sched_rr_init,
 	.release = (void *)mptcp_sched_rr_release,
 	.get_subflow = (void *)bpf_rr_get_subflow,
+	.push = (void *)bpf_rr_push,
 	.name = "bpf_rr",
 };