mirror of
https://github.com/fail0verflow/switch-linux.git
synced 2025-05-04 02:34:21 -04:00
tcp: Revert "tcp: remove CA_ACK_SLOWPATH"
This change was a follow-up to the header prediction removal, so first revert this as a prerequisite to backing out the header-prediction (HP) removal.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
0da93d2ebb
commit
c1d2b4c3e2
3 changed files with 50 additions and 23 deletions
|
@@ -910,8 +910,9 @@ enum tcp_ca_event {
|
||||||
|
|
||||||
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
|
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
|
||||||
enum tcp_ca_ack_event_flags {
|
enum tcp_ca_ack_event_flags {
|
||||||
CA_ACK_WIN_UPDATE = (1 << 0), /* ACK updated window */
|
CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
|
||||||
CA_ACK_ECE = (1 << 1), /* ECE bit is set on ack */
|
CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
|
||||||
|
CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -3552,7 +3552,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||||
u32 lost = tp->lost;
|
u32 lost = tp->lost;
|
||||||
int acked = 0; /* Number of packets newly acked */
|
int acked = 0; /* Number of packets newly acked */
|
||||||
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
|
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
|
||||||
u32 ack_ev_flags = 0;
|
|
||||||
|
|
||||||
sack_state.first_sackt = 0;
|
sack_state.first_sackt = 0;
|
||||||
sack_state.rate = &rs;
|
sack_state.rate = &rs;
|
||||||
|
@@ -3593,6 +3592,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||||
if (flag & FLAG_UPDATE_TS_RECENT)
|
if (flag & FLAG_UPDATE_TS_RECENT)
|
||||||
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
|
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
|
||||||
|
|
||||||
|
{
|
||||||
|
u32 ack_ev_flags = CA_ACK_SLOWPATH;
|
||||||
|
|
||||||
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
|
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
|
||||||
flag |= FLAG_DATA;
|
flag |= FLAG_DATA;
|
||||||
else
|
else
|
||||||
|
@@ -3606,13 +3608,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||||
|
|
||||||
if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
|
if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
|
||||||
flag |= FLAG_ECE;
|
flag |= FLAG_ECE;
|
||||||
ack_ev_flags = CA_ACK_ECE;
|
ack_ev_flags |= CA_ACK_ECE;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (flag & FLAG_WIN_UPDATE)
|
if (flag & FLAG_WIN_UPDATE)
|
||||||
ack_ev_flags |= CA_ACK_WIN_UPDATE;
|
ack_ev_flags |= CA_ACK_WIN_UPDATE;
|
||||||
|
|
||||||
tcp_in_ack_event(sk, ack_ev_flags);
|
tcp_in_ack_event(sk, ack_ev_flags);
|
||||||
|
}
|
||||||
|
|
||||||
/* We passed data and got it acked, remove any soft error
|
/* We passed data and got it acked, remove any soft error
|
||||||
* log. Something worked...
|
* log. Something worked...
|
||||||
|
|
|
@@ -153,6 +153,24 @@ static inline void update_rtt_min(struct westwood *w)
|
||||||
w->rtt_min = min(w->rtt, w->rtt_min);
|
w->rtt_min = min(w->rtt, w->rtt_min);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* @westwood_fast_bw
|
||||||
|
* It is called when we are in fast path. In particular it is called when
|
||||||
|
* header prediction is successful. In such case in fact update is
|
||||||
|
* straight forward and doesn't need any particular care.
|
||||||
|
*/
|
||||||
|
static inline void westwood_fast_bw(struct sock *sk)
|
||||||
|
{
|
||||||
|
const struct tcp_sock *tp = tcp_sk(sk);
|
||||||
|
struct westwood *w = inet_csk_ca(sk);
|
||||||
|
|
||||||
|
westwood_update_window(sk);
|
||||||
|
|
||||||
|
w->bk += tp->snd_una - w->snd_una;
|
||||||
|
w->snd_una = tp->snd_una;
|
||||||
|
update_rtt_min(w);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* @westwood_acked_count
|
* @westwood_acked_count
|
||||||
* This function evaluates cumul_ack for evaluating bk in case of
|
* This function evaluates cumul_ack for evaluating bk in case of
|
||||||
|
@@ -205,12 +223,17 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
|
||||||
|
|
||||||
static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
|
static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
|
||||||
{
|
{
|
||||||
|
if (ack_flags & CA_ACK_SLOWPATH) {
|
||||||
struct westwood *w = inet_csk_ca(sk);
|
struct westwood *w = inet_csk_ca(sk);
|
||||||
|
|
||||||
westwood_update_window(sk);
|
westwood_update_window(sk);
|
||||||
w->bk += westwood_acked_count(sk);
|
w->bk += westwood_acked_count(sk);
|
||||||
|
|
||||||
update_rtt_min(w);
|
update_rtt_min(w);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
westwood_fast_bw(sk);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
|
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
|
||||||
|
|
Loading…
Add table
Reference in a new issue