diff --git a/include/dp_flow.h b/include/dp_flow.h
index a2770433..a3764e41 100644
--- a/include/dp_flow.h
+++ b/include/dp_flow.h
@@ -114,7 +114,10 @@ struct flow_value {
 	} incoming_flow_offloaded_flag;
 	struct dp_ref ref_count;
 	union {
-		enum dp_flow_tcp_state tcp_state;
+		struct {
+			enum dp_flow_tcp_state state;
+			enum dp_flow_dir first_syn_flow_dir;
+		} tcp;
 	} l4_state;
 	bool aged;
 };
diff --git a/src/dp_cntrack.c b/src/dp_cntrack.c
index b26c5fe0..52c075ed 100644
--- a/src/dp_cntrack.c
+++ b/src/dp_cntrack.c
@@ -46,29 +46,31 @@ static __rte_always_inline void dp_cntrack_tcp_state(struct flow_value *flow_val
 	uint8_t tcp_flags = tcp_hdr->tcp_flags;
 
 	if (DP_TCP_PKT_FLAG_RST(tcp_flags)) {
-		flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_RST_FIN;
+		flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_RST_FIN;
 	} else if (DP_TCP_PKT_FLAG_FIN(tcp_flags)) {
 		// this is not entirely 1:1 mapping to fin sequence,
 		// but sufficient to determine if a tcp connection is almost successfuly closed
 		// (last ack is still pending)
-		if (flow_val->l4_state.tcp_state == DP_FLOW_TCP_STATE_ESTABLISHED)
-			flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_FINWAIT;
+		if (flow_val->l4_state.tcp.state == DP_FLOW_TCP_STATE_ESTABLISHED)
+			flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_FINWAIT;
 		else
-			flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_RST_FIN;
+			flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_RST_FIN;
 	} else {
-		switch (flow_val->l4_state.tcp_state) {
+		switch (flow_val->l4_state.tcp.state) {
 		case DP_FLOW_TCP_STATE_NONE:
 		case DP_FLOW_TCP_STATE_RST_FIN:
-			if (DP_TCP_PKT_FLAG_ONLY_SYN(tcp_flags) && df->flow_dir == DP_FLOW_DIR_ORG)
-				flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_NEW_SYN;
+			if (DP_TCP_PKT_FLAG_ONLY_SYN(tcp_flags)) {
+				flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_NEW_SYN;
+				flow_val->l4_state.tcp.first_syn_flow_dir = df->flow_dir;
+			}
 			break;
 		case DP_FLOW_TCP_STATE_NEW_SYN:
-			if (DP_TCP_PKT_FLAG_ONLY_SYNACK(tcp_flags) && df->flow_dir == DP_FLOW_DIR_REPLY)
-				flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_NEW_SYNACK;
+			if (DP_TCP_PKT_FLAG_ONLY_SYNACK(tcp_flags) && df->flow_dir != flow_val->l4_state.tcp.first_syn_flow_dir)
+				flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_NEW_SYNACK;
 			break;
 		case DP_FLOW_TCP_STATE_NEW_SYNACK:
-			if (DP_TCP_PKT_FLAG_ONLY_ACK(tcp_flags) && df->flow_dir == DP_FLOW_DIR_ORG)
-				flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_ESTABLISHED;
+			if (DP_TCP_PKT_FLAG_ONLY_ACK(tcp_flags) && df->flow_dir == flow_val->l4_state.tcp.first_syn_flow_dir)
+				flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_ESTABLISHED;
 			break;
 		default:
 			// FIN-states already handled above
@@ -126,13 +128,13 @@ static __rte_always_inline void dp_cntrack_change_flow_offload_flags(struct rte_
 
 static __rte_always_inline void dp_cntrack_set_timeout_tcp_flow(struct rte_mbuf *m, struct flow_value *flow_val, struct dp_flow *df)
 {
-	if (flow_val->l4_state.tcp_state == DP_FLOW_TCP_STATE_ESTABLISHED) {
+	if (flow_val->l4_state.tcp.state == DP_FLOW_TCP_STATE_ESTABLISHED) {
 		flow_val->timeout_value = DP_FLOW_TCP_EXTENDED_TIMEOUT;
 		dp_cntrack_change_flow_offload_flags(m, flow_val, df);
 	} else {
 		flow_val->timeout_value = flow_timeout;
-		if (flow_val->l4_state.tcp_state == DP_FLOW_TCP_STATE_FINWAIT
-		    || flow_val->l4_state.tcp_state == DP_FLOW_TCP_STATE_RST_FIN)
+		if (flow_val->l4_state.tcp.state == DP_FLOW_TCP_STATE_FINWAIT
+		    || flow_val->l4_state.tcp.state == DP_FLOW_TCP_STATE_RST_FIN)
 			dp_cntrack_change_flow_offload_flags(m, flow_val, df);
 	}
 }
@@ -178,7 +180,7 @@ static __rte_always_inline struct flow_value *flow_table_insert_entry(struct flo
 	dp_cntrack_init_flow_offload_flags(flow_val, df);
 
 	if (df->l4_type == IPPROTO_TCP)
-		flow_val->l4_state.tcp_state = DP_FLOW_TCP_STATE_NONE;
+		flow_val->l4_state.tcp.state = DP_FLOW_TCP_STATE_NONE;
 
 	dp_ref_init(&flow_val->ref_count, dp_free_flow);
 	if (DP_FAILED(dp_add_flow(key, flow_val)))
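
Note (illustrative, not part of the patch): the change above replaces the hard-coded assumption that a TCP handshake always starts from DP_FLOW_DIR_ORG with a recorded first_syn_flow_dir, so a connection initiated from either flow direction can now reach DP_FLOW_TCP_STATE_ESTABLISHED. Below is a minimal standalone sketch of the resulting state machine; the enum values and field names mirror the diff, but the step() helper, the pkt enum, and main() are invented here purely for demonstration.

	/* Standalone sketch of the direction-agnostic handshake tracking.
	 * Compile with: cc sketch.c && ./a.out
	 */
	#include <stdio.h>

	enum dp_flow_tcp_state {
		DP_FLOW_TCP_STATE_NONE,
		DP_FLOW_TCP_STATE_NEW_SYN,
		DP_FLOW_TCP_STATE_NEW_SYNACK,
		DP_FLOW_TCP_STATE_ESTABLISHED,
	};

	enum dp_flow_dir { DP_FLOW_DIR_ORG, DP_FLOW_DIR_REPLY };

	/* mirrors the new 'tcp' struct inside flow_value's l4_state union */
	struct tcp_track {
		enum dp_flow_tcp_state state;
		enum dp_flow_dir first_syn_flow_dir;
	};

	enum pkt { PKT_SYN, PKT_SYNACK, PKT_ACK };

	static void step(struct tcp_track *t, enum pkt p, enum dp_flow_dir dir)
	{
		switch (t->state) {
		case DP_FLOW_TCP_STATE_NONE:
			if (p == PKT_SYN) {
				/* remember who initiated instead of requiring DP_FLOW_DIR_ORG */
				t->state = DP_FLOW_TCP_STATE_NEW_SYN;
				t->first_syn_flow_dir = dir;
			}
			break;
		case DP_FLOW_TCP_STATE_NEW_SYN:
			/* SYN-ACK must come from the opposite direction */
			if (p == PKT_SYNACK && dir != t->first_syn_flow_dir)
				t->state = DP_FLOW_TCP_STATE_NEW_SYNACK;
			break;
		case DP_FLOW_TCP_STATE_NEW_SYNACK:
			/* final ACK must come from the initiator again */
			if (p == PKT_ACK && dir == t->first_syn_flow_dir)
				t->state = DP_FLOW_TCP_STATE_ESTABLISHED;
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		/* a handshake initiated from the reply direction, which the old
		 * code (dir == DP_FLOW_DIR_ORG checks) would never establish */
		struct tcp_track t = { DP_FLOW_TCP_STATE_NONE, DP_FLOW_DIR_ORG };

		step(&t, PKT_SYN, DP_FLOW_DIR_REPLY);
		step(&t, PKT_SYNACK, DP_FLOW_DIR_ORG);
		step(&t, PKT_ACK, DP_FLOW_DIR_REPLY);

		printf("established: %s\n",
		       t.state == DP_FLOW_TCP_STATE_ESTABLISHED ? "yes" : "no");
		return 0;
	}

Running the sketch prints "established: yes", whereas the pre-patch conditions (SYN only accepted with DP_FLOW_DIR_ORG, SYN-ACK only with DP_FLOW_DIR_REPLY) would leave such a flow stuck in DP_FLOW_TCP_STATE_NONE.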