DPVS proxy_protocol: the source IP is only passed when the client pushes data; can the PROXY TCP header be sent proactively once the TCP handshake completes (after the ACK)? #1001

Open
764276020 opened this issue Oct 15, 2024 · 9 comments

Comments

@764276020

764276020 commented Oct 15, 2024

With DPVS proxy_protocol, we observe that the source IP is only passed along when the client pushes data. Could DPVS proactively send the PROXY TCP header to pass through the source IP once the TCP connection is fully established (after the ACK)?
In mail protocols, with ProxyProtocol enabled, the welcome banner is only returned together with the reply to the user's first command, because the real server has not yet obtained the client IP via PROXY TCP. And if the real server sent the welcome banner early, DPVS would presumably skip sending the ProxyProtocol data, since the connection is already in the ESTAB state.
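For context, here is a minimal sketch (not DPVS code) of the PROXY protocol v1 line the real server has to read right after accept() before it can log the client IP and write its greeting; the addresses and ports below are purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        /* v1 format per the proxy-protocol spec:
         * "PROXY TCP4 <src_ip> <dst_ip> <src_port> <dst_port>\r\n" */
        const char *client_ip = "192.0.2.10";   /* real client address (illustrative) */
        const char *local_ip  = "203.0.113.1";  /* DPVS local address (illustrative) */
        char hdr[108];                           /* the spec caps a v1 line at 107 bytes */

        snprintf(hdr, sizeof(hdr), "PROXY TCP4 %s %s %u %u\r\n",
                 client_ip, local_ip, 51234u, 25u);
        fputs(hdr, stdout);
        return 0;
    }

A PROXY-protocol-aware mail server waits for this line before emitting its 220 banner, which is why nothing useful happens until these bytes reach the RS.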

Current version: dpvs version: 1.9-6, build on 2024.05.23.14:33:51

Keepalived v2.0.19 (01/02,2024), git commit v1.9.6+

Copyright(C) 2001-2024 Alexandre Cassen, [email protected]

Built with kernel headers for Linux 3.10.0
Running on Linux 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020

configure options: --enable-ipv6

Config options: LVS VRRP VRRP_AUTH OLD_CHKSUM_COMPAT FIB_ROUTING

System options: PIPE2 SIGNALFD INOTIFY_INIT1 VSYSLOG EPOLL_CREATE1 IPV6_ADVANCED_API LIBNL1 RTA_ENCAP RTA_EXPIRES RTA_PREF FRA_SUPPRESS_PREFIXLEN FRA_TUN_ID RTAX_CC_ALGO RTAX_QUICKACK RTA_VIA FRA_OIFNAME IFA_FLAGS IP_MULTICAST_ALL NET_LINUX_IF_H_COLLISION LIBIPTC_LINUX_NET_IF_H_COLLISION LIBIPVS_NETLINK VRRP_VMAC IFLA_LINK_NETNSID CN_PROC SOCK_NONBLOCK SOCK_CLOEXEC O_PATH GLOB_BRACE INET6_ADDR_GEN_MODE SO_MARK SCHED_RT SCHED_RESET_ON_FORK

@ywc689
Collaborator

ywc689 commented Oct 24, 2024

The proxy_protocol data is added to the first ACK packet sent to the RS; it does not wait for the client to push data. Please check whether syn_proxy is enabled and defer_rs_syn is configured.

@764276020
Author

The proxy_protocol data is added to the first ACK packet sent to the RS; it does not wait for the client to push data. Please check whether syn_proxy is enabled and defer_rs_syn is configured.

It still does not work after enabling them. The DPVS configuration is as follows:
global_defs {
    log_level WARNING
    log_file /home/dpvs/logs/dpvs.log
    ! log_async_mode off
    ! kni on
    ! pdump off
}

! netif config
netif_defs {
    pktpool_size 1048575
    pktpool_cache 256
    fdir_mode signature

<init> device dpdk0 {
    rx {
        queue_number        12
        descriptor_number   1024
        rss                 all
    }
    tx {
        queue_number        12
        descriptor_number   1024
    }
    ! mtu                   1500
    ! promisc_mode
    ! allmulticast
    ! kni_name              dpdk0.kni
}

<init> device dpdk1 {
    rx {
        queue_number        12
        descriptor_number   1024
        rss                 all
    }
    tx {
        queue_number        12
        descriptor_number   1024
    }
    ! mtu                   1500
    ! promisc_mode
    ! allmulticast
    ! kni_name                dpdk1.kni
}


<init> device dpdk2 {
    rx {
        queue_number        12
        descriptor_number   1024
        rss                 all
    }
    tx {
        queue_number        12
        descriptor_number   1024
    }
    ! mtu                   1500
    ! promisc_mode
    ! allmulticast
    ! kni_name              dpdk2.kni
}

<init> device dpdk3 {
    rx {
        queue_number        12
        descriptor_number   1024
        rss                 all
    }
    tx {
        queue_number        12
        descriptor_number   1024
    }
    ! mtu                   1500
    ! promisc_mode
    ! allmulticast
    ! kni_name              dpdk3.kni
}

<init> bonding bond1 {
    mode                    4
    slave                   dpdk2
    slave                   dpdk3
    primary                 dpdk2
    ! numa_node             0          ! /sys/bus/pci/devices/[slaves' pci]/numa_node
    kni_name                bond1.kni
}

<init> bonding bond2 {
    mode                    4
    slave                   dpdk0
    slave                   dpdk1
    primary                 dpdk0
    ! numa_node             1          ! /sys/bus/pci/devices/[slaves' pci]/numa_node
    kni_name                bond2.kni
}

}

! worker config (lcores)
worker_defs {
worker cpu0 {
    type    master
    cpu_id  0
}

<init> worker cpu1 {
    type    slave
    cpu_id  1
    port    bond1 {
        rx_queue_ids     0
        tx_queue_ids     0
        ! isol_rx_cpu_ids  9
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     0
        tx_queue_ids     0
        ! isol_rx_cpu_ids  9
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu2 {
    type    slave
    cpu_id  2
    port    bond1 {
        rx_queue_ids     1
        tx_queue_ids     1
        ! isol_rx_cpu_ids  10
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     1
        tx_queue_ids     1
        ! isol_rx_cpu_ids  10
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu3 {
    type    slave
    cpu_id  3
    port    bond1 {
        rx_queue_ids     2
        tx_queue_ids     2
        ! isol_rx_cpu_ids  11
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     2
        tx_queue_ids     2
        ! isol_rx_cpu_ids  11
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu4 {
    type    slave
    cpu_id  4
    port    bond1 {
        rx_queue_ids     3
        tx_queue_ids     3
        ! isol_rx_cpu_ids  12
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     3
        tx_queue_ids     3
        ! isol_rx_cpu_ids  12
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu5 {
    type    slave
    cpu_id  5
    port    bond1 {
        rx_queue_ids     4
        tx_queue_ids     4
        ! isol_rx_cpu_ids  13
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     4
        tx_queue_ids     4
        ! isol_rx_cpu_ids  13
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu6 {
    type    slave
    cpu_id  6
    port    bond1 {
        rx_queue_ids     5
        tx_queue_ids     5
        ! isol_rx_cpu_ids  14
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     5
        tx_queue_ids     5
        ! isol_rx_cpu_ids  14
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu7 {
    type    slave
    cpu_id  7
    port    bond1 {
        rx_queue_ids     6
        tx_queue_ids     6
        ! isol_rx_cpu_ids  15
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     6
        tx_queue_ids     6
        ! isol_rx_cpu_ids  15
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu8 {
    type    slave
    cpu_id  8
    port    bond1 {
        rx_queue_ids     7
        tx_queue_ids     7
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     7
        tx_queue_ids     7
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu9 {
    type    slave
    cpu_id  9
    port    bond1 {
        rx_queue_ids     8
        tx_queue_ids     8
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     8
        tx_queue_ids     8
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu10 {
    type    slave
    cpu_id  10
    port    bond1 {
        rx_queue_ids     9
        tx_queue_ids     9
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     9
        tx_queue_ids     9
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu11 {
    type    slave
    cpu_id  11
    port    bond1 {
        rx_queue_ids     10
        tx_queue_ids     10
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     10
        tx_queue_ids     10
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
}

<init> worker cpu12 {
    type    slave
    cpu_id  12
    icmp_redirect_core
    port    bond1 {
        rx_queue_ids     11
        tx_queue_ids     11
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
    port    bond2 {
        rx_queue_ids     11
        tx_queue_ids     11
        ! isol_rx_cpu_ids  16
        ! isol_rxq_ring_sz 1048576
    }
}

}

! timer config
timer_defs {
    # cpu job loops to schedule dpdk timer management
    schedule_interval 50
}

! dpvs neighbor config
neigh_defs {
    unres_queue_length 128
    timeout 60
}

! dpvs ipset config
ipset_defs {
    ipset_hash_pool_size 131072
}

! dpvs ipv4 config
ipv4_defs {
    forwarding off
    default_ttl 64
    fragment {
        bucket_number 4096
        bucket_entries 16
        max_entries 4096
        ttl 1
    }
}

! dpvs ipv6 config
ipv6_defs {
    disable off
    forwarding off
    route6 {
        method hlist
        recycle_time 10
    }
}

! control plane config
ctrl_defs {
    lcore_msg {
        ring_size 4096
        sync_msg_timeout_us 20000
        priority_level low
    }
}

! ipvs config
ipvs_defs {
conn {
    conn_pool_size 2097152
    conn_pool_cache 256
    conn_init_timeout 3
    ! expire_quiescent_template
    ! fast_xmit_close
    ! redirect off
}

udp {
    ! defence_udp_drop
    uoa_mode        opp
    uoa_max_trail   3
    timeout {
        oneway      60
        normal      300
        last        3
    }
}

tcp {
    ! defence_tcp_drop
    timeout {
        none        2
        established 300
        syn_sent    3
        syn_recv    30
        fin_wait    7
        time_wait   7
        close       3
        close_wait  7
        last_ack    7
        listen      120
        synack      30
        last        2
    }
    synproxy {
        synack_options {
            mss             1452
            ttl             63
            sack
            ! wscale        0
            ! timestamp
        }
        defer_rs_syn
        rs_syn_max_retry    3
        ack_storm_thresh    10
        max_ack_saved       3
        conn_reuse_state {
            close
            time_wait
            ! fin_wait
            ! close_wait
            ! last_ack
        }
    }
}

}

! sa_pool config
sa_pool {
    pool_hash_size 16
    flow_enable on
}

@764276020
Author

[screenshot]
Only when I send the "helo localhost" command to the server does DPVS pass through the client's source IP.

@764276020
Author

[screenshot]

The same thing happens with proxy_protocol v2.
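For reference, a sketch of what the start of the stream toward the RS should look like with proxy_protocol v2 for TCP over IPv4, following the haproxy proxy-protocol specification; the struct and field names below are illustrative and not taken from the DPVS source:

    #include <stdint.h>

    /* Illustrative layout of a PROXY protocol v2 header for TCP over IPv4. */
    struct pp2_hdr_ipv4 {
        uint8_t  sig[12];    /* fixed signature: \r\n\r\n\0\r\nQUIT\n */
        uint8_t  ver_cmd;    /* 0x21: protocol version 2, command PROXY */
        uint8_t  fam;        /* 0x11: AF_INET + STREAM (TCP over IPv4) */
        uint16_t len;        /* address block length, network order (12 for IPv4) */
        uint32_t src_addr;   /* original client IP, network order */
        uint32_t dst_addr;   /* original destination IP, network order */
        uint16_t src_port;   /* original client port, network order */
        uint16_t dst_port;   /* original destination port, network order */
    } __attribute__((packed));

The RS has to read these 28 bytes at the very start of the stream before it can learn the real client address.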

@764276020
Author

With both syn_proxy and defer_rs_syn enabled, the packet capture looks like this:
[screenshot]

@764276020
Author

With both syn_proxy and defer_rs_syn enabled, DPVS still waits for the client to push data before it sends the final ACK together with the pushed data to the RS. The expected behavior is that once the client completes the three-way handshake, the real server can already obtain the client's IP, log it, and send the welcome banner to the client. What actually happens is that the client must first send a command such as helo localhost to the server, and only then does the server return the welcome banner together with the result of the helo localhost command.
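To illustrate the expectation with a hedged sketch (not the actual mail server code): a PROXY-protocol-aware RS typically blocks right after accept(), reading the PROXY header first, and only then logs the client IP and writes its 220 greeting, so the banner cannot go out until DPVS has delivered those bytes:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical handler for one accepted SMTP connection (fd). */
    void handle_conn(int fd)
    {
        char line[128];
        ssize_t n;

        /* Block until the PROXY v1 line arrives; nothing is sent before this. */
        n = recv(fd, line, sizeof(line) - 1, 0);
        if (n <= 0) {
            close(fd);
            return;
        }
        line[n] = '\0';
        if (strncmp(line, "PROXY ", 6) != 0) {
            close(fd);                  /* no header, so the real client is unknown */
            return;
        }
        fprintf(stderr, "peer info: %s", line);   /* log the source IP from the header */

        /* Only now does the RS send its greeting. */
        const char *banner = "220 mail.example.com ESMTP\r\n";
        send(fd, banner, strlen(banner), 0);
    }

If the first ACK toward the RS carried the header, the RS could log and greet immediately after the handshake, which is the behavior being requested here.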

@ywc689
Collaborator

ywc689 commented Oct 25, 2024

Disable both syn_proxy and defer_rs_syn, then capture packets and check whether the first ACK packet from client->dpvs after the TCP connection is established contains any proxy protocol data.

@764276020
Author

Disable both syn_proxy and defer_rs_syn, then capture packets and check whether the first ACK packet from client->dpvs after the TCP connection is established contains any proxy protocol data.

No, it does not.
[screenshot]

@764276020
Author

[screenshot]
