Skip to content

Commit

Permalink
Merge pull request #86 from hengyoush/docs/vitepress
Browse files Browse the repository at this point in the history
Merge docs/vitepress into main
  • Loading branch information
hengyoush authored Oct 26, 2024
2 parents a53f080 + cc7555f commit b421c07
Show file tree
Hide file tree
Showing 69 changed files with 6,828 additions and 1,474 deletions.
66 changes: 66 additions & 0 deletions .github/workflows/deploy-pages.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# Sample workflow for building and deploying a VitePress site to GitHub Pages.
#
name: Deploy VitePress site to Pages

on:
  # Runs on pushes targeting the `main` branch. If you use the
  # `master` branch as your default branch, change this to `master`.
  push:
    branches: [main]

  # Allows you to run this workflow manually from the Actions tab.
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages.
permissions:
  contents: read
  pages: write
  id-token: write

# Allow only one concurrent deployment, skipping runs queued between the
# run in progress and the latest queued run. However, do NOT cancel
# in-progress runs, as we want to allow these production deployments to finish.
concurrency:
  group: pages
  cancel-in-progress: false

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Not needed if lastUpdated is not enabled
      # - uses: pnpm/action-setup@v3 # Uncomment this block if you're using pnpm
      #   with:
      #     version: 9
      # - uses: oven-sh/setup-bun@v1 # Uncomment if you're using Bun
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: npm # or pnpm / yarn
      - name: Setup Pages
        uses: actions/configure-pages@v4
      - name: Install dependencies
        run: npm ci # or pnpm install / yarn install / bun install
      - name: Build with VitePress
        run: npm run docs:build # or pnpm docs:build / yarn docs:build / bun run docs:build
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: docs/.vitepress/dist

  # Deployment job
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    needs: build
    runs-on: ubuntu-latest
    name: Deploy
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
58 changes: 56 additions & 2 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ jobs:
- 'bpf-20240912.022020'
# renovate: datasource=docker depName=quay.io/lvh-images/kernel-images
- 'bpf-next-20240912.022020'
timeout-minutes: 10
timeout-minutes: 30
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4
Expand Down Expand Up @@ -149,6 +149,45 @@ jobs:
sudo mkdir -p /opt/cni/bin
sudo tar Cxzvvf /opt/cni/bin cni-plugins-linux-amd64-v1.5.0.tgz
#install redis-cli
sudo apt install -y redis-tools
#install python pip
sudo apt install -y python3 python3-pip pipx
- name: Test mysql
uses: cilium/little-vm-helper@97c89f004bd0ab4caeacfe92ebc956e13e362e6b # v0.0.19
with:
provision: 'false'
cmd: |
set -ex
uname -a
cat /etc/issue
pushd /host
if [ -f "/var/lib/kyanos/btf/current.btf" ]; then
bash /host/testdata/test_mysql.sh '/host/kyanos/kyanos $kyanos_log_option --btf /var/lib/kyanos/btf/current.btf'
else
bash /host/testdata/test_mysql.sh '/host/kyanos/kyanos $kyanos_log_option'
fi
popd
- name: Test https
uses: cilium/little-vm-helper@97c89f004bd0ab4caeacfe92ebc956e13e362e6b # v0.0.19
with:
provision: 'false'
cmd: |
set -ex
uname -a
cat /etc/issue
pushd /host
if [ -f "/var/lib/kyanos/btf/current.btf" ]; then
bash /host/testdata/test_https.sh '/host/kyanos/kyanos $kyanos_log_option --btf /var/lib/kyanos/btf/current.btf'
else
bash /host/testdata/test_https.sh '/host/kyanos/kyanos $kyanos_log_option'
fi
popd
- name: Test base
uses: cilium/little-vm-helper@97c89f004bd0ab4caeacfe92ebc956e13e362e6b # v0.0.19
with:
Expand All @@ -163,6 +202,7 @@ jobs:
bash /host/testdata/test_base.sh '/host/kyanos/kyanos $kyanos_log_option'
fi
- name: Test filter by l3/l4 info
uses: cilium/little-vm-helper@97c89f004bd0ab4caeacfe92ebc956e13e362e6b # v0.0.19
with:
Expand Down Expand Up @@ -259,4 +299,18 @@ jobs:
bash /host/testdata/test_containerd_filter_by_container_id.sh '/host/kyanos/kyanos $kyanos_log_option --btf /var/lib/kyanos/btf/current.btf'
else
bash /host/testdata/test_containerd_filter_by_container_id.sh '/host/kyanos/kyanos $kyanos_log_option'
fi
fi
- name: Test redis
uses: cilium/little-vm-helper@97c89f004bd0ab4caeacfe92ebc956e13e362e6b # v0.0.19
with:
provision: 'false'
cmd: |
set -ex
uname -a
cat /etc/issue
if [ -f "/var/lib/kyanos/btf/current.btf" ]; then
bash /host/testdata/test_redis.sh '/host/kyanos/kyanos $kyanos_log_option --btf /var/lib/kyanos/btf/current.btf'
else
bash /host/testdata/test_redis.sh '/host/kyanos/kyanos $kyanos_log_option'
fi
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,18 @@
*.o
__debug**
node_modules/
docs/.vitepress/dist
docs/.vitepress/cache

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# asciinema files
*.cast

# Dependency directories (remove the comment below to include it)
# vendor/

Expand Down
2 changes: 2 additions & 0 deletions agent/agent.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,8 @@ func SetupAgent(options ac.AgentOptions) {
return
}

bf.AttachProgs(options)

stop := false
go func() {
<-stopper
Expand Down
8 changes: 4 additions & 4 deletions agent/analysis/analysis.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,10 +168,10 @@ func (a *Analyzer) Run() {
case record := <-a.recordsChannel:
a.analyze(record)
a.recordReceived++
if a.EnableBatchModel() && a.recordReceived == a.TargetSamples {
a.resultChannel <- a.harvest()
return
}
// if a.EnableBatchModel() && a.recordReceived == a.TargetSamples {
// a.resultChannel <- a.harvest()
// return
// }
case <-a.AnalysisOptions.HavestSignal:
a.resultChannel <- a.harvest()
if a.AnalysisOptions.EnableBatchModel() {
Expand Down
13 changes: 13 additions & 0 deletions agent/analysis/classfier.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,19 @@ func getClassfier(classfierType anc.ClassfierType, options anc.AnalysisOptions)
}
}

// GetClassfierType resolves the effective classifier type to use for record r.
// When classfierType is ProtocolAdaptive, the protocol-specific classifier
// configured in options for the record's traffic protocol is returned; if no
// classifier is configured for that protocol, it falls back to RemoteIp.
// For any other classfierType, the value is returned unchanged.
func GetClassfierType(classfierType anc.ClassfierType, options anc.AnalysisOptions, r *anc.AnnotatedRecord) anc.ClassfierType {
	if classfierType != anc.ProtocolAdaptive {
		return classfierType
	}
	if c, ok := options.ProtocolSpecificClassfiers[bpf.AgentTrafficProtocolT(r.Protocol)]; ok {
		return c
	}
	return anc.RemoteIp
}

func getClassIdHumanReadableFunc(classfierType anc.ClassfierType, options anc.AnalysisOptions) (ClassIdAsHumanReadable, bool) {
if classfierType == anc.ProtocolAdaptive {
return func(ar *anc.AnnotatedRecord) string {
Expand Down
14 changes: 13 additions & 1 deletion agent/analysis/common/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ type AnalysisOptions struct {
CleanWhenHarvest bool

// Fast Inspect Options
TimeLimit int
SlowMode bool
BigRespMode bool
BigReqMode bool
TargetSamples int
CurrentReceivedSamples func() int
HavestSignal chan struct{}

Expand Down Expand Up @@ -249,6 +249,18 @@ type AnnotatedRecordToStringOptions struct {
IncludeConnDesc bool
}

// TimeDetailInfo renders the record's timing breakdown — total duration,
// read-from-socket-buffer duration and black-box duration — together with
// syscall statistics, omitting the connection description.
func (r *AnnotatedRecord) TimeDetailInfo() string {
	opts := AnnotatedRecordToStringOptions{
		IncludeConnDesc:    false,
		IncludeSyscallStat: true,
		MetricTypeSet: MetricTypeSet{
			TotalDuration:                true,
			ReadFromSocketBufferDuration: true,
			BlackBoxDuration:             true,
		},
	}
	return r.String(opts)
}

func (r *AnnotatedRecord) String(options AnnotatedRecordToStringOptions) string {
nano := options.Nano
var result string
Expand Down
10 changes: 8 additions & 2 deletions agent/analysis/stat.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,8 +231,14 @@ func (s *StatRecorder) ReceiveRecord(r protocol.Record, connection *conn.Connect
if hasNicInEvents && hasDevOutEvents {
annotatedRecord.BlackBoxDuration = float64(events.nicIngressEvents[len(events.nicIngressEvents)-1].GetTimestamp()) - float64(events.devOutEvents[0].GetTimestamp())
}
if hasUserCopyEvents && hasTcpInEvents {
annotatedRecord.ReadFromSocketBufferDuration = float64(events.userCopyEvents[len(events.userCopyEvents)-1].GetTimestamp()) - float64(events.tcpInEvents[0].GetTimestamp())
if (hasUserCopyEvents || hasReadSyscallEvents) && hasTcpInEvents {
var readFromEndTime float64
if hasUserCopyEvents {
readFromEndTime = float64(events.userCopyEvents[len(events.userCopyEvents)-1].GetTimestamp())
} else {
readFromEndTime = float64(events.readSyscallEvents[len(events.readSyscallEvents)-1].GetTimestamp())
}
annotatedRecord.ReadFromSocketBufferDuration = readFromEndTime - float64(events.tcpInEvents[0].GetTimestamp())
}
if hasTcpInEvents && hasNicInEvents {
annotatedRecord.CopyToSocketBufferDuration = float64(events.tcpInEvents[len(events.tcpInEvents)-1].GetTimestamp() - events.nicIngressEvents[0].GetTimestamp())
Expand Down
42 changes: 32 additions & 10 deletions agent/conn/conntrack.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ type Connection4 struct {

ssl bool

tracable bool
onRoleChanged func()

TempKernEvents []*bpf.AgentKernEvt
TempConnEvents []*bpf.AgentConnEvtT
TempSyscallEvents []*bpf.SyscallEventData
Expand Down Expand Up @@ -245,6 +248,10 @@ func (c *Connection4) OnClose(needClearBpfMap bool) {
}

func (c *Connection4) UpdateConnectionTraceable(traceable bool) {
if c.tracable == traceable {
return
}
c.tracable = traceable
key, _ := c.extractSockKeys()
sockKeyConnIdMap := bpf.GetMapFromObjs(bpf.Objs, "SockKeyConnIdMap")
c.doUpdateConnIdMapProtocolToUnknwon(key, sockKeyConnIdMap, traceable)
Expand Down Expand Up @@ -384,14 +391,18 @@ func (c *Connection4) parseStreamBuffer(streamBuffer *buffer.StreamBuffer, messa
// parseState = parseResult.ParseState
switch parseResult.ParseState {
case protocol.Success:
common.ConntrackLog.Debugf("[parseStreamBuffer] Success, %s(%s)", c.ToString(), messageType.String())
if c.Role == bpf.AgentEndpointRoleTKRoleUnknown && len(parseResult.ParsedMessages) > 0 {
parsedMessage := parseResult.ParsedMessages[0]
if (bpf.IsIngressStep(ke.Step) && parsedMessage.IsReq()) || (bpf.IsEgressStep(ke.Step) && !parsedMessage.IsReq()) {
c.Role = bpf.AgentEndpointRoleTKRoleServer
} else {
c.Role = bpf.AgentEndpointRoleTKRoleClient
}
common.ConntrackLog.Debugf("Update %s role", c.ToString())
if c.onRoleChanged != nil {
c.onRoleChanged()
}
common.ConntrackLog.Debugf("[parseStreamBuffer] Update %s role", c.ToString())
c.resetParseProgress()
} else {
if len(parseResult.ParsedMessages) > 0 && parseResult.ParsedMessages[0].IsReq() != (messageType == protocol.Request) {
Expand All @@ -405,28 +416,32 @@ func (c *Connection4) parseStreamBuffer(streamBuffer *buffer.StreamBuffer, messa
pos := parser.FindBoundary(streamBuffer, messageType, 1)
if pos != -1 {
streamBuffer.RemovePrefix(pos)
common.ConntrackLog.Debugf("[parseStreamBuffer] Invalid, %s Removed streambuffer some head data(%d bytes) due to stuck from %s queue(found boundary) and continue", c.ToString(), pos, messageType.String())
stop = false
} else if c.progressIsStucked(streamBuffer) {
if streamBuffer.Head().Len() > int(ke.Len) {
common.ConntrackLog.Debugf("Invalid, %s Removed streambuffer some head data(%d bytes) due to stuck from %s queue", c.ToString(), streamBuffer.Head().Len()-int(ke.Len), messageType.String())
common.ConntrackLog.Debugf("[parseStreamBuffer] Invalid, %s Removed streambuffer some head data(%d bytes) due to stuck from %s queue", c.ToString(), streamBuffer.Head().Len()-int(ke.Len), messageType.String())
streamBuffer.RemovePrefix(streamBuffer.Head().Len() - int(ke.Len))
stop = false
} else {
removed := c.checkProgress(streamBuffer)
if removed {
common.ConntrackLog.Debugf("Invalid, %s Removed streambuffer head due to stuck from %s queue", c.ToString(), messageType.String())
common.ConntrackLog.Debugf("[parseStreamBuffer] Invalid, %s Removed streambuffer head due to stuck from %s queue and continue", c.ToString(), messageType.String())
stop = false
} else {
common.ConntrackLog.Debugf("[parseStreamBuffer] Invalid, %s Removed streambuffer head due to stuck from %s queue and stop", c.ToString(), messageType.String())
stop = true
}
}
} else {
stop = true

common.ConntrackLog.Debugf("[parseStreamBuffer] Invalid, %s stop process %s queue", c.ToString(), messageType.String())
}
case protocol.NeedsMoreData:
removed := c.checkProgress(streamBuffer)
if removed {
common.ConntrackLog.Debugf("Needs more data, %s Removed streambuffer head due to stuck from %s queue", c.ToString(), messageType.String())
common.ConntrackLog.Debugf("[parseStreamBuffer] Needs more data, %s Removed streambuffer head due to stuck from %s queue", c.ToString(), messageType.String())
stop = false
} else {
stop = true
Expand Down Expand Up @@ -461,13 +476,16 @@ func (c *Connection4) getLastProgressTime(sb *buffer.StreamBuffer) int64 {
return c.lastRespMadeProgressTime
}
}

// maxAllowStuckTime is the maximum age (in milliseconds, compared against
// time.Now().UnixMilli()) that a stream buffer's head may go without making
// parsing progress before the connection's parse progress is considered stuck.
const maxAllowStuckTime = 1000

func (c *Connection4) progressIsStucked(sb *buffer.StreamBuffer) bool {
if c.getLastProgressTime(sb) == 0 {
c.updateProgressTime(sb)
return false
}
headTime, ok := sb.FindTimestampBySeq(uint64(sb.Position0()))
if !ok || time.Now().UnixMilli()-int64(common.NanoToMills(headTime)) > 5000 {
if !ok || time.Now().UnixMilli()-int64(common.NanoToMills(headTime)) > maxAllowStuckTime {
return true
}
return false
Expand All @@ -480,7 +498,7 @@ func (c *Connection4) checkProgress(sb *buffer.StreamBuffer) bool {
headTime, ok := sb.FindTimestampBySeq(uint64(sb.Position0()))
now := time.Now().UnixMilli()
headTimeMills := int64(common.NanoToMills(headTime))
if !ok || now-headTimeMills > 5000 {
if !ok || now-headTimeMills > maxAllowStuckTime {
sb.RemoveHead()
return true
} else {
Expand Down Expand Up @@ -512,14 +530,18 @@ func (c *Connection4) IsServerSide() bool {
// IsSsl reports whether this connection carries SSL/TLS traffic.
func (c *Connection4) IsSsl() bool {
	return c.ssl
}

func (c *Connection4) Side() common.SideEnum {
if c.Role == bpf.AgentEndpointRoleTKRoleClient {
func endpointRoleAsSideEnum(role bpf.AgentEndpointRoleT) common.SideEnum {
if role == bpf.AgentEndpointRoleTKRoleClient {
return common.ClientSide
} else {
} else if role == bpf.AgentEndpointRoleTKRoleServer {
return common.ServerSide
} else {
return common.AllSide
}
}
// Side returns which side of the connection this endpoint represents,
// derived from the connection's BPF endpoint role.
func (c *Connection4) Side() common.SideEnum {
	return endpointRoleAsSideEnum(c.Role)
}
func (c *Connection4) Identity() string {
cd := common.ConnDesc{
LocalPort: c.LocalPort,
Expand Down
Loading

0 comments on commit b421c07

Please sign in to comment.