Mirror of https://github.com/SleepingBag945/dddd.git (synced 2025-06-01 11:11:13 +00:00)

nuclei v3.1.8

This commit is contained in:
parent 817704a0fb
commit 9f8f4de4ad
@@ -1,5 +1,11 @@
# Changelog

## 2023.2.1

Updated nuclei to v3.1.8, so POCs can now be written with its latest template features.

## 2023.1.15

Added the -oip flag: when set, assets are pulled from network-space search engines in ip:port form rather than domain(ip):port, although the credits consumed do not decrease. (Applies to fofa, hunter and quake.)
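As a rough illustration of the flag described above (only -oip itself is taken from the changelog and help text; the other options are placeholders, not confirmed dddd flags):

```shell
# Hypothetical run: with -oip, assets pulled from fofa/hunter/quake come back
# as ip:port instead of domain(ip):port; credit consumption is unchanged.
./dddd <usual target and search-engine options> -oip
```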
@@ -16,7 +16,6 @@ import (
	"github.com/projectdiscovery/nuclei/v3/pkg/exportrunner"
	"github.com/projectdiscovery/nuclei/v3/pkg/operators/common/dsl"
	"github.com/projectdiscovery/nuclei/v3/pkg/types"
	"github.com/projectdiscovery/nuclei/v3/pkg/utils/monitor"
	errorutil "github.com/projectdiscovery/utils/errors"
	fileutil "github.com/projectdiscovery/utils/file"
)
@@ -68,11 +67,6 @@ func CallNuclei(TargetAndPocsName map[string][]string,

	exportrunner.ExportRunnerParseOptions(options)

	if options.HangMonitor {
		cancel := monitor.NewStackMonitor(10 * time.Second)
		defer cancel()
	}

	nucleiRunner, err := exportrunner.ExportRunnerNew(options)
	if err != nil {
		gologger.Fatal().Msgf("Could not create runner: %s\n", err)
@@ -34,7 +34,7 @@ func GC() {
	debug.FreeOSMemory()
}

-var version = "1.8"
+var version = "1.9"

func showBanner() {
	banner := fmt.Sprintf(`
@@ -294,7 +294,7 @@ func Flag() {
	flag.IntVar(&structs.GlobalConfig.SubdomainBruteForceThreads, "sbft", 150, "爆破子域名协程数量")
	flag.BoolVar(&structs.GlobalConfig.AllowLocalAreaDomain, "ld", false, "允许域名解析到局域网")
	flag.BoolVar(&structs.GlobalConfig.AllowCDNAssets, "ac", false, "允许扫描带CDN的资产,默认略过")
-	flag.BoolVar(&structs.GlobalConfig.NoHostBind,"nh",false,"禁用域名绑定资产探测")
+	flag.BoolVar(&structs.GlobalConfig.NoHostBind, "nh", false, "禁用域名绑定资产探测")

	// 端口扫描
	flag.StringVar(&PortString, "p", "", "目标IP扫描的端口。 默认扫描Top1000")
13 details.md
@@ -156,7 +156,7 @@ icp.name="带带弟弟"
# Detailed parameters

```shell
-coco@Mac % ./dddd -h
+coco@Mac dddd % ./dddd -h

      _       _       _       _
   __| |   __| |   __| |   __| |
@@ -164,12 +164,14 @@ coco@Mac % ./dddd -h
  \__,_|  \__,_|  \__,_|  \__,_|
_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-`"`-0-0-'
-dddd.version: 1.6
+dddd.version: 1.9

-Usage of dddd:
+Usage of ./dddd:
  -Pn
        Disable host discovery (icmp, tcp)
  -a    Enable audit logging
  -ac
        Allow scanning assets behind a CDN (skipped by default)
  -alf string
        Audit log file name (default "audit.log")
  -ffmc int
@@ -194,6 +196,8 @@ Usage of dddd:
        Disable active fingerprint probing
  -ngp
        Disable Golang POC probing
  -nh
        Disable probing of domain-bound assets
  -ni
        Disable the Interactsh server, excluding reverse-connection templates
  -nicmp
@@ -206,6 +210,8 @@ Usage of dddd:
        Disable passive subdomain enumeration
  -o string
        Output the report in HTML format
  -oip
        Pull assets from network-space search engines as IP:Port instead of Domain(IP):Port
  -p string
        Ports to scan on target IPs (Top1000 by default)
  -pc int
@@ -246,6 +252,7 @@ Usage of dddd:
        Web probe threads, adjust to your network environment (default 100)
  -wto int
        Web probe timeout, adjust to your network environment (default 12)

```
28 go.mod
@ -9,7 +9,7 @@ require (
|
||||
github.com/jlaffaye/ftp v0.2.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/projectdiscovery/dnsx v1.1.5
|
||||
github.com/projectdiscovery/gologger v1.1.11
|
||||
github.com/projectdiscovery/gologger v1.1.12
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
github.com/sijms/go-ora/v2 v2.7.9
|
||||
github.com/tomatome/grdp v0.1.0
|
||||
@ -68,7 +68,7 @@ require (
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/cloudflare/cfssl v1.6.4 // indirect
|
||||
github.com/cloudflare/circl v1.3.6 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/corpix/uarand v0.2.0 // indirect
|
||||
github.com/denisbrodbeck/machineid v1.0.1 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
@ -138,6 +138,7 @@ require (
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.17.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/libdns/libdns v0.2.1 // indirect
|
||||
@ -147,22 +148,25 @@ require (
|
||||
github.com/mackerelio/go-osstat v0.2.4 // indirect
|
||||
github.com/mfonda/simhash v0.0.0-20151007195837-79f94a1100d6 // indirect
|
||||
github.com/mholt/acmez v1.2.0 // indirect
|
||||
github.com/mholt/archiver/v3 v3.5.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/praetorian-inc/fingerprintx v1.1.9 // indirect
|
||||
github.com/projectdiscovery/chaos-client v0.5.1 // indirect
|
||||
github.com/projectdiscovery/dsl v0.0.35 // indirect
|
||||
github.com/projectdiscovery/fastdialer v0.0.49 // indirect
|
||||
github.com/projectdiscovery/fastdialer v0.0.55 // indirect
|
||||
github.com/projectdiscovery/fasttemplate v0.0.2 // indirect
|
||||
github.com/projectdiscovery/fdmax v0.0.4 // indirect
|
||||
github.com/projectdiscovery/go-smb2 v0.0.0-20240129202741-052cc450c6cb // indirect
|
||||
github.com/projectdiscovery/gostruct v0.0.2 // indirect
|
||||
github.com/projectdiscovery/gozero v0.0.1 // indirect
|
||||
github.com/projectdiscovery/n3iwf v0.0.0-20230523120440-b8cd232ff1f5 // indirect
|
||||
github.com/projectdiscovery/networkpolicy v0.0.6 // indirect
|
||||
github.com/projectdiscovery/networkpolicy v0.0.7 // indirect
|
||||
github.com/projectdiscovery/rawhttp v0.1.28 // indirect
|
||||
github.com/projectdiscovery/rdap v0.9.1-0.20221108103045-9865884d1917 // indirect
|
||||
github.com/projectdiscovery/sarif v0.0.1 // indirect
|
||||
@ -210,7 +214,7 @@ require (
|
||||
go.uber.org/zap v1.25.0 // indirect
|
||||
goftp.io/server/v2 v2.0.1 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/term v0.15.0 // indirect
|
||||
golang.org/x/term v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
|
||||
gopkg.in/corvus-ch/zbase32.v1 v1.0.0 // indirect
|
||||
@ -235,7 +239,7 @@ require (
|
||||
github.com/cheggaaa/pb/v3 v3.1.4 // indirect
|
||||
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 // indirect
|
||||
github.com/dlclark/regexp2 v1.10.0 // indirect
|
||||
github.com/dsnet/compress v0.0.1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
@ -268,14 +272,14 @@ require (
|
||||
github.com/projectdiscovery/freeport v0.0.5 // indirect
|
||||
github.com/projectdiscovery/goconfig v0.0.1 // indirect
|
||||
github.com/projectdiscovery/goflags v0.1.29
|
||||
github.com/projectdiscovery/hmap v0.0.29
|
||||
github.com/projectdiscovery/hmap v0.0.35
|
||||
github.com/projectdiscovery/httpx v1.3.5
|
||||
github.com/projectdiscovery/mapcidr v1.1.16 // indirect
|
||||
github.com/projectdiscovery/ratelimit v0.0.20 // indirect
|
||||
github.com/projectdiscovery/retryabledns v1.0.47 // indirect
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.38
|
||||
github.com/projectdiscovery/retryabledns v1.0.52 // indirect
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.44
|
||||
github.com/projectdiscovery/subfinder/v2 v2.5.8
|
||||
github.com/projectdiscovery/utils v0.0.68
|
||||
github.com/projectdiscovery/utils v0.0.76
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
|
||||
@ -301,7 +305,7 @@ require (
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/net v0.18.0
|
||||
golang.org/x/oauth2 v0.11.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/sys v0.16.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.15.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
@ -320,7 +324,7 @@ require (
|
||||
replace (
|
||||
github.com/lcvvvv/gonmap v1.3.4 => ./lib/gonmap
|
||||
github.com/projectdiscovery/dnsx v1.1.5 => ./lib/dnsx
|
||||
github.com/projectdiscovery/gologger v1.1.11 => ./lib/gologger
|
||||
github.com/projectdiscovery/gologger v1.1.12 => ./lib/gologger
|
||||
github.com/projectdiscovery/httpx v1.3.5 => ./lib/httpx
|
||||
github.com/projectdiscovery/nuclei/v3 v3.0.2 => ./lib/nuclei
|
||||
github.com/projectdiscovery/subfinder/v2 v2.5.8 => ./lib/subfinder/v2
|
||||
|
57 go.sum
@ -109,6 +109,7 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
|
||||
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
|
||||
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
@ -235,8 +236,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO8=
|
||||
github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmDXacb+1J0=
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg=
|
||||
github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
|
||||
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08/go.mod h1:pCxVEbcm3AMg7ejXyorUXi6HQCzOIBf7zEDVPtw0/U4=
|
||||
@ -296,8 +297,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA
|
||||
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20230821135201-94e508132562 h1:ObbB2tzHWWAxzsG5futqeq2Ual2zYlo/+eMkSc5sn8w=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20230821135201-94e508132562/go.mod h1:X2TOTJ+Uamd454RFp7ig2tmP3hQg0Z2Qk8gbVQmU0mk=
|
||||
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
|
||||
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
@ -457,6 +458,7 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
|
||||
@ -644,6 +646,7 @@ github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
|
||||
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
@ -651,6 +654,8 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
@ -711,6 +716,8 @@ github.com/mholt/acmez v1.2.0 h1:1hhLxSgY5FvH5HCnGUuwbKY2VQVo8IU7rxXKSnZ7F30=
|
||||
github.com/mholt/acmez v1.2.0/go.mod h1:VT9YwH1xgNX1kmYY89gY8xPJC84BFAisjo8Egigt4kE=
|
||||
github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=
|
||||
github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
|
||||
github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
|
||||
github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
|
||||
github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM=
|
||||
github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
|
||||
github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
|
||||
@ -763,6 +770,7 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
||||
github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc=
|
||||
github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
||||
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
|
||||
@ -812,6 +820,8 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM=
|
||||
github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
|
||||
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
|
||||
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
|
||||
@ -841,42 +851,46 @@ github.com/projectdiscovery/clistats v0.0.20 h1:5jO5SLiRJ7f0nDV0ndBNmBeesbROouPo
|
||||
github.com/projectdiscovery/clistats v0.0.20/go.mod h1:GJ2av0KnOvK0AISQnP8hyDclYIji1LVkx2l0pwnzAu4=
|
||||
github.com/projectdiscovery/dsl v0.0.35 h1:kj+yVotGDweY+OGX2UUpBVIyfYpd7ADWatWP9pe7rxE=
|
||||
github.com/projectdiscovery/dsl v0.0.35/go.mod h1:IT6OlBEW+7yJl8F77GXfwDM9zZpb4sOj5IEAzdmJBPE=
|
||||
github.com/projectdiscovery/fastdialer v0.0.49 h1:YJ2EDSklvcq6putHko49+0RNKZKAIGwTKY5zGhQC/tE=
|
||||
github.com/projectdiscovery/fastdialer v0.0.49/go.mod h1:GwdxQhD65npOhDuKLhHxvZ6I/HqqnMOrC450Q/wUuYo=
|
||||
github.com/projectdiscovery/fastdialer v0.0.55 h1:dcD3La9MsImgQMrBnG0/w5Mu8PRJu2TU1STycKSSodc=
|
||||
github.com/projectdiscovery/fastdialer v0.0.55/go.mod h1:DNP62sWCLp0YHXwhlo73iyZODpSZE7dVstt2GNAC7+A=
|
||||
github.com/projectdiscovery/fasttemplate v0.0.2 h1:h2cISk5xDhlJEinlBQS6RRx0vOlOirB2y3Yu4PJzpiA=
|
||||
github.com/projectdiscovery/fasttemplate v0.0.2/go.mod h1:XYWWVMxnItd+r0GbjA1GCsUopMw1/XusuQxdyAIHMCw=
|
||||
github.com/projectdiscovery/fdmax v0.0.4 h1:K9tIl5MUZrEMzjvwn/G4drsHms2aufTn1xUdeVcmhmc=
|
||||
github.com/projectdiscovery/fdmax v0.0.4/go.mod h1:oZLqbhMuJ5FmcoaalOm31B1P4Vka/CqP50nWjgtSz+I=
|
||||
github.com/projectdiscovery/freeport v0.0.5 h1:jnd3Oqsl4S8n0KuFkE5Hm8WGDP24ITBvmyw5pFTHS8Q=
|
||||
github.com/projectdiscovery/freeport v0.0.5/go.mod h1:PY0bxSJ34HVy67LHIeF3uIutiCSDwOqKD8ruBkdiCwE=
|
||||
github.com/projectdiscovery/go-smb2 v0.0.0-20240129202741-052cc450c6cb h1:rutG906Drtbpz4DwU5mhGIeOhRcktDH4cGQitGUMAsg=
|
||||
github.com/projectdiscovery/go-smb2 v0.0.0-20240129202741-052cc450c6cb/go.mod h1:FLjF1DmZ+POoGEiIQdWuYVwS++C/GwpX8YaCsTSm1RY=
|
||||
github.com/projectdiscovery/goconfig v0.0.1 h1:36m3QjohZvemqh9bkJAakaHsm9iEZ2AcQSS18+0QX/s=
|
||||
github.com/projectdiscovery/goconfig v0.0.1/go.mod h1:CPO25zR+mzTtyBrsygqsHse0sp/4vB/PjaHi9upXlDw=
|
||||
github.com/projectdiscovery/goflags v0.1.29 h1:RIfBxJrm9ApJLKxizNm9vZq3/8uH7WBHyInXC4GjNSY=
|
||||
github.com/projectdiscovery/goflags v0.1.29/go.mod h1:JuNseKacVYPqxwhN7NLw/UPrzyZOuJ58ufLrPuZsXDY=
|
||||
github.com/projectdiscovery/gologger v1.1.12 h1:uX/QkQdip4PubJjjG0+uk5DtyAi1ANPJUvpmimXqv4A=
|
||||
github.com/projectdiscovery/gologger v1.1.12/go.mod h1:DI8nywPLERS5mo8QEA9E7gd5HZ3Je14SjJBH3F5/kLw=
|
||||
github.com/projectdiscovery/gostruct v0.0.2 h1:s8gP8ApugGM4go1pA+sVlPDXaWqNP5BBDDSv7VEdG1M=
|
||||
github.com/projectdiscovery/gostruct v0.0.2/go.mod h1:H86peL4HKwMXcQQtEa6lmC8FuD9XFt6gkNR0B/Mu5PE=
|
||||
github.com/projectdiscovery/gozero v0.0.1 h1:f08ZnYlbDZV/TNGDvIXV9s/oB/sAI+HWaSbW4em4aKM=
|
||||
github.com/projectdiscovery/gozero v0.0.1/go.mod h1:/dHwbly+1lhOX9UreVure4lEe7K4hIHeu/c/wZGNTDo=
|
||||
github.com/projectdiscovery/hmap v0.0.29 h1:YPnwrvyeF0jxWloRq45rG3GZrUTu5gZYizEIn2EVp50=
|
||||
github.com/projectdiscovery/hmap v0.0.29/go.mod h1:08/3+VcqLg9W/RViplkbIiYXDNltCDlF+P7oCQblprA=
|
||||
github.com/projectdiscovery/hmap v0.0.35 h1:JkadBpuB/GttuS+O72E26y6RrC8Ox90iFunrI2/zvrc=
|
||||
github.com/projectdiscovery/hmap v0.0.35/go.mod h1:EXm6Z/e10GS0uK7qNLH2OcT0bIKq+T4ZDWxSzK0ho5U=
|
||||
github.com/projectdiscovery/interactsh v1.1.8 h1:mDD+f/oo2tV4Z1WyUync0tgYeJyuiS89Un64Gm6Pvgk=
|
||||
github.com/projectdiscovery/interactsh v1.1.8/go.mod h1:E20ywFb7bL01GcOOk+6VZF48XZ8AZvYvBpULoBUSTbg=
|
||||
github.com/projectdiscovery/mapcidr v1.1.16 h1:rjj1w5D6hbTsUQXYClLcGdfBEy9bryclgi70t0vBggo=
|
||||
github.com/projectdiscovery/mapcidr v1.1.16/go.mod h1:rGqpBhStdwOQ2uS62QM9qPsybwMwIhT7CTd2bxoHs8Q=
|
||||
github.com/projectdiscovery/n3iwf v0.0.0-20230523120440-b8cd232ff1f5 h1:L/e8z8yw1pfT6bg35NiN7yd1XKtJap5Nk6lMwQ0RNi8=
|
||||
github.com/projectdiscovery/n3iwf v0.0.0-20230523120440-b8cd232ff1f5/go.mod h1:pGW2ncnTxTxHtP9wzcIJAB+3/NMp6IiuQWd2NK7K+oc=
|
||||
github.com/projectdiscovery/networkpolicy v0.0.6 h1:yDvm0XCrS9HeemRrBS+J+22surzVczM94W5nHiOy/1o=
|
||||
github.com/projectdiscovery/networkpolicy v0.0.6/go.mod h1:8HJQ/33Pi7v3a3MRWIQGXzpj+zHw2d60TysEL4qdoQk=
|
||||
github.com/projectdiscovery/networkpolicy v0.0.7 h1:AwHqBRXBqDQgnWzBMuoJtHBNEYBw+NFp/4qIK688x7o=
|
||||
github.com/projectdiscovery/networkpolicy v0.0.7/go.mod h1:CK0CnFoLF1Nou6mY7P4WODSAxhPN8g8g7XpapgEP8tI=
|
||||
github.com/projectdiscovery/ratelimit v0.0.20 h1:xKvCZyXZn7YzPtmw+Cvv7JiNxH9mp8CW2fsI39cZBhY=
|
||||
github.com/projectdiscovery/ratelimit v0.0.20/go.mod h1:Ddd6DaiibjUDAzUnYeRGA/xafYJNLlv/dmuSdc5Lvrw=
|
||||
github.com/projectdiscovery/rawhttp v0.1.28 h1:6cR6JpjzEMjtyXHOWKwfFUNdmo0CXtUbOn6w6RsBYf4=
|
||||
github.com/projectdiscovery/rawhttp v0.1.28/go.mod h1:VfGWfefvtSzixCdsst+gMRYVMMnOvrLieW1l9xDdO0U=
|
||||
github.com/projectdiscovery/rdap v0.9.1-0.20221108103045-9865884d1917 h1:m03X4gBVSorSzvmm0bFa7gDV4QNSOWPL/fgZ4kTXBxk=
|
||||
github.com/projectdiscovery/rdap v0.9.1-0.20221108103045-9865884d1917/go.mod h1:JxXtZC9e195awe7EynrcnBJmFoad/BNDzW9mzFkK8Sg=
|
||||
github.com/projectdiscovery/retryabledns v1.0.47 h1:9h/9H2YWg68mampF2re5CCQ2lAxBLzq/N0daN6kdUTQ=
|
||||
github.com/projectdiscovery/retryabledns v1.0.47/go.mod h1:TRErBaFkW2Gx73EepHeYSLgtD2ZCbNj1dTS9SuLd5GA=
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.38 h1:MvXTiqL58+HKNL0fxvGEXUWuTrYENtrNklxxOIGFh0o=
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.38/go.mod h1:vTDxZuLKrAots/HUBM6g+E5tc64XaFCTOpxPXp+F12E=
|
||||
github.com/projectdiscovery/retryabledns v1.0.52 h1:jJRIT5y7KYZvaZAAvlkxvkKkQzst6LvEeLDqRc3LeOM=
|
||||
github.com/projectdiscovery/retryabledns v1.0.52/go.mod h1:Ea478e6XNVAmfH4KwqtLNjkwdgkpVH1O3+FL2dKLNb8=
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.44 h1:hicCe2h6daHt4muPovmffZE3YKBqGioreO6EpIGZ87g=
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.44/go.mod h1:7ECXK2cH2/G4sstf8hacyrMdPPJ/3wCAO5tFPZ4iO4s=
|
||||
github.com/projectdiscovery/sarif v0.0.1 h1:C2Tyj0SGOKbCLgHrx83vaE6YkzXEVrMXYRGLkKCr/us=
|
||||
github.com/projectdiscovery/sarif v0.0.1/go.mod h1:cEYlDu8amcPf6b9dSakcz2nNnJsoz4aR6peERwV+wuQ=
|
||||
github.com/projectdiscovery/stringsutil v0.0.2 h1:uzmw3IVLJSMW1kEg8eCStG/cGbYYZAja8BH3LqqJXMA=
|
||||
@ -885,8 +899,8 @@ github.com/projectdiscovery/tlsx v1.1.6-0.20231116215000-e842dc367a74 h1:G0gw+3z
|
||||
github.com/projectdiscovery/tlsx v1.1.6-0.20231116215000-e842dc367a74/go.mod h1:YH8el7/6pyZbNed1IibjzbGpeigiCVyvE28g5+LsPAw=
|
||||
github.com/projectdiscovery/uncover v1.0.7 h1:ut+2lTuvmftmveqF5RTjMWAgyLj8ltPQC7siFy9sj0A=
|
||||
github.com/projectdiscovery/uncover v1.0.7/go.mod h1:HFXgm1sRPuoN0D4oATljPIdmbo/EEh1wVuxQqo/dwFE=
|
||||
github.com/projectdiscovery/utils v0.0.68 h1:rWvuG61oWeNzboYtugc3sG2uw5k8uptfHoth4CypVQI=
|
||||
github.com/projectdiscovery/utils v0.0.68/go.mod h1:c5XnwkcffXqma9Hf781Osekfuqehb981gdlQiBZ5QvU=
|
||||
github.com/projectdiscovery/utils v0.0.76 h1:6azn0Zju0taw5Y9qAjpGPxyqwJf2AI4VJjtIzPBcRzQ=
|
||||
github.com/projectdiscovery/utils v0.0.76/go.mod h1:ERIYcW+h5jKIYyYkfdOpNPIUtH8Ogz4q5Wq3gx/71Zw=
|
||||
github.com/projectdiscovery/wappalyzergo v0.0.109 h1:BERfwTRn1dvB1tbhyc5m67R8VkC9zbVuPsEq4VEm07k=
|
||||
github.com/projectdiscovery/wappalyzergo v0.0.109/go.mod h1:4Z3DKhi75zIPMuA+qSDDWxZvnhL4qTLmDx4dxNMu7MA=
|
||||
github.com/projectdiscovery/yamldoc-go v1.0.4 h1:eZoESapnMw6WAHiVgRwNqvbJEfNHEH148uthhFbG5jE=
|
||||
@ -1078,7 +1092,8 @@ github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
|
||||
github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
|
||||
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/ulule/deepcopier v0.0.0-20200430083143-45decc6639b6 h1:TtyC78WMafNW8QFfv3TeP3yWNDG+uxNkk9vOrnDu6JA=
|
||||
@ -1422,8 +1437,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
@ -1434,8 +1449,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@@ -886,6 +886,7 @@ func (r *Runner) RunEnumeration() {

				newPath := getJumpPath(resp.Raw)
				if newPath != "" {
					newPath = strings.Trim(newPath, " ")
					newPath = strings.Trim(newPath, "'")
					newPath = strings.Trim(newPath, "\"")
					if strings.HasPrefix(newPath, "https://") || strings.HasPrefix(newPath, "http://") {
@ -21,12 +21,12 @@ require (
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/projectdiscovery/clistats v0.0.20
|
||||
github.com/projectdiscovery/fastdialer v0.0.49
|
||||
github.com/projectdiscovery/hmap v0.0.29
|
||||
github.com/projectdiscovery/fastdialer v0.0.55
|
||||
github.com/projectdiscovery/hmap v0.0.35
|
||||
github.com/projectdiscovery/interactsh v1.1.8
|
||||
github.com/projectdiscovery/rawhttp v0.1.28
|
||||
github.com/projectdiscovery/retryabledns v1.0.47
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.38
|
||||
github.com/projectdiscovery/retryabledns v1.0.52
|
||||
github.com/projectdiscovery/retryablehttp-go v1.0.44
|
||||
github.com/projectdiscovery/yamldoc-go v1.0.4
|
||||
github.com/remeh/sizedwaitgroup v1.0.0
|
||||
github.com/rs/xid v1.5.0
|
||||
@ -70,16 +70,14 @@ require (
|
||||
github.com/go-pg/pg v8.0.7+incompatible
|
||||
github.com/go-sql-driver/mysql v1.6.0
|
||||
github.com/h2non/filetype v1.1.3
|
||||
github.com/hirochachacha/go-smb2 v1.1.0
|
||||
github.com/labstack/echo/v4 v4.10.2
|
||||
github.com/lib/pq v1.10.1
|
||||
github.com/mholt/archiver v3.1.1+incompatible
|
||||
github.com/ory/dockertest/v3 v3.10.0
|
||||
github.com/praetorian-inc/fingerprintx v1.1.9
|
||||
github.com/projectdiscovery/dsl v0.0.35
|
||||
github.com/projectdiscovery/fasttemplate v0.0.2
|
||||
github.com/projectdiscovery/go-smb2 v0.0.0-20240129202741-052cc450c6cb
|
||||
github.com/projectdiscovery/goflags v0.1.29
|
||||
github.com/projectdiscovery/gologger v1.1.11
|
||||
github.com/projectdiscovery/gologger v1.1.12
|
||||
github.com/projectdiscovery/gostruct v0.0.2
|
||||
github.com/projectdiscovery/gozero v0.0.1
|
||||
github.com/projectdiscovery/httpx v1.3.5
|
||||
@ -90,14 +88,14 @@ require (
|
||||
github.com/projectdiscovery/sarif v0.0.1
|
||||
github.com/projectdiscovery/tlsx v1.1.6-0.20231116215000-e842dc367a74
|
||||
github.com/projectdiscovery/uncover v1.0.7
|
||||
github.com/projectdiscovery/utils v0.0.68
|
||||
github.com/projectdiscovery/utils v0.0.76
|
||||
github.com/projectdiscovery/wappalyzergo v0.0.109
|
||||
github.com/redis/go-redis/v9 v9.1.0
|
||||
github.com/ropnop/gokrb5/v8 v8.0.0-20201111231119-729746023c02
|
||||
github.com/sashabaranov/go-openai v1.15.3
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/zmap/zgrab2 v0.1.8-0.20230806160807-97ba87c0e706
|
||||
golang.org/x/term v0.15.0
|
||||
golang.org/x/term v0.16.0
|
||||
gopkg.in/src-d/go-git.v4 v4.13.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
@ -109,7 +107,6 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/andybalholm/brotli v1.0.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
|
||||
@ -126,14 +123,13 @@ require (
|
||||
github.com/cheggaaa/pb/v3 v3.1.4 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/cloudflare/cfssl v1.6.4 // indirect
|
||||
github.com/cloudflare/circl v1.3.6 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/containerd/continuity v0.4.2 // indirect
|
||||
github.com/denisbrodbeck/machineid v1.0.1 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/dlclark/regexp2 v1.10.0 // indirect
|
||||
github.com/docker/cli v24.0.5+incompatible // indirect
|
||||
github.com/docker/docker v24.0.7+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/free5gc/util v1.0.5-0.20230511064842-2e120956883b // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
@ -144,13 +140,11 @@ require (
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect
|
||||
github.com/golang-sql/sqlexp v0.1.0 // indirect
|
||||
github.com/google/certificate-transparency-go v1.1.4 // indirect
|
||||
github.com/google/go-github/v30 v30.1.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2 // indirect
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.6 // indirect
|
||||
@ -163,19 +157,20 @@ require (
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/kataras/jwt v0.1.10 // indirect
|
||||
github.com/klauspost/compress v1.17.3 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mackerelio/go-osstat v0.2.4 // indirect
|
||||
github.com/mholt/archiver/v3 v3.5.1 // indirect
|
||||
github.com/minio/selfupdate v0.6.1-0.20230907112617-f11e74f84ca7 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/muesli/reflow v0.3.0 // indirect
|
||||
github.com/muesli/termenv v0.15.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.1.9 // indirect
|
||||
github.com/ory/dockertest/v3 v3.10.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.0 // indirect
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
|
||||
github.com/projectdiscovery/asnmap v1.0.6 // indirect
|
||||
@ -198,9 +193,6 @@ require (
|
||||
github.com/tim-ywliu/nested-logrus-formatter v1.3.2 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/ysmood/fetchup v0.2.3 // indirect
|
||||
github.com/ysmood/got v0.34.1 // indirect
|
||||
github.com/yuin/goldmark v1.5.4 // indirect
|
||||
@ -226,7 +218,7 @@ require (
|
||||
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/dsnet/compress v0.0.1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
@ -259,7 +251,7 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/projectdiscovery/blackrock v0.0.1 // indirect
|
||||
github.com/projectdiscovery/networkpolicy v0.0.6
|
||||
github.com/projectdiscovery/networkpolicy v0.0.7
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
@ -281,7 +273,7 @@ require (
|
||||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/sys v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.15.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
@ -308,11 +300,9 @@ require (
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.4.1 // indirect
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/labstack/gommon v0.4.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/nwaples/rardecode v1.1.3 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
@ -323,6 +313,4 @@ require (
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/projectdiscovery/gologger v1.1.11 => ../gologger
|
||||
)
|
||||
replace github.com/projectdiscovery/gologger v1.1.11 => ../gologger
|
||||
|
1568 lib/nuclei/go.sum (Normal file)
File diff suppressed because it is too large
@ -1,76 +0,0 @@
|
||||
// pdcp contains projectdiscovery cloud platform related features
|
||||
// like result upload , dashboard etc.
|
||||
package pdcp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
|
||||
"github.com/projectdiscovery/utils/env"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
var (
|
||||
DashBoardURL = "https://cloud.projectdiscovery.io"
|
||||
DefaultApiServer = "https://api.projectdiscovery.io"
|
||||
)
|
||||
|
||||
// CheckNValidateCredentials checks if credentials exist on filesystem
|
||||
// if not waits for user to enter credentials and validates them
|
||||
// and saves them to filesystem
|
||||
// when validate is true any existing credentials are validated
|
||||
// Note: this is meant to be used in cli only (interactive mode)
|
||||
func CheckNValidateCredentials(toolName string) {
|
||||
h := &PDCPCredHandler{}
|
||||
creds, err := h.GetCreds()
|
||||
if err == nil {
|
||||
// validate by fetching user profile
|
||||
gotCreds, err := h.ValidateAPIKey(creds.APIKey, creds.Server, config.BinaryName)
|
||||
if err == nil {
|
||||
gologger.Info().Msgf("You are logged in as (@%v)", gotCreds.Username)
|
||||
os.Exit(0)
|
||||
}
|
||||
gologger.Error().Msgf("Invalid API key found in file, please recheck or recreate your API key and retry.")
|
||||
}
|
||||
if err != nil && err != ErrNoCreds {
|
||||
// this is unexpected error log it
|
||||
gologger.Error().Msgf("Could not read credentials from file: %s\n", err)
|
||||
}
|
||||
|
||||
// if we are here, we need to get credentials from user
|
||||
gologger.Info().Msgf("Get your free api key by signing up at %v", DashBoardURL)
|
||||
fmt.Printf("[*] Enter PDCP API Key (exit to abort): ")
|
||||
bin, err := term.ReadPassword(int(os.Stdin.Fd()))
|
||||
if err != nil {
|
||||
gologger.Fatal().Msgf("Could not read input from terminal: %s\n", err)
|
||||
}
|
||||
apiKey := string(bin)
|
||||
if strings.EqualFold(apiKey, "exit") {
|
||||
os.Exit(0)
|
||||
}
|
||||
fmt.Println()
|
||||
// if env variable is set use that for validating api key
|
||||
apiServer := env.GetEnvOrDefault(apiServerEnv, DefaultApiServer)
|
||||
// validate by fetching user profile
|
||||
validatedCreds, err := h.ValidateAPIKey(apiKey, apiServer, toolName)
|
||||
if err == nil {
|
||||
gologger.Info().Msgf("Successfully logged in as (@%v)", validatedCreds.Username)
|
||||
if saveErr := h.SaveCreds(validatedCreds); saveErr != nil {
|
||||
gologger.Warning().Msgf("Could not save credentials to file: %s\n", saveErr)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
gologger.Error().Msgf("Invalid API key '%v' got error: %v", maskKey(apiKey), err)
|
||||
gologger.Fatal().Msgf("please recheck or recreate your API key and retry")
|
||||
}
|
||||
|
||||
func maskKey(key string) string {
|
||||
if len(key) < 6 {
|
||||
// this is invalid key
|
||||
return key
|
||||
}
|
||||
return fmt.Sprintf("%v%v", key[:3], strings.Repeat("*", len(key)-3))
|
||||
}
|
@ -1,139 +0,0 @@
|
||||
package pdcp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
"github.com/projectdiscovery/utils/env"
|
||||
fileutil "github.com/projectdiscovery/utils/file"
|
||||
folderutil "github.com/projectdiscovery/utils/folder"
|
||||
urlutil "github.com/projectdiscovery/utils/url"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var (
|
||||
PDCPDir = filepath.Join(folderutil.HomeDirOrDefault(""), ".pdcp")
|
||||
PDCPCredFile = filepath.Join(PDCPDir, "credentials.yaml")
|
||||
ErrNoCreds = fmt.Errorf("no credentials found in %s", PDCPDir)
|
||||
)
|
||||
|
||||
const (
|
||||
userProfileURL = "https://%s/v1/user?utm_source=%s"
|
||||
apiKeyEnv = "PDCP_API_KEY"
|
||||
apiServerEnv = "PDCP_API_SERVER"
|
||||
ApiKeyHeaderName = "X-Api-Key"
|
||||
dashBoardEnv = "PDCP_DASHBOARD_URL"
|
||||
)
|
||||
|
||||
type PDCPCredentials struct {
|
||||
Username string `yaml:"username"`
|
||||
APIKey string `yaml:"api-key"`
|
||||
Server string `yaml:"server"`
|
||||
}
|
||||
|
||||
type PDCPUserProfileResponse struct {
|
||||
UserName string `json:"name"`
|
||||
// there are more fields but we don't need them
|
||||
/// below fields are added later on and not part of the response
|
||||
}
|
||||
|
||||
// PDCPCredHandler is interface for adding / retrieving pdcp credentials
|
||||
// from file system
|
||||
type PDCPCredHandler struct{}
|
||||
|
||||
// GetCreds retrieves the credentials from the file system or environment variables
|
||||
func (p *PDCPCredHandler) GetCreds() (*PDCPCredentials, error) {
|
||||
credsFromEnv := p.getCredsFromEnv()
|
||||
if credsFromEnv != nil {
|
||||
return credsFromEnv, nil
|
||||
}
|
||||
if !fileutil.FolderExists(PDCPDir) || !fileutil.FileExists(PDCPCredFile) {
|
||||
return nil, ErrNoCreds
|
||||
}
|
||||
bin, err := os.Open(PDCPCredFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// for future use-cases
|
||||
var creds []PDCPCredentials
|
||||
err = yaml.NewDecoder(bin).Decode(&creds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(creds) == 0 {
|
||||
return nil, ErrNoCreds
|
||||
}
|
||||
return &creds[0], nil
|
||||
}
|
||||
|
||||
// getCredsFromEnv retrieves the credentials from the environment
|
||||
// if not or incomplete credentials are found it return nil
|
||||
func (p *PDCPCredHandler) getCredsFromEnv() *PDCPCredentials {
|
||||
apiKey := env.GetEnvOrDefault(apiKeyEnv, "")
|
||||
apiServer := env.GetEnvOrDefault(apiServerEnv, "")
|
||||
if apiKey == "" || apiServer == "" {
|
||||
return nil
|
||||
}
|
||||
return &PDCPCredentials{APIKey: apiKey, Server: apiServer}
|
||||
}
|
||||
|
||||
// SaveCreds saves the credentials to the file system
|
||||
func (p *PDCPCredHandler) SaveCreds(resp *PDCPCredentials) error {
|
||||
if resp == nil {
|
||||
return fmt.Errorf("invalid response")
|
||||
}
|
||||
if !fileutil.FolderExists(PDCPDir) {
|
||||
_ = fileutil.CreateFolder(PDCPDir)
|
||||
}
|
||||
bin, err := yaml.Marshal([]*PDCPCredentials{resp})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(PDCPCredFile, bin, 0600)
|
||||
}
|
||||
|
||||
// ValidateAPIKey validates the api key and retrieves associated user metadata like username
|
||||
// from given api server/host
|
||||
func (p *PDCPCredHandler) ValidateAPIKey(key string, host string, toolName string) (*PDCPCredentials, error) {
|
||||
// get address from url
|
||||
urlx, err := urlutil.Parse(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := retryablehttp.NewRequest("GET", fmt.Sprintf(userProfileURL, urlx.Host, toolName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set(ApiKeyHeaderName, key)
|
||||
resp, err := retryablehttp.DefaultHTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
return nil, fmt.Errorf("invalid status code: %d", resp.StatusCode)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bin, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var profile PDCPUserProfileResponse
|
||||
err = json.Unmarshal(bin, &profile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if profile.UserName == "" {
|
||||
return nil, fmt.Errorf("invalid response from server got %v", string(bin))
|
||||
}
|
||||
return &PDCPCredentials{Username: profile.UserName, APIKey: key, Server: host}, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
DashBoardURL = env.GetEnvOrDefault("PDCP_DASHBOARD_URL", DashBoardURL)
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
package pdcp
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var exampleCred = `
|
||||
- username: test
|
||||
api-key: testpassword
|
||||
server: https://scanme.sh
|
||||
`
|
||||
|
||||
func TestLoadCreds(t *testing.T) {
|
||||
// temporarily change PDCP file location for testing
|
||||
f, err := os.CreateTemp("", "creds-test-*")
|
||||
require.Nil(t, err)
|
||||
_, _ = f.WriteString(strings.TrimSpace(exampleCred))
|
||||
defer os.Remove(f.Name())
|
||||
PDCPCredFile = f.Name()
|
||||
PDCPDir = filepath.Dir(f.Name())
|
||||
h := &PDCPCredHandler{}
|
||||
value, err := h.GetCreds()
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, value)
|
||||
require.Equal(t, "test", value.Username)
|
||||
require.Equal(t, "testpassword", value.APIKey)
|
||||
require.Equal(t, "https://scanme.sh", value.Server)
|
||||
}
|
18 lib/nuclei/internal/pdcp/utils.go (Executable file)
@@ -0,0 +1,18 @@
+package pdcp
+
+import (
+	pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
+	urlutil "github.com/projectdiscovery/utils/url"
+)
+
+func getScanDashBoardURL(id string) string {
+	ux, _ := urlutil.Parse(pdcpauth.DashBoardURL)
+	ux.Path = "/scans/" + id
+	ux.Update()
+	return ux.String()
+}
+
+type uploadResponse struct {
+	ID      string `json:"id"`
+	Message string `json:"message"`
+}
|
@ -1,29 +1,30 @@
|
||||
package pdcp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
|
||||
errorutil "github.com/projectdiscovery/utils/errors"
|
||||
fileutil "github.com/projectdiscovery/utils/file"
|
||||
folderutil "github.com/projectdiscovery/utils/folder"
|
||||
urlutil "github.com/projectdiscovery/utils/url"
|
||||
)
|
||||
|
||||
const (
|
||||
uploadEndpoint = "/v1/scans/import"
|
||||
appendEndpoint = "/v1/scans/%s/import"
|
||||
flushTimer = time.Duration(1) * time.Minute
|
||||
MaxChunkSize = 1024 * 1024 * 4 // 4 MB
|
||||
)
|
||||
|
||||
var _ output.Writer = &UploadWriter{}
|
||||
@ -32,32 +33,29 @@ var _ output.Writer = &UploadWriter{}
|
||||
// server to enable web dashboard and more
|
||||
type UploadWriter struct {
|
||||
*output.StandardWriter
|
||||
creds *PDCPCredentials
|
||||
tempFile *os.File
|
||||
done atomic.Bool
|
||||
creds *pdcpauth.PDCPCredentials
|
||||
uploadURL *url.URL
|
||||
client *retryablehttp.Client
|
||||
cancel context.CancelFunc
|
||||
done chan struct{}
|
||||
scanID string
|
||||
counter atomic.Int32
|
||||
}
|
||||
|
||||
// NewUploadWriter creates a new upload writer
|
||||
func NewUploadWriter(creds *PDCPCredentials) (*UploadWriter, error) {
|
||||
func NewUploadWriter(ctx context.Context, creds *pdcpauth.PDCPCredentials) (*UploadWriter, error) {
|
||||
if creds == nil {
|
||||
return nil, fmt.Errorf("no credentials provided")
|
||||
}
|
||||
u := &UploadWriter{creds: creds}
|
||||
// create a temporary file in cache directory
|
||||
cacheDir := folderutil.AppCacheDirOrDefault("", config.BinaryName)
|
||||
if !fileutil.FolderExists(cacheDir) {
|
||||
_ = fileutil.CreateFolder(cacheDir)
|
||||
u := &UploadWriter{
|
||||
creds: creds,
|
||||
done: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
var err error
|
||||
// tempfile is created in nuclei-results-<unix-timestamp>.json format
|
||||
u.tempFile, err = os.OpenFile(filepath.Join(cacheDir, "nuclei-results-"+strconv.Itoa(int(time.Now().Unix()))+".json"), os.O_RDWR|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
return nil, errorutil.NewWithErr(err).Msgf("could not create temporary file")
|
||||
}
|
||||
reader, writer := io.Pipe()
|
||||
// create standard writer
|
||||
u.StandardWriter, err = output.NewWriter(
|
||||
output.WithWriter(u.tempFile),
|
||||
output.WithWriter(writer),
|
||||
output.WithJson(true, true),
|
||||
)
|
||||
if err != nil {
|
||||
@ -70,87 +68,164 @@ func NewUploadWriter(creds *PDCPCredentials) (*UploadWriter, error) {
|
||||
tmp.Path = uploadEndpoint
|
||||
tmp.Update()
|
||||
u.uploadURL = tmp.URL
|
||||
|
||||
// create http client
|
||||
opts := retryablehttp.DefaultOptionsSingle
|
||||
opts.NoAdjustTimeout = true
|
||||
opts.Timeout = time.Duration(3) * time.Minute
|
||||
u.client = retryablehttp.NewClient(opts)
|
||||
|
||||
// create context
|
||||
ctx, u.cancel = context.WithCancel(ctx)
|
||||
// start auto commit
|
||||
// upload every 1 minute or when buffer is full
|
||||
go u.autoCommit(ctx, reader)
|
||||
return u, nil
|
||||
}
|
||||
|
||||
type uploadResponse struct {
|
||||
ID string `json:"id"`
|
||||
Message string `json:"message"`
|
||||
// SetScanID sets the scan id for the upload writer
|
||||
func (u *UploadWriter) SetScanID(id string) {
|
||||
u.scanID = id
|
||||
}
|
||||
|
||||
// Upload uploads the results to pdcp server
|
||||
func (u *UploadWriter) Upload() {
|
||||
defer u.done.Store(true)
|
||||
func (u *UploadWriter) autoCommit(ctx context.Context, r *io.PipeReader) {
|
||||
reader := bufio.NewReader(r)
|
||||
ch := make(chan string, 4)
|
||||
|
||||
_ = u.tempFile.Sync()
|
||||
info, err := u.tempFile.Stat()
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
return
|
||||
}
|
||||
if info.Size() == 0 {
|
||||
gologger.Verbose().Msgf("Scan results upload to cloud skipped, no results found to upload")
|
||||
return
|
||||
}
|
||||
_, _ = u.tempFile.Seek(0, 0)
|
||||
// continuously read from the reader and send to channel
|
||||
go func() {
|
||||
defer r.Close()
|
||||
defer close(ch)
|
||||
for {
|
||||
data, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
u.counter.Add(1)
|
||||
ch <- data
|
||||
}
|
||||
}()
|
||||
|
||||
id, err := u.upload()
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
return
|
||||
// wait for context to be done
|
||||
defer func() {
|
||||
u.done <- struct{}{}
|
||||
close(u.done)
|
||||
// if no scanid is generated no results were uploaded
|
||||
if u.scanID == "" {
|
||||
gologger.Verbose().Msgf("Scan results upload to cloud skipped, no results found to upload")
|
||||
} else {
|
||||
gologger.Info().Msgf("%v Scan results uploaded to cloud, you can view scan results at %v", u.counter.Load(), getScanDashBoardURL(u.scanID))
|
||||
}
|
||||
}()
|
||||
// temporary buffer to store the results
|
||||
buff := &bytes.Buffer{}
|
||||
ticker := time.NewTicker(flushTimer)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// flush before exit
|
||||
if buff.Len() > 0 {
|
||||
if err := u.uploadChunk(buff); err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
case <-ticker.C:
|
||||
// flush the buffer
|
||||
if buff.Len() > 0 {
|
||||
if err := u.uploadChunk(buff); err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
}
|
||||
}
|
||||
case line, ok := <-ch:
|
||||
if !ok {
|
||||
if buff.Len() > 0 {
|
||||
if err := u.uploadChunk(buff); err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
if buff.Len()+len(line) > MaxChunkSize {
|
||||
// flush existing buffer
|
||||
if err := u.uploadChunk(buff); err != nil {
|
||||
gologger.Error().Msgf("Failed to upload scan results on cloud: %v", err)
|
||||
}
|
||||
} else {
|
||||
buff.WriteString(line)
|
||||
}
|
||||
}
|
||||
}
|
||||
gologger.Info().Msgf("Scan results uploaded! View them at %v", getScanDashBoardURL(id))
|
||||
}
|
||||
|
||||
func (u *UploadWriter) upload() (string, error) {
|
||||
req, err := retryablehttp.NewRequest(http.MethodPost, u.uploadURL.String(), u.tempFile)
|
||||
if err != nil {
|
||||
return "", errorutil.NewWithErr(err).Msgf("could not create cloud upload request")
|
||||
// uploadChunk uploads a chunk of data to the server
|
||||
func (u *UploadWriter) uploadChunk(buff *bytes.Buffer) error {
|
||||
if err := u.upload(buff.Bytes()); err != nil {
|
||||
return errorutil.NewWithErr(err).Msgf("could not upload chunk")
|
||||
}
|
||||
req.Header.Set(ApiKeyHeaderName, u.creds.APIKey)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
// if successful, reset the buffer
|
||||
buff.Reset()
|
||||
// log in verbose mode
|
||||
gologger.Warning().Msgf("Uploaded results chunk, you can view scan results at %v", getScanDashBoardURL(u.scanID))
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := retryablehttp.DefaultOptionsSingle
|
||||
// we are uploading nuclei results which can be large
|
||||
// server has a size limit of ~20ish MB
|
||||
opts.Timeout = time.Duration(3) * time.Minute
|
||||
client := retryablehttp.NewClient(opts)
|
||||
resp, err := client.Do(req)
|
||||
func (u *UploadWriter) upload(data []byte) error {
|
||||
req, err := u.getRequest(data)
|
||||
if err != nil {
|
||||
return "", errorutil.NewWithErr(err).Msgf("could not upload results")
|
||||
return errorutil.NewWithErr(err).Msgf("could not create upload request")
|
||||
}
|
||||
resp, err := u.client.Do(req)
|
||||
if err != nil {
|
||||
return errorutil.NewWithErr(err).Msgf("could not upload results")
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bin, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errorutil.NewWithErr(err).Msgf("could not get id from response")
|
||||
return errorutil.NewWithErr(err).Msgf("could not get id from response")
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("could not upload results got status code %v", resp.StatusCode)
|
||||
return fmt.Errorf("could not upload results got status code %v on %v", resp.StatusCode, resp.Request.URL.String())
|
||||
}
|
||||
var uploadResp uploadResponse
|
||||
if err := json.Unmarshal(bin, &uploadResp); err != nil {
|
||||
return "", errorutil.NewWithErr(err).Msgf("could not unmarshal response got %v", string(bin))
|
||||
return errorutil.NewWithErr(err).Msgf("could not unmarshal response got %v", string(bin))
|
||||
}
|
||||
u.removeTempFile()
|
||||
return uploadResp.ID, nil
|
||||
if uploadResp.ID != "" && u.scanID == "" {
|
||||
u.scanID = uploadResp.ID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeTempFile removes the temporary file
|
||||
func (u *UploadWriter) removeTempFile() {
|
||||
_ = os.Remove(u.tempFile.Name())
|
||||
// getRequest returns a new request for upload
|
||||
// if scanID is not provided create new scan by uploading the data
|
||||
// if scanID is provided append the data to existing scan
|
||||
func (u *UploadWriter) getRequest(bin []byte) (*retryablehttp.Request, error) {
|
||||
var method, url string
|
||||
|
||||
if u.scanID == "" {
|
||||
u.uploadURL.Path = uploadEndpoint
|
||||
method = http.MethodPost
|
||||
url = u.uploadURL.String()
|
||||
} else {
|
||||
u.uploadURL.Path = fmt.Sprintf(appendEndpoint, u.scanID)
|
||||
method = http.MethodPatch
|
||||
url = u.uploadURL.String()
|
||||
}
|
||||
req, err := retryablehttp.NewRequest(method, url, bytes.NewReader(bin))
|
||||
if err != nil {
|
||||
return nil, errorutil.NewWithErr(err).Msgf("could not create cloud upload request")
|
||||
}
|
||||
req.Header.Set(pdcpauth.ApiKeyHeaderName, u.creds.APIKey)
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Close closes the upload writer
|
||||
func (u *UploadWriter) Close() {
|
||||
if !u.done.Load() {
|
||||
u.Upload()
|
||||
}
|
||||
}
|
||||
|
||||
func getScanDashBoardURL(id string) string {
|
||||
ux, _ := urlutil.Parse(DashBoardURL)
|
||||
ux.Path = "/scans/" + id
|
||||
ux.Update()
|
||||
return ux.String()
|
||||
u.cancel()
|
||||
<-u.done
|
||||
u.StandardWriter.Close()
|
||||
}
|
||||
|
@ -2,8 +2,8 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v3/internal/pdcp"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/catalog/config"
|
||||
pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
|
||||
updateutils "github.com/projectdiscovery/utils/update"
|
||||
)
|
||||
|
||||
@ -14,5 +14,5 @@ func NucleiToolUpdateCallback() {
|
||||
|
||||
// AuthWithPDCP is used to authenticate with PDCP
|
||||
func AuthWithPDCP() {
|
||||
pdcp.CheckNValidateCredentials(config.BinaryName)
|
||||
pdcpauth.CheckNValidateCredentials(config.BinaryName)
|
||||
}
|
||||
|
@ -259,9 +259,9 @@ func createReportingOptions(options *types.Options) (*reporting.Options, error)
|
||||
}
|
||||
if options.MarkdownExportDirectory != "" {
|
||||
reportingOptions.MarkdownExporter = &markdown.Options{
|
||||
Directory: options.MarkdownExportDirectory,
|
||||
IncludeRawPayload: !options.OmitRawRequests,
|
||||
SortMode: options.MarkdownExportSortMode,
|
||||
Directory: options.MarkdownExportDirectory,
|
||||
OmitRaw: options.OmitRawRequests,
|
||||
SortMode: options.MarkdownExportSortMode,
|
||||
}
|
||||
}
|
||||
if options.SarifExport != "" {
|
||||
@ -269,17 +269,18 @@ func createReportingOptions(options *types.Options) (*reporting.Options, error)
|
||||
}
|
||||
if options.JSONExport != "" {
|
||||
reportingOptions.JSONExporter = &jsonexporter.Options{
|
||||
File: options.JSONExport,
|
||||
IncludeRawPayload: !options.OmitRawRequests,
|
||||
File: options.JSONExport,
|
||||
OmitRaw: options.OmitRawRequests,
|
||||
}
|
||||
}
|
||||
if options.JSONLExport != "" {
|
||||
reportingOptions.JSONLExporter = &jsonl.Options{
|
||||
File: options.JSONLExport,
|
||||
IncludeRawPayload: !options.OmitRawRequests,
|
||||
File: options.JSONLExport,
|
||||
OmitRaw: options.OmitRawRequests,
|
||||
}
|
||||
}
|
||||
|
||||
reportingOptions.OmitRaw = options.OmitRawRequests
|
||||
return reportingOptions, nil
|
||||
}
|
||||
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/installer"
|
||||
"github.com/projectdiscovery/ratelimit"
|
||||
uncoverlib "github.com/projectdiscovery/uncover"
|
||||
pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
|
||||
"github.com/projectdiscovery/utils/env"
|
||||
permissionutil "github.com/projectdiscovery/utils/permission"
|
||||
|
||||
@ -325,6 +326,9 @@ func (r *Runner) Close() {
|
||||
if r.output != nil {
|
||||
r.output.Close()
|
||||
}
|
||||
if r.issuesClient != nil {
|
||||
r.issuesClient.Close()
|
||||
}
|
||||
if r.projectFile != nil {
|
||||
r.projectFile.Close()
|
||||
}
|
||||
@ -336,30 +340,41 @@ func (r *Runner) Close() {
|
||||
if r.rateLimiter != nil {
|
||||
r.rateLimiter.Stop()
|
||||
}
|
||||
r.progress.Stop()
|
||||
if r.browser != nil {
|
||||
r.browser.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// setupPDCPUpload sets up the PDCP upload writer
|
||||
// by creating a new writer and returning it
|
||||
func (r *Runner) setupPDCPUpload(writer output.Writer) output.Writer {
|
||||
// if scanid is given implicitly consider that scan upload is enabled
|
||||
if r.options.ScanID != "" {
|
||||
r.options.EnableCloudUpload = true
|
||||
}
|
||||
if !(r.options.EnableCloudUpload || EnableCloudUpload) {
|
||||
// r.pdcpUploadErrMsg = fmt.Sprintf("[%v] Scan results upload to cloud is disabled.", aurora.BrightYellow("WRN"))
|
||||
return writer
|
||||
}
|
||||
color := aurora.NewAurora(!r.options.NoColor)
|
||||
h := &pdcp.PDCPCredHandler{}
|
||||
h := &pdcpauth.PDCPCredHandler{}
|
||||
creds, err := h.GetCreds()
|
||||
if err != nil {
|
||||
if err != pdcp.ErrNoCreds && !HideAutoSaveMsg {
|
||||
if err != pdcpauth.ErrNoCreds && !HideAutoSaveMsg {
|
||||
gologger.Verbose().Msgf("Could not get credentials for cloud upload: %s\n", err)
|
||||
}
|
||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] To view results on Cloud Dashboard, Configure API key from %v", color.BrightYellow("WRN"), pdcp.DashBoardURL)
|
||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] To view results on Cloud Dashboard, Configure API key from %v", color.BrightYellow("WRN"), pdcpauth.DashBoardURL)
|
||||
return writer
|
||||
}
|
||||
uploadWriter, err := pdcp.NewUploadWriter(creds)
|
||||
uploadWriter, err := pdcp.NewUploadWriter(context.Background(), creds)
|
||||
if err != nil {
|
||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] PDCP (%v) Auto-Save Failed: %s\n", color.BrightYellow("WRN"), pdcp.DashBoardURL, err)
|
||||
r.pdcpUploadErrMsg = fmt.Sprintf("[%v] PDCP (%v) Auto-Save Failed: %s\n", color.BrightYellow("WRN"), pdcpauth.DashBoardURL, err)
|
||||
return writer
|
||||
}
|
||||
if r.options.ScanID != "" {
|
||||
uploadWriter.SetScanID(r.options.ScanID)
|
||||
}
|
||||
return output.NewMultiWriter(writer, uploadWriter)
|
||||
}
|
||||
|
||||
@ -488,23 +503,15 @@ func (r *Runner) RunEnumeration(TargetAndPocsName map[string][]string) error {
|
||||
results.CompareAndSwap(false, true)
|
||||
}
|
||||
}
|
||||
r.progress.Stop()
|
||||
|
||||
if executorOpts.InputHelper != nil {
|
||||
_ = executorOpts.InputHelper.Close()
|
||||
}
|
||||
if r.issuesClient != nil {
|
||||
r.issuesClient.Close()
|
||||
}
|
||||
|
||||
// todo: error propagation without canonical straight error check is required by cloud?
|
||||
// use safe dereferencing to avoid potential panics in case of previous unchecked errors
|
||||
if len(output.Results) == 0 {
|
||||
gologger.Info().Msgf("Nuclei引擎无结果,下次好运!")
|
||||
}
|
||||
if r.browser != nil {
|
||||
r.browser.Close()
|
||||
}
|
||||
// check if a passive scan was requested but no target was provided
|
||||
if r.options.OfflineHTTP && len(r.options.Targets) == 0 && r.options.TargetsFilePath == "" {
|
||||
return errors.Wrap(err, "missing required input (http response) to run passive templates")
|
||||
@ -578,7 +585,6 @@ func (r *Runner) displayExecutionInfo(store *loader.Store) {
|
||||
stats.DisplayAsWarning(parsers.TemplatesExecutedStats)
|
||||
}
|
||||
stats.DisplayAsWarning(parsers.UnsignedWarning)
|
||||
|
||||
}
|
||||
|
||||
// SaveResumeConfig to file
|
||||
|
@ -17,7 +17,7 @@ const (
|
||||
CLIConfigFileName = "config.yaml"
|
||||
ReportingConfigFilename = "reporting-config.yaml"
|
||||
// Version is the current version of nuclei
|
||||
Version = `v3.1.3`
|
||||
Version = `v3.1.8`
|
||||
// Directory Names of custom templates
|
||||
CustomS3TemplatesDirName = "s3"
|
||||
CustomGitHubTemplatesDirName = "github"
|
||||
|
@ -18,12 +18,12 @@ import (
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/hmap/filekv"
|
||||
"github.com/projectdiscovery/hmap/store/hybrid"
|
||||
"github.com/projectdiscovery/mapcidr"
|
||||
"github.com/projectdiscovery/mapcidr/asn"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/uncover"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/utils/expand"
|
||||
uncoverlib "github.com/projectdiscovery/uncover"
|
||||
fileutil "github.com/projectdiscovery/utils/file"
|
||||
iputil "github.com/projectdiscovery/utils/ip"
|
||||
@ -121,10 +121,10 @@ func (i *Input) initializeInputSources(opts *Options) error {
|
||||
for _, target := range options.Targets {
|
||||
switch {
|
||||
case iputil.IsCIDR(target):
|
||||
ips := i.expandCIDRInputValue(target)
|
||||
ips := expand.CIDR(target)
|
||||
i.addTargets(ips)
|
||||
case asn.IsASN(target):
|
||||
ips := i.expandASNInputValue(target)
|
||||
ips := expand.ASN(target)
|
||||
i.addTargets(ips)
|
||||
default:
|
||||
i.Set(target)
|
||||
@ -174,10 +174,10 @@ func (i *Input) initializeInputSources(opts *Options) error {
|
||||
for _, target := range options.ExcludeTargets {
|
||||
switch {
|
||||
case iputil.IsCIDR(target):
|
||||
ips := i.expandCIDRInputValue(target)
|
||||
ips := expand.CIDR(target)
|
||||
i.removeTargets(ips)
|
||||
case asn.IsASN(target):
|
||||
ips := i.expandASNInputValue(target)
|
||||
ips := expand.ASN(target)
|
||||
i.removeTargets(ips)
|
||||
default:
|
||||
i.Del(target)
|
||||
@ -195,10 +195,10 @@ func (i *Input) scanInputFromReader(reader io.Reader) {
|
||||
item := scanner.Text()
|
||||
switch {
|
||||
case iputil.IsCIDR(item):
|
||||
ips := i.expandCIDRInputValue(item)
|
||||
ips := expand.CIDR(item)
|
||||
i.addTargets(ips)
|
||||
case asn.IsASN(item):
|
||||
ips := i.expandASNInputValue(item)
|
||||
ips := expand.ASN(item)
|
||||
i.addTargets(ips)
|
||||
default:
|
||||
i.Set(item)
|
||||
@ -489,26 +489,6 @@ func (i *Input) Scan(callback func(value *contextargs.MetaInput) bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// expandCIDRInputValue expands CIDR and stores expanded IPs
|
||||
func (i *Input) expandCIDRInputValue(value string) []string {
|
||||
var ips []string
|
||||
ipsCh, _ := mapcidr.IPAddressesAsStream(value)
|
||||
for ip := range ipsCh {
|
||||
ips = append(ips, ip)
|
||||
}
|
||||
return ips
|
||||
}
|
||||
|
||||
// expandASNInputValue expands CIDRs for given ASN and stores expanded IPs
|
||||
func (i *Input) expandASNInputValue(value string) []string {
|
||||
var ips []string
|
||||
cidrs, _ := asn.GetCIDRsForASNNum(value)
|
||||
for _, cidr := range cidrs {
|
||||
ips = append(ips, i.expandCIDRInputValue(cidr.String())...)
|
||||
}
|
||||
return ips
|
||||
}
|
||||
|
||||
func (i *Input) addTargets(targets []string) {
|
||||
for _, target := range targets {
|
||||
i.Set(target)
|
||||
|
@ -12,10 +12,11 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/utils/expand"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_expandCIDRInputValue(t *testing.T) {
|
||||
func Test_expandCIDR(t *testing.T) {
|
||||
tests := []struct {
|
||||
cidr string
|
||||
expected []string
|
||||
@ -33,7 +34,7 @@ func Test_expandCIDRInputValue(t *testing.T) {
|
||||
require.Nil(t, err, "could not create temporary input file")
|
||||
input := &Input{hostMap: hm}
|
||||
|
||||
ips := input.expandCIDRInputValue(tt.cidr)
|
||||
ips := expand.CIDR(tt.cidr)
|
||||
input.addTargets(ips)
|
||||
// scan
|
||||
got := []string{}
|
||||
@ -170,7 +171,7 @@ func Test_expandASNInputValue(t *testing.T) {
|
||||
require.Nil(t, err, "could not create temporary input file")
|
||||
input := &Input{hostMap: hm}
|
||||
// get the IP addresses for ASN number
|
||||
ips := input.expandASNInputValue(tt.asn)
|
||||
ips := expand.ASN(tt.asn)
|
||||
input.addTargets(ips)
|
||||
// scan the hmap
|
||||
got := []string{}
|
||||
|
@ -2,75 +2,37 @@
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"runtime/debug"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/dop251/goja/parser"
|
||||
"github.com/dop251/goja_nodejs/console"
|
||||
"github.com/dop251/goja_nodejs/require"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/projectdiscovery/gologger"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libbytes"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libfs"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libikev2"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libkerberos"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libldap"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libmssql"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libmysql"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libnet"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/liboracle"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libpop3"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libpostgres"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/librdp"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libredis"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/librsync"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libsmb"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libsmtp"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libssh"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libstructs"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libtelnet"
|
||||
_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libvnc"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/js/global"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/js/libs/goconsole"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
contextutil "github.com/projectdiscovery/utils/context"
|
||||
)
|
||||
|
||||
// Compiler provides a runtime to execute goja runtime
|
||||
// based javascript scripts efficiently while also
|
||||
// providing them access to custom modules defined in libs/.
|
||||
type Compiler struct {
|
||||
registry *require.Registry
|
||||
}
|
||||
type Compiler struct{}
|
||||
|
||||
// New creates a new compiler for the goja runtime.
|
||||
func New() *Compiler {
|
||||
registry := new(require.Registry) // this can be shared by multiple runtimes
|
||||
// autoregister console node module with default printer it uses gologger backend
|
||||
require.RegisterNativeModule(console.ModuleName, console.RequireWithPrinter(goconsole.NewGoConsolePrinter()))
|
||||
return &Compiler{registry: registry}
|
||||
return &Compiler{}
|
||||
}
|
||||
|
||||
// ExecuteOptions provides options for executing a script.
|
||||
type ExecuteOptions struct {
|
||||
// Pool specifies whether to use a pool of goja runtimes
|
||||
// Can be used to speedup execution but requires
|
||||
// the script to not make any global changes.
|
||||
Pool bool
|
||||
|
||||
// CaptureOutput specifies whether to capture the output
|
||||
// of the script execution.
|
||||
CaptureOutput bool
|
||||
|
||||
// CaptureVariables specifies the variables to capture
|
||||
// from the script execution.
|
||||
CaptureVariables []string
|
||||
|
||||
// Callback can be used to register new runtime helper functions
|
||||
// ex: export etc
|
||||
Callback func(runtime *goja.Runtime) error
|
||||
|
||||
// Cleanup is extra cleanup function to be called after execution
|
||||
Cleanup func(runtime *goja.Runtime)
|
||||
|
||||
/// Timeout for this script execution
|
||||
Timeout int
|
||||
}
|
||||
|
||||
// ExecuteArgs is the arguments to pass to the script.
|
||||
@ -105,114 +67,51 @@ func (e ExecuteResult) GetSuccess() bool {
|
||||
|
||||
// Execute executes a script with the default options.
|
||||
func (c *Compiler) Execute(code string, args *ExecuteArgs) (ExecuteResult, error) {
|
||||
return c.ExecuteWithOptions(code, args, &ExecuteOptions{})
|
||||
}
|
||||
|
||||
// VM returns a new goja runtime for the compiler.
|
||||
func (c *Compiler) VM() *goja.Runtime {
|
||||
runtime := c.newRuntime(false)
|
||||
runtime.SetParserOptions(parser.WithDisableSourceMaps)
|
||||
c.registerHelpersForVM(runtime)
|
||||
return runtime
|
||||
}
|
||||
|
||||
// ExecuteWithOptions executes a script with the provided options.
|
||||
func (c *Compiler) ExecuteWithOptions(code string, args *ExecuteArgs, opts *ExecuteOptions) (ExecuteResult, error) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
gologger.Error().Msgf("Recovered panic %s %v: %v", code, args, err)
|
||||
gologger.Verbose().Msgf("%s", debug.Stack())
|
||||
return
|
||||
}
|
||||
}()
|
||||
if opts == nil {
|
||||
opts = &ExecuteOptions{}
|
||||
}
|
||||
runtime := c.newRuntime(opts.Pool)
|
||||
c.registerHelpersForVM(runtime)
|
||||
|
||||
// register runtime functions if any
|
||||
if opts.Callback != nil {
|
||||
if err := opts.Callback(runtime); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if args == nil {
|
||||
args = NewExecuteArgs()
|
||||
}
|
||||
for k, v := range args.Args {
|
||||
_ = runtime.Set(k, v)
|
||||
}
|
||||
if args.TemplateCtx == nil {
|
||||
args.TemplateCtx = make(map[string]interface{})
|
||||
}
|
||||
// merge all args into templatectx
|
||||
args.TemplateCtx = generators.MergeMaps(args.TemplateCtx, args.Args)
|
||||
_ = runtime.Set("template", args.TemplateCtx)
|
||||
|
||||
results, err := runtime.RunString(code)
|
||||
p, err := goja.Compile("", code, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
captured := results.Export()
|
||||
|
||||
if opts.CaptureOutput {
|
||||
return convertOutputToResult(captured)
|
||||
}
|
||||
if len(opts.CaptureVariables) > 0 {
|
||||
return c.captureVariables(runtime, opts.CaptureVariables)
|
||||
}
|
||||
// success is true by default . since js throws errors on failure
|
||||
// hence output result is always success
|
||||
return ExecuteResult{"response": captured, "success": results.ToBoolean()}, nil
|
||||
return c.ExecuteWithOptions(p, args, &ExecuteOptions{})
|
||||
}
|
||||
|
||||
// captureVariables captures the variables from the runtime.
|
||||
func (c *Compiler) captureVariables(runtime *goja.Runtime, variables []string) (ExecuteResult, error) {
|
||||
results := make(ExecuteResult, len(variables))
|
||||
for _, variable := range variables {
|
||||
value := runtime.Get(variable)
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
results[variable] = value.Export()
|
||||
// ExecuteWithOptions executes a script with the provided options.
|
||||
func (c *Compiler) ExecuteWithOptions(program *goja.Program, args *ExecuteArgs, opts *ExecuteOptions) (ExecuteResult, error) {
|
||||
if opts == nil {
|
||||
opts = &ExecuteOptions{}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
if args == nil {
|
||||
args = NewExecuteArgs()
|
||||
}
|
||||
// handle nil maps
|
||||
if args.TemplateCtx == nil {
|
||||
args.TemplateCtx = make(map[string]interface{})
|
||||
}
|
||||
if args.Args == nil {
|
||||
args.Args = make(map[string]interface{})
|
||||
}
|
||||
// merge all args into templatectx
|
||||
args.TemplateCtx = generators.MergeMaps(args.TemplateCtx, args.Args)
|
||||
|
||||
func convertOutputToResult(output interface{}) (ExecuteResult, error) {
|
||||
marshalled, err := jsoniter.Marshal(output)
|
||||
if opts.Timeout <= 0 || opts.Timeout > 180 {
|
||||
// some js scripts can take longer time so allow configuring timeout
|
||||
// from template but keep it within sane limits (180s)
|
||||
opts.Timeout = JsProtocolTimeout
|
||||
}
|
||||
|
||||
// execute with context and timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(opts.Timeout)*time.Second)
|
||||
defer cancel()
|
||||
// execute the script
|
||||
results, err := contextutil.ExecFuncWithTwoReturns(ctx, func() (val goja.Value, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("panic: %v", r)
|
||||
}
|
||||
}()
|
||||
return executeProgram(program, args, opts)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not marshal output")
|
||||
}
|
||||
|
||||
var outputMap map[string]interface{}
|
||||
if err := jsoniter.Unmarshal(marshalled, &outputMap); err != nil {
|
||||
var v interface{}
|
||||
if unmarshalErr := jsoniter.Unmarshal(marshalled, &v); unmarshalErr != nil {
|
||||
return nil, unmarshalErr
|
||||
}
|
||||
outputMap = map[string]interface{}{"output": v}
|
||||
return outputMap, nil
|
||||
}
|
||||
return outputMap, nil
|
||||
}
|
||||
|
||||
// newRuntime creates a new goja runtime
|
||||
// TODO: Add support for runtime reuse for helper functions
|
||||
func (c *Compiler) newRuntime(reuse bool) *goja.Runtime {
|
||||
return protocolstate.NewJSRuntime()
|
||||
}
|
||||
|
||||
// registerHelpersForVM registers all the helper functions for the goja runtime.
|
||||
func (c *Compiler) registerHelpersForVM(runtime *goja.Runtime) {
|
||||
_ = c.registry.Enable(runtime)
|
||||
// by default import below modules every time
|
||||
_ = runtime.Set("console", require.Require(runtime, console.ModuleName))
|
||||
|
||||
// Register embedded scripts
|
||||
if err := global.RegisterNativeScripts(runtime); err != nil {
|
||||
gologger.Error().Msgf("Could not register scripts: %s\n", err)
|
||||
return nil, err
|
||||
}
|
||||
return ExecuteResult{"response": results.Export(), "success": results.ToBoolean()}, nil
|
||||
}
|
||||
|
@ -38,36 +38,6 @@ func TestExecuteResultGetSuccess(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompilerCaptureVariables(t *testing.T) {
|
||||
compiler := New()
|
||||
result, err := compiler.ExecuteWithOptions("var a = 1;", NewExecuteArgs(), &ExecuteOptions{CaptureVariables: []string{"a"}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotValue, ok := result["a"]
|
||||
if !ok {
|
||||
t.Fatalf("expected a to be present in the result")
|
||||
}
|
||||
if gotValue.(int64) != 1 {
|
||||
t.Fatalf("expected a to be 1, got=%v", gotValue)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompilerCaptureOutput(t *testing.T) {
|
||||
compiler := New()
|
||||
result, err := compiler.ExecuteWithOptions("let obj = {'a':'b'}; obj", NewExecuteArgs(), &ExecuteOptions{CaptureOutput: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotValue, ok := result["a"]
|
||||
if !ok {
|
||||
t.Fatalf("expected a to be present in the result")
|
||||
}
|
||||
if gotValue.(string) != "b" {
|
||||
t.Fatalf("expected a to be b, got=%v", gotValue)
|
||||
}
|
||||
}
|
||||
|
||||
type noopWriter struct {
|
||||
Callback func(data []byte, level levels.Level)
|
||||
}
|
||||
|
26
lib/nuclei/pkg/js/compiler/init.go
Executable file
@ -0,0 +1,26 @@
package compiler

import "github.com/projectdiscovery/nuclei/v3/pkg/types"

// jsprotocolInit

var (
	// Per Execution Javascript timeout in seconds
	JsProtocolTimeout = 10
	JsVmConcurrency   = 500
)

// Init initializes the javascript protocol
func Init(opts *types.Options) error {
	if opts.Timeout < 10 {
		// keep existing 10s timeout
		return nil
	}
	if opts.JsConcurrency < 100 {
		// 100 is reasonable default
		opts.JsConcurrency = 100
	}
	JsProtocolTimeout = opts.Timeout
	JsVmConcurrency = opts.JsConcurrency
	return nil
}
116
lib/nuclei/pkg/js/compiler/pool.go
Executable file
@ -0,0 +1,116 @@
package compiler

import (
	"fmt"
	"sync"

	"github.com/dop251/goja"
	"github.com/dop251/goja_nodejs/console"
	"github.com/dop251/goja_nodejs/require"
	"github.com/projectdiscovery/gologger"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libbytes"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libfs"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libikev2"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libkerberos"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libldap"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libmssql"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libmysql"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libnet"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/liboracle"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libpop3"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libpostgres"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/librdp"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libredis"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/librsync"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libsmb"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libsmtp"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libssh"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libstructs"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libtelnet"
	_ "github.com/projectdiscovery/nuclei/v3/pkg/js/generated/go/libvnc"
	"github.com/projectdiscovery/nuclei/v3/pkg/js/global"
	"github.com/projectdiscovery/nuclei/v3/pkg/js/libs/goconsole"
	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
	"github.com/remeh/sizedwaitgroup"
)

var (
	r                *require.Registry
	lazyRegistryInit = sync.OnceFunc(func() {
		r = new(require.Registry) // this can be shared by multiple runtimes
		// autoregister console node module with default printer it uses gologger backend
		require.RegisterNativeModule(console.ModuleName, console.RequireWithPrinter(goconsole.NewGoConsolePrinter()))
	})
	sg         sizedwaitgroup.SizedWaitGroup
	lazySgInit = sync.OnceFunc(func() {
		sg = sizedwaitgroup.New(JsVmConcurrency)
	})
)

func getRegistry() *require.Registry {
	lazyRegistryInit()
	return r
}

var gojapool = &sync.Pool{
	New: func() interface{} {
		runtime := protocolstate.NewJSRuntime()
		_ = getRegistry().Enable(runtime)
		// by default import below modules every time
		_ = runtime.Set("console", require.Require(runtime, console.ModuleName))

		// Register embedded javacript helpers
		if err := global.RegisterNativeScripts(runtime); err != nil {
			gologger.Error().Msgf("Could not register scripts: %s\n", err)
		}
		return runtime
	},
}

// executes the actual js program
func executeProgram(p *goja.Program, args *ExecuteArgs, opts *ExecuteOptions) (result goja.Value, err error) {
	// its unknown (most likely cannot be done) to limit max js runtimes at a moment without making it static
	// unlike sync.Pool which reacts to GC and its purposes is to reuse objects rather than creating new ones
	lazySgInit()
	sg.Add()
	defer sg.Done()
	runtime := gojapool.Get().(*goja.Runtime)
	defer func() {
		// reset before putting back to pool
		_ = runtime.GlobalObject().Delete("template") // template ctx
		// remove all args
		for k := range args.Args {
			_ = runtime.GlobalObject().Delete(k)
		}
		if opts != nil && opts.Cleanup != nil {
			opts.Cleanup(runtime)
		}
		gojapool.Put(runtime)
	}()
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %s", r)
		}
	}()
	// set template ctx
	_ = runtime.Set("template", args.TemplateCtx)
	// set args
	for k, v := range args.Args {
		_ = runtime.Set(k, v)
	}
	// register extra callbacks if any
	if opts != nil && opts.Callback != nil {
		if err := opts.Callback(runtime); err != nil {
			return nil, err
		}

	}
	// execute the script
	return runtime.RunProgram(p)
}

// Internal purposes i.e generating bindings
func InternalGetGeneratorRuntime() *goja.Runtime {
	runtime := gojapool.Get().(*goja.Runtime)
	return runtime
}
@ -150,8 +150,7 @@ func CreateTemplateData(directory string, packagePrefix string) (*TemplateData,
|
||||
// InitNativeScripts initializes the native scripts array
|
||||
// with all the exported functions from the runtime
|
||||
func (d *TemplateData) InitNativeScripts() {
|
||||
compiler := compiler.New()
|
||||
runtime := compiler.VM()
|
||||
runtime := compiler.InternalGetGeneratorRuntime()
|
||||
|
||||
exports := runtime.Get("exports")
|
||||
if exports == nil {
|
||||
|
@ -97,6 +97,7 @@ func (c *KerberosClient) EnumerateUser(domain, controller string, username strin
|
||||
return resp, err
|
||||
}
|
||||
cl := kclient.NewWithPassword(username, opts.realm, "foobar", opts.config, kclient.DisablePAFXFAST(true))
|
||||
defer cl.Destroy()
|
||||
|
||||
req, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName())
|
||||
if err != nil {
|
||||
@ -143,3 +144,50 @@ func asRepToHashcat(asrep messages.ASRep) (string, error) {
|
||||
hex.EncodeToString(asrep.EncPart.Cipher[:16]),
|
||||
hex.EncodeToString(asrep.EncPart.Cipher[16:])), nil
|
||||
}
|
||||
|
||||
type TGS struct {
|
||||
Ticket messages.Ticket
|
||||
Hash string
|
||||
}
|
||||
|
||||
func (c *KerberosClient) GetServiceTicket(domain, controller string, username, password string, target, spn string) (TGS, error) {
|
||||
var tgs TGS
|
||||
|
||||
if !protocolstate.IsHostAllowed(domain) {
|
||||
// host is not valid according to network policy
|
||||
return tgs, protocolstate.ErrHostDenied.Msgf(domain)
|
||||
}
|
||||
|
||||
opts, err := newKerbrosEnumUserOpts(domain, controller)
|
||||
if err != nil {
|
||||
return tgs, err
|
||||
}
|
||||
cl := kclient.NewWithPassword(username, opts.realm, password, opts.config, kclient.DisablePAFXFAST(true))
|
||||
defer cl.Destroy()
|
||||
|
||||
ticket, _, err := cl.GetServiceTicket(spn)
|
||||
if err != nil {
|
||||
return tgs, err
|
||||
}
|
||||
|
||||
hashcat, err := tgsToHashcat(ticket, target)
|
||||
if err != nil {
|
||||
return tgs, err
|
||||
}
|
||||
|
||||
return TGS{
|
||||
Ticket: ticket,
|
||||
Hash: hashcat,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func tgsToHashcat(tgs messages.Ticket, username string) (string, error) {
|
||||
return fmt.Sprintf("$krb5tgs$%d$*%s$%s$%s*$%s$%s",
|
||||
tgs.EncPart.EType,
|
||||
username,
|
||||
tgs.Realm,
|
||||
strings.Join(tgs.SName.NameString[:], "/"),
|
||||
hex.EncodeToString(tgs.EncPart.Cipher[:16]),
|
||||
hex.EncodeToString(tgs.EncPart.Cipher[16:]),
|
||||
), nil
|
||||
}
|
||||
|
@ -203,3 +203,103 @@ func getBaseNamingContext(opts *ldapSessionOptions, conn *ldap.Conn) (string, er
|
||||
opts.baseDN = defaultNamingContext
|
||||
return opts.baseDN, nil
|
||||
}
|
||||
|
||||
// KerberoastableUser contains the important fields of the Active Directory
|
||||
// kerberoastable user
|
||||
type KerberoastableUser struct {
|
||||
SAMAccountName string
|
||||
ServicePrincipalName string
|
||||
PWDLastSet string
|
||||
MemberOf string
|
||||
UserAccountControl string
|
||||
LastLogon string
|
||||
}
|
||||
|
||||
// GetKerberoastableUsers collects all "person" users that have an SPN
|
||||
// associated with them. The LDAP filter is built with the same logic as
|
||||
// "GetUserSPNs.py", the well-known impacket example by Forta.
|
||||
// https://github.com/fortra/impacket/blob/master/examples/GetUserSPNs.py#L297
|
||||
//
|
||||
// Returns a list of KerberoastableUser, if an error occurs, returns an empty
|
||||
// slice and the raised error
|
||||
func (c *LdapClient) GetKerberoastableUsers(domain, controller string, username, password string) ([]KerberoastableUser, error) {
|
||||
opts := &ldapSessionOptions{
|
||||
domain: domain,
|
||||
domainController: controller,
|
||||
username: username,
|
||||
password: password,
|
||||
}
|
||||
|
||||
if !protocolstate.IsHostAllowed(domain) {
|
||||
// host is not valid according to network policy
|
||||
return nil, protocolstate.ErrHostDenied.Msgf(domain)
|
||||
}
|
||||
|
||||
conn, err := c.newLdapSession(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer c.close(conn)
|
||||
|
||||
domainParts := strings.Split(domain, ".")
|
||||
if username == "" {
|
||||
err = conn.UnauthenticatedBind("")
|
||||
} else {
|
||||
err = conn.Bind(
|
||||
fmt.Sprintf("%v\\%v", domainParts[0], username),
|
||||
password,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var baseDN strings.Builder
|
||||
for i, part := range domainParts {
|
||||
baseDN.WriteString("DC=")
|
||||
baseDN.WriteString(part)
|
||||
if i != len(domainParts)-1 {
|
||||
baseDN.WriteString(",")
|
||||
}
|
||||
}
|
||||
|
||||
sr := ldap.NewSearchRequest(
|
||||
baseDN.String(),
|
||||
ldap.ScopeWholeSubtree,
|
||||
ldap.NeverDerefAliases,
|
||||
0, 0, false,
|
||||
// (&(is_user) (!(account_is_disabled)) (has_SPN))
|
||||
"(&(objectCategory=person)(!(userAccountControl:1.2.840.113556.1.4.803:=2))(servicePrincipalName=*))",
|
||||
[]string{
|
||||
"SAMAccountName",
|
||||
"ServicePrincipalName",
|
||||
"pwdLastSet",
|
||||
"MemberOf",
|
||||
"userAccountControl",
|
||||
"lastLogon",
|
||||
},
|
||||
nil,
|
||||
)
|
||||
|
||||
res, err := conn.Search(sr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(res.Entries) == 0 {
|
||||
return nil, fmt.Errorf("no kerberoastable user found")
|
||||
}
|
||||
|
||||
var ku []KerberoastableUser
|
||||
for _, usr := range res.Entries {
|
||||
ku = append(ku, KerberoastableUser{
|
||||
SAMAccountName: usr.GetAttributeValue("sAMAccountName"),
|
||||
ServicePrincipalName: usr.GetAttributeValue("servicePrincipalName"),
|
||||
PWDLastSet: usr.GetAttributeValue("pwdLastSet"),
|
||||
MemberOf: usr.GetAttributeValue("MemberOf"),
|
||||
UserAccountControl: usr.GetAttributeValue("userAccountControl"),
|
||||
LastLogon: usr.GetAttributeValue("lastLogon"),
|
||||
})
|
||||
}
|
||||
return ku, nil
|
||||
}
|
||||
|
@ -4,11 +4,13 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/praetorian-inc/fingerprintx/pkg/plugins"
|
||||
mysqlplugin "github.com/praetorian-inc/fingerprintx/pkg/plugins/services/mysql"
|
||||
utils "github.com/projectdiscovery/nuclei/v3/pkg/js/utils"
|
||||
@ -66,6 +68,24 @@ func (c *MySQLClient) ConnectWithDB(host string, port int, username, password, d
|
||||
return connect(host, port, username, password, dbName)
|
||||
}
|
||||
|
||||
// ConnectWithDSN connects to MySQL database using given DSN.
|
||||
// we override mysql dialer with fastdialer so it respects network policy
|
||||
func (c *MySQLClient) ConnectWithDSN(dsn string) (bool, error) {
|
||||
db, err := sql.Open("mysql", dsn)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer db.Close()
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetMaxIdleConns(0)
|
||||
|
||||
_, err = db.Exec("select 1")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func connect(host string, port int, username, password, dbName string) (bool, error) {
|
||||
if host == "" || port <= 0 {
|
||||
return false, fmt.Errorf("invalid host or port")
|
||||
@ -78,7 +98,7 @@ func connect(host string, port int, username, password, dbName string) (bool, er
|
||||
|
||||
target := net.JoinHostPort(host, fmt.Sprintf("%d", port))
|
||||
|
||||
db, err := sql.Open("mysql", fmt.Sprintf("%v:%v@tcp(%v)/%s",
|
||||
db, err := sql.Open("mysql", fmt.Sprintf("%v:%v@tcp(%v)/%s?allowOldPasswords=1",
|
||||
url.PathEscape(username),
|
||||
url.PathEscape(password),
|
||||
target,
|
||||
@ -87,6 +107,8 @@ func connect(host string, port int, username, password, dbName string) (bool, er
|
||||
return false, err
|
||||
}
|
||||
defer db.Close()
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetMaxIdleConns(0)
|
||||
|
||||
_, err = db.Exec("select 1")
|
||||
if err != nil {
|
||||
@ -115,6 +137,8 @@ func (c *MySQLClient) ExecuteQuery(host string, port int, username, password, db
|
||||
return "", err
|
||||
}
|
||||
defer db.Close()
|
||||
db.SetMaxOpenConns(1)
|
||||
db.SetMaxIdleConns(0)
|
||||
|
||||
rows, err := db.Query(query)
|
||||
if err != nil {
|
||||
@ -126,3 +150,7 @@ func (c *MySQLClient) ExecuteQuery(host string, port int, username, password, db
|
||||
}
|
||||
return string(resp), nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
_ = mysql.SetLogger(log.New(io.Discard, "", 0))
|
||||
}
|
||||
|
@ -3,11 +3,10 @@ package smb
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/hirochachacha/go-smb2"
|
||||
"github.com/praetorian-inc/fingerprintx/pkg/plugins"
|
||||
"github.com/projectdiscovery/go-smb2"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/zmap/zgrab2/lib/smb/smb"
|
||||
)
|
||||
@ -15,7 +14,7 @@ import (
|
||||
// SMBClient is a client for SMB servers.
|
||||
//
|
||||
// Internally client uses github.com/zmap/zgrab2/lib/smb/smb driver.
|
||||
// github.com/hirochachacha/go-smb2 driver
|
||||
// github.com/projectdiscovery/go-smb2 driver
|
||||
type SMBClient struct{}
|
||||
|
||||
// ConnectSMBInfoMode tries to connect to provided host and port
|
||||
@ -24,26 +23,30 @@ type SMBClient struct{}
|
||||
// Returns handshake log and error. If error is not nil,
|
||||
// state will be false
|
||||
func (c *SMBClient) ConnectSMBInfoMode(host string, port int) (*smb.SMBLog, error) {
|
||||
if !protocolstate.IsHostAllowed(host) {
|
||||
// host is not valid according to network policy
|
||||
return nil, protocolstate.ErrHostDenied.Msgf(host)
|
||||
}
|
||||
conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", fmt.Sprintf("%s:%d", host, port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
// try to get SMBv2/v3 info
|
||||
result, err := c.getSMBInfo(conn, true, false)
|
||||
_ = conn.Close() // close regardless of error
|
||||
if err == nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
_ = conn.SetDeadline(time.Now().Add(10 * time.Second))
|
||||
setupSession := true
|
||||
|
||||
result, err := smb.GetSMBLog(conn, setupSession, false, false)
|
||||
// try to negotiate SMBv1
|
||||
conn, err = protocolstate.Dialer.Dial(context.TODO(), "tcp", fmt.Sprintf("%s:%d", host, port))
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), 10*time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result, err = smb.GetSMBLog(conn, setupSession, true, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
result, err = c.getSMBInfo(conn, true, true)
|
||||
if err != nil {
|
||||
return result, nil
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@ -67,6 +70,10 @@ func (c *SMBClient) ListSMBv2Metadata(host string, port int) (*plugins.ServiceSM
|
||||
// Credentials cannot be blank. guest or anonymous credentials
|
||||
// can be used by providing empty password.
|
||||
func (c *SMBClient) ListShares(host string, port int, user, password string) ([]string, error) {
|
||||
if !protocolstate.IsHostAllowed(host) {
|
||||
// host is not valid according to network policy
|
||||
return nil, protocolstate.ErrHostDenied.Msgf(host)
|
||||
}
|
||||
conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", fmt.Sprintf("%s:%d", host, port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -9,8 +9,11 @@ import (
|
||||
"github.com/praetorian-inc/fingerprintx/pkg/plugins"
|
||||
"github.com/praetorian-inc/fingerprintx/pkg/plugins/services/smb"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
zgrabsmb "github.com/zmap/zgrab2/lib/smb/smb"
|
||||
)
|
||||
|
||||
// ==== private helper functions/methods ====
|
||||
|
||||
// collectSMBv2Metadata collects metadata for SMBv2 services.
|
||||
func collectSMBv2Metadata(host string, port int, timeout time.Duration) (*plugins.ServiceSMB, error) {
|
||||
if timeout == 0 {
|
||||
@ -28,3 +31,17 @@ func collectSMBv2Metadata(host string, port int, timeout time.Duration) (*plugin
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// getSMBInfo
|
||||
func (c *SMBClient) getSMBInfo(conn net.Conn, setupSession, v1 bool) (*zgrabsmb.SMBLog, error) {
|
||||
_ = conn.SetDeadline(time.Now().Add(10 * time.Second))
|
||||
defer func() {
|
||||
_ = conn.SetDeadline(time.Time{})
|
||||
}()
|
||||
|
||||
result, err := zgrabsmb.GetSMBLog(conn, setupSession, v1, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
@ -20,6 +20,10 @@ const (
|
||||
// DetectSMBGhost tries to detect SMBGhost vulnerability
|
||||
// by using SMBv3 compression feature.
|
||||
func (c *SMBClient) DetectSMBGhost(host string, port int) (bool, error) {
|
||||
if !protocolstate.IsHostAllowed(host) {
|
||||
// host is not valid according to network policy
|
||||
return false, protocolstate.ErrHostDenied.Msgf(host)
|
||||
}
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
conn, err := protocolstate.Dialer.Dial(context.TODO(), "tcp", addr)
|
||||
if err != nil {
|
||||
|
@ -30,7 +30,6 @@ func (e *Extractor) ExtractRegex(corpus string) map[string]struct{} {
|
||||
}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
||||
@ -58,7 +57,6 @@ func (e *Extractor) ExtractKval(data map[string]interface{}) map[string]struct{}
|
||||
results[itemString] = struct{}{}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
||||
@ -96,7 +94,6 @@ func (e *Extractor) ExtractHTML(corpus string) map[string]struct{} {
|
||||
}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
||||
@ -127,7 +124,6 @@ func (e *Extractor) ExtractXML(corpus string) map[string]struct{} {
|
||||
}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
||||
@ -164,7 +160,6 @@ func (e *Extractor) ExtractJSON(corpus string) map[string]struct{} {
|
||||
}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
||||
@ -187,6 +182,5 @@ func (e *Extractor) ExtractDSL(data map[string]interface{}) map[string]struct{}
|
||||
}
|
||||
}
|
||||
}
|
||||
e.SaveToFile(results)
|
||||
return results
|
||||
}
|
||||
|
@ -1,14 +1,10 @@
|
||||
package extractors
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/Knetic/govaluate"
|
||||
"github.com/itchyny/gojq"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
fileutil "github.com/projectdiscovery/utils/file"
|
||||
)
|
||||
|
||||
// Extractor is used to extract part of response using a regex.
|
||||
@ -117,36 +113,4 @@ type Extractor struct {
|
||||
// - false
|
||||
// - true
|
||||
CaseInsensitive bool `yaml:"case-insensitive,omitempty" json:"case-insensitive,omitempty" jsonschema:"title=use case insensitive extract,description=use case insensitive extract"`
|
||||
// description: |
|
||||
// ToFile (to) saves extracted requests to file and if file is present values are appended to file.
|
||||
ToFile string `yaml:"to,omitempty" json:"to,omitempty" jsonschema:"title=save extracted values to file,description=save extracted values to file"`
|
||||
}
|
||||
|
||||
// SaveToFile saves extracted values to file if `to` is present and valid
|
||||
func (e *Extractor) SaveToFile(data map[string]struct{}) {
|
||||
if e.ToFile == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if !fileutil.FileExists(e.ToFile) {
|
||||
baseDir := filepath.Dir(e.ToFile)
|
||||
if baseDir != "." && !fileutil.FolderExists(baseDir) {
|
||||
if err := fileutil.CreateFolder(baseDir); err != nil {
|
||||
gologger.Error().Msgf("extractor: could not create folder %s: %s\n", baseDir, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
file, err := os.OpenFile(e.ToFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("extractor: could not open file %s: %s\n", e.ToFile, err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
for k := range data {
|
||||
if _, err = file.WriteString(k + "\n"); err != nil {
|
||||
gologger.Error().Msgf("extractor: could not write to file %s: %s\n", e.ToFile, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -120,6 +120,14 @@ type Matcher struct {
|
||||
// - false
|
||||
// - true
|
||||
MatchAll bool `yaml:"match-all,omitempty" json:"match-all,omitempty" jsonschema:"title=match all values,description=match all matcher values ignoring condition"`
|
||||
// description: |
|
||||
// Internal when true hides the matcher from output. Default is false.
|
||||
// It is meant to be used in multiprotocol / flow templates to create internal matcher condition without printing it in output.
|
||||
// or other similar use cases.
|
||||
// values:
|
||||
// - false
|
||||
// - true
|
||||
Internal bool `yaml:"internal,omitempty" json:"internal,omitempty" jsonschema:"title=hide matcher from output,description=hide matcher from output"`
|
||||
|
||||
// cached data for the compiled matcher
|
||||
condition ConditionType // todo: this field should be the one used for overridden marshal ops
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var commonExpectedFields = []string{"Type", "Condition", "Name", "MatchAll", "Negative"}
|
||||
var commonExpectedFields = []string{"Type", "Condition", "Name", "MatchAll", "Negative", "Internal"}
|
||||
|
||||
// Validate perform initial validation on the matcher structure
|
||||
func (matcher *Matcher) Validate() error {
|
||||
|
@ -90,6 +90,8 @@ type Result struct {
|
||||
|
||||
// Optional lineCounts for file protocol
|
||||
LineCount string
|
||||
// Operators is reference to operators that generated this result (Read-Only)
|
||||
Operators *Operators
|
||||
}
|
||||
|
||||
func (result *Result) HasMatch(name string) bool {
|
||||
@ -194,7 +196,11 @@ func (r *Result) Merge(result *Result) {
|
||||
}
|
||||
}
|
||||
for k, v := range result.DynamicValues {
|
||||
r.DynamicValues[k] = v
|
||||
if _, ok := r.DynamicValues[k]; !ok {
|
||||
r.DynamicValues[k] = v
|
||||
} else {
|
||||
r.DynamicValues[k] = sliceutil.Dedupe(append(r.DynamicValues[k], v...))
|
||||
}
|
||||
}
|
||||
for k, v := range result.PayloadValues {
|
||||
r.PayloadValues[k] = v
|
||||
@ -217,10 +223,17 @@ func (operators *Operators) Execute(data map[string]interface{}, match MatchFunc
|
||||
Extracts: make(map[string][]string),
|
||||
DynamicValues: make(map[string][]string),
|
||||
outputUnique: make(map[string]struct{}),
|
||||
Operators: operators,
|
||||
}
|
||||
|
||||
// state variable to check if all extractors are internal
|
||||
var allInternalExtractors bool = true
|
||||
|
||||
// Start with the extractors first and evaluate them.
|
||||
for _, extractor := range operators.Extractors {
|
||||
if !extractor.Internal && allInternalExtractors {
|
||||
allInternalExtractors = false
|
||||
}
|
||||
var extractorResults []string
|
||||
for match := range extract(data, extractor) {
|
||||
extractorResults = append(extractorResults, match)
|
||||
@ -241,6 +254,10 @@ func (operators *Operators) Execute(data map[string]interface{}, match MatchFunc
|
||||
if len(extractorResults) > 0 && !extractor.Internal && extractor.Name != "" {
|
||||
result.Extracts[extractor.Name] = extractorResults
|
||||
}
|
||||
// update data with whatever was extracted doesn't matter if it is internal or not (skip unless it empty)
|
||||
if len(extractorResults) > 0 {
|
||||
data[extractor.Name] = getExtractedValue(extractorResults)
|
||||
}
|
||||
}
|
||||
|
||||
// expose dynamic values to same request matchers
|
||||
@ -288,7 +305,9 @@ func (operators *Operators) Execute(data map[string]interface{}, match MatchFunc
|
||||
|
||||
result.Matched = matches
|
||||
result.Extracted = len(result.OutputExtracts) > 0
|
||||
if len(result.DynamicValues) > 0 {
|
||||
if len(result.DynamicValues) > 0 && allInternalExtractors {
|
||||
// only return early if all extractors are internal
|
||||
// if some are internal and some are not then followthrough
|
||||
return result, true
|
||||
}
|
||||
|
||||
@ -339,3 +358,13 @@ func (operators *Operators) IsEmpty() bool {
|
||||
func (operators *Operators) Len() int {
|
||||
return len(operators.Matchers) + len(operators.Extractors)
|
||||
}
|
||||
|
||||
// getExtractedValue takes array of extracted values if it only has one value
|
||||
// then it is flattened and returned as a string else original type is returned
|
||||
func getExtractedValue(values []string) any {
|
||||
if len(values) == 1 {
|
||||
return values[0]
|
||||
} else {
|
||||
return values
|
||||
}
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package output
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
mapsutil "github.com/projectdiscovery/utils/maps"
|
||||
@ -57,6 +58,9 @@ func (w *StandardWriter) formatScreen(output *ResultEvent) []byte {
|
||||
builder.WriteString(" [")
|
||||
|
||||
for i, item := range output.ExtractedResults {
|
||||
// trim trailing space
|
||||
item = strings.TrimSpace(item)
|
||||
item = strconv.QuoteToASCII(item)
|
||||
builder.WriteString(w.aurora.BrightCyan(item).String())
|
||||
|
||||
if i != len(output.ExtractedResults)-1 {
|
||||
|
@ -1,8 +1,10 @@
|
||||
package code
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -25,9 +27,19 @@ import (
|
||||
protocolutils "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
|
||||
templateTypes "github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
contextutil "github.com/projectdiscovery/utils/context"
|
||||
errorutil "github.com/projectdiscovery/utils/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
pythonEnvRegex = `os\.getenv\(['"]([^'"]+)['"]\)`
|
||||
TimeoutMultiplier = 6 // timeout multiplier for code protocol
|
||||
)
|
||||
|
||||
var (
|
||||
pythonEnvRegexCompiled = regexp.MustCompile(pythonEnvRegex)
|
||||
)
|
||||
|
||||
// Request is a request for the SSL protocol
|
||||
type Request struct {
|
||||
// Operators for the current request go here.
|
||||
@ -112,12 +124,17 @@ func (request *Request) GetID() string {
|
||||
}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicValues, previous output.InternalEvent, callback protocols.OutputEventCallback) error {
|
||||
func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicValues, previous output.InternalEvent, callback protocols.OutputEventCallback) (err error) {
|
||||
metaSrc, err := gozero.NewSourceWithString(input.MetaInput.Input, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
// catch any panics just in case
|
||||
if r := recover(); r != nil {
|
||||
gologger.Error().Msgf("[%s] Panic occurred in code protocol: %s\n", request.options.TemplateID, r)
|
||||
err = fmt.Errorf("panic occurred: %s", r)
|
||||
}
|
||||
if err := metaSrc.Cleanup(); err != nil {
|
||||
gologger.Warning().Msgf("%s\n", err)
|
||||
}
|
||||
@ -125,31 +142,51 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
|
||||
|
||||
var interactshURLs []string
|
||||
|
||||
// inject all template context values as gozero env variables
|
||||
variables := protocolutils.GenerateVariables(input.MetaInput.Input, false, nil)
|
||||
// add template context values
|
||||
variables = generators.MergeMaps(variables, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
// inject all template context values as gozero env allvars
|
||||
allvars := protocolutils.GenerateVariables(input.MetaInput.Input, false, nil)
|
||||
// add template context values if available
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
allvars = generators.MergeMaps(allvars, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
// optionvars are vars passed from CLI or env variables
|
||||
optionVars := generators.BuildPayloadFromOptions(request.options.Options)
|
||||
variablesMap := request.options.Variables.Evaluate(variables)
|
||||
variables = generators.MergeMaps(variablesMap, variables, optionVars, request.options.Constants)
|
||||
for name, value := range variables {
|
||||
variablesMap := request.options.Variables.Evaluate(allvars)
|
||||
// since we evaluate variables using allvars, give precedence to variablesMap
|
||||
allvars = generators.MergeMaps(allvars, variablesMap, optionVars, request.options.Constants)
|
||||
for name, value := range allvars {
|
||||
v := fmt.Sprint(value)
|
||||
v, interactshURLs = request.options.Interactsh.Replace(v, interactshURLs)
|
||||
// if value is updated by interactsh, update allvars to reflect the change downstream
|
||||
allvars[name] = v
|
||||
metaSrc.AddVariable(gozerotypes.Variable{Name: name, Value: v})
|
||||
}
|
||||
gOutput, err := request.gozero.Eval(context.Background(), request.src, metaSrc)
|
||||
if err != nil {
|
||||
return err
|
||||
timeout := TimeoutMultiplier * request.options.Options.Timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
|
||||
defer cancel()
|
||||
// Note: we use contextutil despite the fact that gozero accepts context as argument
|
||||
gOutput, err := contextutil.ExecFuncWithTwoReturns(ctx, func() (*gozerotypes.Result, error) {
|
||||
return request.gozero.Eval(ctx, request.src, metaSrc)
|
||||
})
|
||||
if gOutput == nil {
|
||||
// write error to stderr buff
|
||||
var buff bytes.Buffer
|
||||
if err != nil {
|
||||
buff.WriteString(err.Error())
|
||||
} else {
|
||||
buff.WriteString("no output something went wrong")
|
||||
}
|
||||
gOutput = &gozerotypes.Result{
|
||||
Stderr: buff,
|
||||
}
|
||||
}
|
||||
gologger.Verbose().Msgf("[%s] Executed code on local machine %v", request.options.TemplateID, input.MetaInput.Input)
|
||||
|
||||
if vardump.EnableVarDump {
|
||||
gologger.Debug().Msgf("Code Protocol request variables: \n%s\n", vardump.DumpVariables(variables))
|
||||
gologger.Debug().Msgf("Code Protocol request variables: \n%s\n", vardump.DumpVariables(allvars))
|
||||
}
|
||||
|
||||
if request.options.Options.Debug || request.options.Options.DebugRequests {
|
||||
gologger.Debug().Msgf("[%s] Dumped Executed Source Code for %v\n\n%v\n", request.options.TemplateID, input.MetaInput.Input, request.Source)
|
||||
gologger.Debug().Msgf("[%s] Dumped Executed Source Code for %v\n\n%v\n", request.options.TemplateID, input.MetaInput.Input, interpretEnvVars(request.Source, allvars))
|
||||
}
|
||||
|
||||
dataOutputString := fmtStdout(gOutput.Stdout.String())
|
||||
@ -171,7 +208,9 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
|
||||
request.options.AddTemplateVars(input.MetaInput, request.Type(), request.ID, data)
|
||||
|
||||
// add variables from template context before matching/extraction
|
||||
data = generators.MergeMaps(data, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
data = generators.MergeMaps(data, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
|
||||
if request.options.Interactsh != nil {
|
||||
request.options.Interactsh.MakePlaceholders(interactshURLs, data)
|
||||
@ -272,3 +311,24 @@ func (request *Request) MakeResultEventItem(wrapped *output.InternalWrappedEvent
|
||||
func fmtStdout(data string) string {
|
||||
return strings.Trim(data, " \n\r\t")
|
||||
}
|
||||
|
||||
// interpretEnvVars replaces environment variables in the input string
|
||||
func interpretEnvVars(source string, vars map[string]interface{}) string {
|
||||
// bash mode
|
||||
if strings.Contains(source, "$") {
|
||||
for k, v := range vars {
|
||||
source = strings.ReplaceAll(source, "$"+k, fmt.Sprintf("'%s'", v))
|
||||
}
|
||||
}
|
||||
// python mode
|
||||
if strings.Contains(source, "os.getenv") {
|
||||
matches := pythonEnvRegexCompiled.FindAllStringSubmatch(source, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) == 0 {
|
||||
continue
|
||||
}
|
||||
source = strings.ReplaceAll(source, fmt.Sprintf("os.getenv('%s')", match), fmt.Sprintf("'%s'", vars[match[0]]))
|
||||
}
|
||||
}
|
||||
return source
|
||||
}
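For context, here is a minimal, self-contained sketch of the bash-mode substitution that interpretEnvVars performs above; the template source and variable names are made up purely for illustration, and the real function additionally handles the python `os.getenv` mode via a precompiled regex.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// hypothetical template source and variables, only to illustrate the "$name" replacement
	source := `curl -H "Authorization: $token" $target`
	vars := map[string]interface{}{
		"token":  "abc123",
		"target": "https://example.com",
	}
	for k, v := range vars {
		// each "$name" occurrence is replaced with the single-quoted value, as in interpretEnvVars
		source = strings.ReplaceAll(source, "$"+k, fmt.Sprintf("'%s'", v))
	}
	fmt.Println(source)
}
```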
|
||||
|
@ -1,8 +1,6 @@
|
||||
package automaticscan
|
||||
|
||||
import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/templates"
|
||||
sliceutil "github.com/projectdiscovery/utils/slice"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -16,8 +14,10 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httpclientpool"
|
||||
httputil "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils/http"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/templates"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
sliceutil "github.com/projectdiscovery/utils/slice"
|
||||
wappalyzer "github.com/projectdiscovery/wappalyzergo"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
@ -30,6 +30,14 @@ func CreateEventWithAdditionalOptions(request protocols.Request, outputEvent out
|
||||
if compiledOperator != nil {
|
||||
result, ok := compiledOperator.Execute(outputEvent, request.Match, request.Extract, isResponseDebug)
|
||||
if ok && result != nil {
|
||||
// if result has both extracted values and dynamic values, put dynamic values in data
|
||||
// and remove dynamic values to avoid skipping legitimate event
|
||||
if (len(result.Extracts) > 0 || len(result.OutputExtracts) > 0) && len(result.DynamicValues) > 0 {
|
||||
for k, v := range result.DynamicValues {
|
||||
event.InternalEvent[k] = v
|
||||
}
|
||||
result.DynamicValues = nil
|
||||
}
|
||||
event.OperatorsResult = result
|
||||
if addAdditionalOptions != nil {
|
||||
addAdditionalOptions(event)
|
||||
|
@ -151,14 +151,34 @@ func requestShouldStopAtFirstMatch(request *RequestData) bool {
|
||||
|
||||
// processInteractionForRequest processes an interaction for a request
|
||||
func (c *Client) processInteractionForRequest(interaction *server.Interaction, data *RequestData) bool {
|
||||
var result *operators.Result
|
||||
var matched bool
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
gologger.Error().Msgf("panic occurred while processing interaction with result=%v matched=%v err=%v", result, matched, r)
|
||||
}
|
||||
}()
|
||||
data.Event.Lock()
|
||||
data.Event.InternalEvent["interactsh_protocol"] = interaction.Protocol
|
||||
data.Event.InternalEvent["interactsh_request"] = interaction.RawRequest
|
||||
if strings.EqualFold(interaction.Protocol, "dns") {
|
||||
data.Event.InternalEvent["interactsh_request"] = strings.ToLower(interaction.RawRequest)
|
||||
} else {
|
||||
data.Event.InternalEvent["interactsh_request"] = interaction.RawRequest
|
||||
}
|
||||
data.Event.InternalEvent["interactsh_response"] = interaction.RawResponse
|
||||
data.Event.InternalEvent["interactsh_ip"] = interaction.RemoteAddress
|
||||
data.Event.Unlock()
|
||||
|
||||
result, matched := data.Operators.Execute(data.Event.InternalEvent, data.MatchFunc, data.ExtractFunc, c.options.Debug || c.options.DebugRequest || c.options.DebugResponse)
|
||||
if data.Operators != nil {
|
||||
result, matched = data.Operators.Execute(data.Event.InternalEvent, data.MatchFunc, data.ExtractFunc, c.options.Debug || c.options.DebugRequest || c.options.DebugResponse)
|
||||
} else {
|
||||
// this is most likely a bug so error instead of warning
|
||||
var templateID string
|
||||
if data.Event.InternalEvent != nil {
|
||||
templateID = fmt.Sprint(data.Event.InternalEvent[templateIdAttribute])
|
||||
}
|
||||
gologger.Error().Msgf("missing compiled operators for '%v' template", templateID)
|
||||
}
|
||||
|
||||
// for more context in github actions
|
||||
if strings.EqualFold(os.Getenv("GITHUB_ACTIONS"), "true") && c.options.Debug {
|
||||
|
@ -3,6 +3,7 @@ package protocolinit
|
||||
import (
|
||||
"github.com/corpix/uarand"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/js/compiler"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/dns/dnsclientpool"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httpclientpool"
|
||||
@ -34,6 +35,9 @@ func Init(options *types.Options) error {
|
||||
if err := rdapclientpool.Init(options); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := compiler.Init(options); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -32,3 +32,8 @@ func NormalizePath(filePath string) (string, error) {
|
||||
}
|
||||
return "", errorutil.New("path %v is outside nuclei-template directory and -lfa is not enabled", filePath)
|
||||
}
|
||||
|
||||
// IsLFAAllowed returns true if local file access is allowed
|
||||
func IsLFAAllowed() bool {
|
||||
return lfaAllowed
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
var (
|
||||
ErrURLDenied = errorutil.NewWithFmt("headless: url %v dropped by rule: %v")
|
||||
ErrHostDenied = errorutil.NewWithFmt("host %v dropped by network policy")
|
||||
networkPolicy *networkpolicy.NetworkPolicy
|
||||
NetworkPolicy *networkpolicy.NetworkPolicy
|
||||
allowLocalFileAccess bool
|
||||
)
|
||||
|
||||
@ -51,14 +51,11 @@ func FailWithReason(page *rod.Page, e *proto.FetchRequestPaused) error {
|
||||
}
|
||||
|
||||
// InitHeadless initializes headless protocol state
|
||||
func InitHeadless(RestrictLocalNetworkAccess bool, localFileAccess bool) {
|
||||
func InitHeadless(localFileAccess bool, np *networkpolicy.NetworkPolicy) {
|
||||
allowLocalFileAccess = localFileAccess
|
||||
if !RestrictLocalNetworkAccess {
|
||||
return
|
||||
if np != nil {
|
||||
NetworkPolicy = np
|
||||
}
|
||||
networkPolicy, _ = networkpolicy.New(networkpolicy.Options{
|
||||
DenyList: append(networkpolicy.DefaultIPv4DenylistRanges, networkpolicy.DefaultIPv6DenylistRanges...),
|
||||
})
|
||||
}
|
||||
|
||||
// isValidHost checks if the host is valid (only limited to http/https protocols)
|
||||
@ -66,7 +63,7 @@ func isValidHost(targetUrl string) bool {
|
||||
if !stringsutil.HasPrefixAny(targetUrl, "http:", "https:") {
|
||||
return true
|
||||
}
|
||||
if networkPolicy == nil {
|
||||
if NetworkPolicy == nil {
|
||||
return true
|
||||
}
|
||||
urlx, err := urlutil.Parse(targetUrl)
|
||||
@ -75,15 +72,15 @@ func isValidHost(targetUrl string) bool {
|
||||
return false
|
||||
}
|
||||
targetUrl = urlx.Hostname()
|
||||
_, ok := networkPolicy.ValidateHost(targetUrl)
|
||||
_, ok := NetworkPolicy.ValidateHost(targetUrl)
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsHostAllowed checks if the host is allowed by network policy
|
||||
func IsHostAllowed(targetUrl string) bool {
|
||||
if networkPolicy == nil {
|
||||
if NetworkPolicy == nil {
|
||||
return true
|
||||
}
|
||||
_, ok := networkPolicy.ValidateHost(targetUrl)
|
||||
_, ok := NetworkPolicy.ValidateHost(targetUrl)
|
||||
return ok
|
||||
}
|
||||
|
@ -1,16 +1,20 @@
|
||||
package protocolstate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/proxy"
|
||||
|
||||
"github.com/projectdiscovery/fastdialer/fastdialer"
|
||||
"github.com/projectdiscovery/mapcidr/asn"
|
||||
"github.com/projectdiscovery/networkpolicy"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/utils/expand"
|
||||
)
|
||||
|
||||
// Dialer is a shared fastdialer instance for host DNS resolution
|
||||
@ -29,7 +33,27 @@ func Init(options *types.Options) error {
|
||||
if options.DialerKeepAlive > 0 {
|
||||
opts.DialerKeepAlive = options.DialerKeepAlive
|
||||
}
|
||||
InitHeadless(options.RestrictLocalNetworkAccess, options.AllowLocalFileAccess)
|
||||
|
||||
var expandedDenyList []string
|
||||
for _, excludeTarget := range options.ExcludeTargets {
|
||||
switch {
|
||||
case asn.IsASN(excludeTarget):
|
||||
expandedDenyList = append(expandedDenyList, expand.ASN(excludeTarget)...)
|
||||
default:
|
||||
expandedDenyList = append(expandedDenyList, excludeTarget)
|
||||
}
|
||||
}
|
||||
|
||||
if options.RestrictLocalNetworkAccess {
|
||||
expandedDenyList = append(expandedDenyList, networkpolicy.DefaultIPv4DenylistRanges...)
|
||||
expandedDenyList = append(expandedDenyList, networkpolicy.DefaultIPv6DenylistRanges...)
|
||||
}
|
||||
npOptions := &networkpolicy.Options{
|
||||
DenyList: expandedDenyList,
|
||||
}
|
||||
opts.WithNetworkPolicyOptions = npOptions
|
||||
NetworkPolicy, _ = networkpolicy.New(*npOptions)
|
||||
InitHeadless(options.AllowLocalFileAccess, NetworkPolicy)
|
||||
|
||||
switch {
|
||||
case options.SourceIP != "" && options.Interface != "":
|
||||
@ -99,9 +123,9 @@ func Init(options *types.Options) error {
|
||||
if options.ResolversFile != "" {
|
||||
opts.BaseResolvers = options.InternalResolversList
|
||||
}
|
||||
if options.RestrictLocalNetworkAccess {
|
||||
opts.Deny = append(networkpolicy.DefaultIPv4DenylistRanges, networkpolicy.DefaultIPv6DenylistRanges...)
|
||||
}
|
||||
|
||||
opts.Deny = append(opts.Deny, expandedDenyList...)
|
||||
|
||||
opts.WithDialerHistory = true
|
||||
opts.SNIName = options.SNI
|
||||
|
||||
@ -111,6 +135,12 @@ func Init(options *types.Options) error {
|
||||
return errors.Wrap(err, "could not create dialer")
|
||||
}
|
||||
Dialer = dialer
|
||||
|
||||
// override dialer in mysql
|
||||
mysql.RegisterDialContext("tcp", func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return Dialer.Dial(ctx, "tcp", addr)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
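A minimal sketch of how the deny list assembled in Init above feeds host validation, using only the networkpolicy calls visible in this change (the excluded CIDR and probe host below are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/projectdiscovery/networkpolicy"
)

func main() {
	// deny list built the same way Init does: excluded targets plus the
	// default local ranges when RestrictLocalNetworkAccess is set
	denyList := append([]string{"192.168.1.0/24"},
		append(networkpolicy.DefaultIPv4DenylistRanges, networkpolicy.DefaultIPv6DenylistRanges...)...)

	np, err := networkpolicy.New(networkpolicy.Options{DenyList: denyList})
	if err != nil {
		panic(err)
	}
	// ValidateHost is the same call IsHostAllowed/isValidHost use
	_, ok := np.ValidateHost("127.0.0.1")
	fmt.Println("127.0.0.1 allowed:", ok) // expected false once the local ranges are denied
}
```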
|
||||
|
||||
|
@ -53,7 +53,9 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, metadata,
|
||||
// optionvars are vars passed from CLI or env variables
|
||||
optionVars := generators.BuildPayloadFromOptions(request.options.Options)
|
||||
// merge with metadata (eg. from workflow context)
|
||||
vars = generators.MergeMaps(vars, metadata, optionVars, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
vars = generators.MergeMaps(vars, metadata, optionVars, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
variablesMap := request.options.Variables.Evaluate(vars)
|
||||
vars = generators.MergeMaps(vars, variablesMap, request.options.Constants)
|
||||
|
||||
@ -160,7 +162,9 @@ func (request *Request) execute(input *contextargs.Context, domain string, metad
|
||||
outputEvent[k] = v
|
||||
}
|
||||
// add variables from template context before matching/extraction
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
event := eventcreator.CreateEvent(request, outputEvent, request.options.Options.Debug || request.options.Options.DebugResponse)
|
||||
|
||||
dumpResponse(event, request, request.options, response.String(), question)
|
||||
|
@ -48,6 +48,10 @@ func (request *Request) Extract(data map[string]interface{}, extractor *extracto
|
||||
return extractor.ExtractRegex(itemStr)
|
||||
case extractors.KValExtractor:
|
||||
return extractor.ExtractKval(data)
|
||||
case extractors.JSONExtractor:
|
||||
return extractor.ExtractJSON(itemStr)
|
||||
case extractors.XPathExtractor:
|
||||
return extractor.ExtractXPath(itemStr)
|
||||
case extractors.DSLExtractor:
|
||||
return extractor.ExtractDSL(data)
|
||||
}
|
||||
|
@ -248,7 +248,9 @@ func (request *Request) findMatchesWithReader(reader io.Reader, input *contextar
|
||||
dslMap[k] = v
|
||||
}
|
||||
// add template context variables to DSL map
|
||||
dslMap = generators.MergeMaps(dslMap, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
dslMap = generators.MergeMaps(dslMap, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
discardEvent := eventcreator.CreateEvent(request, dslMap, isResponseDebug)
|
||||
newOpResult := discardEvent.OperatorsResult
|
||||
if newOpResult != nil {
|
||||
|
@ -1 +0,0 @@
|
||||
{"nuclei-templates-directory":"/Users/tarun/nuclei-templates","custom-s3-templates-directory":"/Users/tarun/nuclei-templates/s3","custom-github-templates-directory":"/Users/tarun/nuclei-templates/github","custom-gitlab-templates-directory":"/Users/tarun/nuclei-templates/gitlab","custom-azure-templates-directory":"/Users/tarun/nuclei-templates/azure","nuclei-latest-version":"","nuclei-templates-latest-version":""}
|
@ -19,6 +19,7 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/expressions"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/utils/vardump"
|
||||
protocolutils "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
|
||||
httputil "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils/http"
|
||||
@ -395,6 +396,24 @@ func (p *Page) Screenshot(act *Action, out map[string]string) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not take screenshot")
|
||||
}
|
||||
targetPath := p.getActionArgWithDefaultValues(act, "to")
|
||||
targetPath, err = fileutil.CleanPath(targetPath)
|
||||
if err != nil {
|
||||
return errorutil.New("could not clean output screenshot path %s", targetPath)
|
||||
}
|
||||
// allow if targetPath is child of current working directory
|
||||
if !protocolstate.IsLFAAllowed() {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return errorutil.NewWithErr(err).Msgf("could not get current working directory")
|
||||
}
|
||||
if !strings.HasPrefix(targetPath, cwd) {
|
||||
// writing outside of cwd requires -lfa flag
|
||||
return ErrLFAccessDenied
|
||||
}
|
||||
}
|
||||
|
||||
// edge case: create the directory if mkdir=true and the path contains a directory
|
||||
if p.getActionArgWithDefaultValues(act, "mkdir") == "true" && stringsutil.ContainsAny(to, folderutil.UnixPathSeparator, folderutil.WindowsPathSeparator) {
|
||||
// creates new directory if needed based on path `to`
|
||||
// TODO: replace all permission bits with fileutil constants (https://github.com/projectdiscovery/utils/issues/113)
|
||||
@ -402,8 +421,10 @@ func (p *Page) Screenshot(act *Action, out map[string]string) error {
|
||||
return errorutil.NewWithErr(err).Msgf("failed to create directory while writing screenshot")
|
||||
}
|
||||
}
|
||||
filePath := to
|
||||
if !strings.HasSuffix(to, ".png") {
|
||||
|
||||
// actual file path to write
|
||||
filePath := targetPath
|
||||
if !strings.HasSuffix(filePath, ".png") {
|
||||
filePath += ".png"
|
||||
}
|
||||
|
||||
|
@ -577,7 +577,12 @@ func testHeadlessSimpleResponse(t *testing.T, response string, actions []*Action
|
||||
|
||||
func testHeadless(t *testing.T, actions []*Action, timeout time.Duration, handler func(w http.ResponseWriter, r *http.Request), assert func(page *Page, pageErr error, extractedData map[string]string)) {
|
||||
t.Helper()
|
||||
_ = protocolstate.Init(&types.Options{})
|
||||
|
||||
lfa := getBoolFromEnv("LOCAL_FILE_ACCESS", true)
|
||||
rna := getBoolFromEnv("RESTRICED_LOCAL_NETWORK_ACCESS", false)
|
||||
opts := &types.Options{AllowLocalFileAccess: lfa, RestrictLocalNetworkAccess: rna}
|
||||
|
||||
_ = protocolstate.Init(opts)
|
||||
|
||||
browser, err := New(&types.Options{ShowBrowser: false, UseInstalledChrome: testheadless.HeadlessLocal})
|
||||
require.Nil(t, err, "could not create browser")
|
||||
@ -594,10 +599,7 @@ func testHeadless(t *testing.T, actions []*Action, timeout time.Duration, handle
|
||||
input.CookieJar, err = cookiejar.New(nil)
|
||||
require.Nil(t, err)
|
||||
|
||||
lfa := getBoolFromEnv("LOCAL_FILE_ACCESS", true)
|
||||
rna := getBoolFromEnv("RESTRICED_LOCAL_NETWORK_ACCESS", false)
|
||||
|
||||
extractedData, page, err := instance.Run(input, actions, nil, &Options{Timeout: timeout, Options: &types.Options{AllowLocalFileAccess: lfa, RestrictLocalNetworkAccess: rna}}) // allow file access in test
|
||||
extractedData, page, err := instance.Run(input, actions, nil, &Options{Timeout: timeout, Options: opts}) // allow file access in test
|
||||
assert(page, err, extractedData)
|
||||
|
||||
if page != nil {
|
||||
|
@ -54,7 +54,10 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, metadata,
|
||||
vars := protocolutils.GenerateVariablesWithContextArgs(input, false)
|
||||
payloads := generators.BuildPayloadFromOptions(request.options.Options)
|
||||
// add templatecontext variables to varMap
|
||||
values := generators.MergeMaps(vars, metadata, payloads, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
values := generators.MergeMaps(vars, metadata, payloads)
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
values = generators.MergeMaps(values, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
variablesMap := request.options.Variables.Evaluate(values)
|
||||
payloads = generators.MergeMaps(variablesMap, payloads, request.options.Constants)
|
||||
|
||||
@ -183,7 +186,9 @@ func (request *Request) executeRequestWithPayloads(input *contextargs.Context, p
|
||||
outputEvent := request.responseToDSLMap(responseBody, out["header"], out["status_code"], reqBuilder.String(), input.MetaInput.Input, navigatedURL, page.DumpHistory())
|
||||
// add response fields to template context and merge templatectx variables to output event
|
||||
request.options.AddTemplateVars(input.MetaInput, request.Type(), request.ID, outputEvent)
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
for k, v := range out {
|
||||
outputEvent[k] = v
|
||||
}
|
||||
|
@ -76,7 +76,10 @@ func (r *requestGenerator) Make(ctx context.Context, input *contextargs.Context,
|
||||
|
||||
// add template context values to dynamicValues (this takes care of self-contained and other types of requests)
|
||||
// Note: `iterate-all` and flow are mutually exclusive. flow uses templateCtx and iterate-all uses dynamicValues
|
||||
dynamicValues = generators.MergeMaps(dynamicValues, r.request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if r.request.options.HasTemplateCtx(input.MetaInput) {
|
||||
// skip creating template context if not available
|
||||
dynamicValues = generators.MergeMaps(dynamicValues, r.request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
if r.request.SelfContained {
|
||||
return r.makeSelfContainedRequest(ctx, reqData, payloads, dynamicValues)
|
||||
}
|
||||
|
169
lib/nuclei/pkg/protocols/http/httputils/chain.go
Executable file
169
lib/nuclei/pkg/protocols/http/httputils/chain.go
Executable file
@ -0,0 +1,169 @@
|
||||
package httputils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
protoUtil "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
|
||||
)
|
||||
|
||||
// use buffer pool for storing response body
|
||||
// and reuse it for each request
|
||||
var bufPool = sync.Pool{
|
||||
New: func() any {
|
||||
// The Pool's New function should generally only return pointer
|
||||
// types, since a pointer can be put into the return interface
|
||||
// value without an allocation:
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// getBuffer returns a buffer from the pool
|
||||
func getBuffer() *bytes.Buffer {
|
||||
return bufPool.Get().(*bytes.Buffer)
|
||||
}
|
||||
|
||||
// putBuffer returns a buffer to the pool
|
||||
func putBuffer(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
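As an aside, the same pooling pattern used by getBuffer/putBuffer above can be shown as a standalone sketch using only the standard library:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func main() {
	buf := pool.Get().(*bytes.Buffer) // getBuffer equivalent
	buf.WriteString("hello")
	fmt.Println(buf.String())

	buf.Reset()   // putBuffer resets before returning the buffer...
	pool.Put(buf) // ...so the next Get starts with an empty buffer
}
```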
|
||||
|
||||
// Performance Notes:
|
||||
// do not use http.Response once we create ResponseChain from it
|
||||
// as this reuses buffers and saves allocations and also drains response
|
||||
// body automatically.
|
||||
// In required cases it can be used but should never be used for anything
|
||||
// related to response body.
|
||||
// Bytes.Buffer values returned by getters should not be used directly and are only meant for convenience
|
||||
// purposes like .String() or .Bytes() calls.
|
||||
// Remember to call Close() on ResponseChain once you are done with it.
|
||||
|
||||
// ResponseChain is a response chain for an http request;
|
||||
// on every call to Previous it moves to the previous response
|
||||
// if the request was redirected.
|
||||
type ResponseChain struct {
|
||||
headers *bytes.Buffer
|
||||
body *bytes.Buffer
|
||||
fullResponse *bytes.Buffer
|
||||
resp *http.Response
|
||||
reloaded bool // if response was reloaded to its previous redirect
|
||||
}
|
||||
|
||||
// NewResponseChain creates a new response chain for an http request
|
||||
// with a maximum body size (if -1, stick to the default of 4MB).
|
||||
func NewResponseChain(resp *http.Response, maxBody int64) *ResponseChain {
|
||||
if _, ok := resp.Body.(protoUtil.LimitResponseBody); !ok {
|
||||
resp.Body = protoUtil.NewLimitResponseBodyWithSize(resp.Body, maxBody)
|
||||
}
|
||||
return &ResponseChain{
|
||||
headers: getBuffer(),
|
||||
body: getBuffer(),
|
||||
fullResponse: getBuffer(),
|
||||
resp: resp,
|
||||
}
|
||||
}
|
||||
|
||||
// Headers returns the headers of the current response in the chain
|
||||
func (r *ResponseChain) Headers() *bytes.Buffer {
|
||||
return r.headers
|
||||
}
|
||||
|
||||
// Body returns the current response body in the chain
|
||||
func (r *ResponseChain) Body() *bytes.Buffer {
|
||||
return r.body
|
||||
}
|
||||
|
||||
// FullResponse returns the current response in the chain
|
||||
func (r *ResponseChain) FullResponse() *bytes.Buffer {
|
||||
return r.fullResponse
|
||||
}
|
||||
|
||||
// Previous updates the response pointer to the previous response
|
||||
// if it was redirected, and returns true, otherwise false
|
||||
func (r *ResponseChain) Previous() bool {
|
||||
if r.resp != nil && r.resp.Request != nil && r.resp.Request.Response != nil {
|
||||
r.resp = r.resp.Request.Response
|
||||
r.reloaded = true
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Fill buffers
|
||||
func (r *ResponseChain) Fill() error {
|
||||
r.reset()
|
||||
if r.resp == nil {
|
||||
return fmt.Errorf("response is nil")
|
||||
}
|
||||
|
||||
// load headers
|
||||
err := DumpResponseIntoBuffer(r.resp, false, r.headers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error dumping response headers: %s", err)
|
||||
}
|
||||
|
||||
if r.resp.StatusCode != http.StatusSwitchingProtocols && !r.reloaded {
|
||||
// Note about reloaded:
|
||||
// this is known behaviour carried over from an earlier version:
|
||||
// when a redirect is followed and operators are executed on the whole redirect chain,
|
||||
// the body of those intermediate responses is not available since it has already been redirected.
|
||||
// This is not an issue since redirects happen with an empty body according to the RFC,
|
||||
// but the body may occasionally be required.
|
||||
// Solution: redirect manually using dynamic matchers, or hijack redirected responses
|
||||
// at the transport level, replace them with a bytes buffer, and then use that.
|
||||
|
||||
// load body
|
||||
err = readNNormalizeRespBody(r, r.body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %s", err)
|
||||
}
|
||||
|
||||
// response body should not be used anymore
|
||||
// drain and close
|
||||
DrainResponseBody(r.resp)
|
||||
}
|
||||
|
||||
// join headers and body
|
||||
r.fullResponse.Write(r.headers.Bytes())
|
||||
r.fullResponse.Write(r.body.Bytes())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the response chain and releases the buffers.
|
||||
func (r *ResponseChain) Close() {
|
||||
putBuffer(r.headers)
|
||||
putBuffer(r.body)
|
||||
putBuffer(r.fullResponse)
|
||||
r.headers = nil
|
||||
r.body = nil
|
||||
r.fullResponse = nil
|
||||
}
|
||||
|
||||
// Has returns true if the response chain has a response
|
||||
func (r *ResponseChain) Has() bool {
|
||||
return r.resp != nil
|
||||
}
|
||||
|
||||
// Request returns the request of the current response
|
||||
func (r *ResponseChain) Request() *http.Request {
|
||||
if r.resp == nil {
|
||||
return nil
|
||||
}
|
||||
return r.resp.Request
|
||||
}
|
||||
|
||||
// Response returns the current response
|
||||
func (r *ResponseChain) Response() *http.Response {
|
||||
return r.resp
|
||||
}
|
||||
|
||||
// reset without releasing the buffers
|
||||
// useful for redirect chain
|
||||
func (r *ResponseChain) reset() {
|
||||
r.headers.Reset()
|
||||
r.body.Reset()
|
||||
r.fullResponse.Reset()
|
||||
}
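For orientation, a hedged usage sketch of the ResponseChain API defined in this file, mirroring how the http executer consumes it later in this commit (the target URL is an illustrative assumption):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httputils"
)

func main() {
	resp, err := http.Get("https://example.com") // assumed reachable target
	if err != nil {
		panic(err)
	}
	respChain := httputils.NewResponseChain(resp, -1) // -1 keeps the default body limit
	defer respChain.Close()                           // release the pooled buffers

	// walk the redirect chain in reverse order, final response first
	for respChain.Has() {
		if err := respChain.Fill(); err != nil { // fills the headers/body/fullResponse buffers
			panic(err)
		}
		fmt.Println(respChain.Response().StatusCode, respChain.Body().Len())
		if !respChain.Previous() { // stop once there is no earlier redirect response
			break
		}
	}
}
```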
|
47
lib/nuclei/pkg/protocols/http/httputils/internal.go
Executable file
47
lib/nuclei/pkg/protocols/http/httputils/internal.go
Executable file
@ -0,0 +1,47 @@
|
||||
package httputils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// implementations copied from stdlib
|
||||
|
||||
// errNoBody is a sentinel error value used by failureToReadBody so we
|
||||
// can detect that the lack of body was intentional.
|
||||
var errNoBody = errors.New("sentinel error value")
|
||||
|
||||
// failureToReadBody is an io.ReadCloser that just returns errNoBody on
|
||||
// Read. It's swapped in when we don't actually want to consume
|
||||
// the body, but need a non-nil one, and want to distinguish the
|
||||
// error from reading the dummy body.
|
||||
type failureToReadBody struct{}
|
||||
|
||||
func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody }
|
||||
func (failureToReadBody) Close() error { return nil }
|
||||
|
||||
// emptyBody is an instance of empty reader.
|
||||
var emptyBody = io.NopCloser(strings.NewReader(""))
|
||||
|
||||
// drainBody reads all of b to memory and then returns two equivalent
|
||||
// ReadClosers yielding the same bytes.
|
||||
//
|
||||
// It returns an error if the initial slurp of all bytes fails. It does not attempt
|
||||
// to make the returned ReadClosers have identical error-matching behavior.
|
||||
func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
|
||||
if b == nil || b == http.NoBody {
|
||||
// No copying needed. Preserve the magic sentinel meaning of NoBody.
|
||||
return http.NoBody, http.NoBody, nil
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if _, err = buf.ReadFrom(b); err != nil {
|
||||
return nil, b, err
|
||||
}
|
||||
if err = b.Close(); err != nil {
|
||||
return nil, b, err
|
||||
}
|
||||
return io.NopCloser(&buf), io.NopCloser(bytes.NewReader(buf.Bytes())), nil
|
||||
}
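The same drain-and-restore idea can be shown standalone; drainBody itself is unexported, so this sketch reimplements the two-reader trick with the standard library only:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := io.NopCloser(strings.NewReader("response payload"))

	// slurp the body once, then hand back two equivalent readers
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(body); err != nil {
		panic(err)
	}
	_ = body.Close()
	saved := io.NopCloser(&buf)                               // reader kept for later restoration
	copyForDump := io.NopCloser(bytes.NewReader(buf.Bytes())) // reader consumed by the dump

	dumped, _ := io.ReadAll(copyForDump)
	restored, _ := io.ReadAll(saved)
	fmt.Println(string(dumped) == string(restored)) // true: both yield the same bytes
}
```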
|
21
lib/nuclei/pkg/protocols/http/httputils/misc.go
Executable file
21
lib/nuclei/pkg/protocols/http/httputils/misc.go
Executable file
@ -0,0 +1,21 @@
|
||||
package httputils
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
mapsutil "github.com/projectdiscovery/utils/maps"
|
||||
)
|
||||
|
||||
// if a template contains more than one request and matchers require request conditions from
|
||||
// both requests, then we need to request the event from interactsh even if the current request
|
||||
// does not use an interactsh URL in it
|
||||
func GetInteractshURLSFromEvent(event map[string]interface{}) []string {
|
||||
interactshUrls := map[string]struct{}{}
|
||||
for k, v := range event {
|
||||
if strings.HasPrefix(k, "interactsh-url") {
|
||||
interactshUrls[types.ToString(v)] = struct{}{}
|
||||
}
|
||||
}
|
||||
return mapsutil.GetKeys(interactshUrls)
|
||||
}
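A small illustration of what this helper extracts; the event keys below are hypothetical but follow the interactsh-url prefix convention checked above:

```go
package main

import (
	"fmt"

	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httputils"
)

func main() {
	event := map[string]interface{}{
		"interactsh-url":   "c8rlvx.oast.example", // assumed OAST payload from request 1
		"interactsh-url_1": "9dkq2p.oast.example", // assumed OAST payload from request 2
		"status_code":      200,                   // unrelated keys are ignored
	}
	urls := httputils.GetInteractshURLSFromEvent(event)
	fmt.Println(urls) // both interactsh URLs, deduplicated, in no particular order
}
```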
|
77
lib/nuclei/pkg/protocols/http/httputils/normalization.go
Executable file
77
lib/nuclei/pkg/protocols/http/httputils/normalization.go
Executable file
@ -0,0 +1,77 @@
|
||||
package httputils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"golang.org/x/text/transform"
|
||||
|
||||
stringsutil "github.com/projectdiscovery/utils/strings"
|
||||
)
|
||||
|
||||
// readNNormalizeRespBody performs normalization on the http response object
|
||||
// and fills the body buffer with the actual response body.
|
||||
func readNNormalizeRespBody(rc *ResponseChain, body *bytes.Buffer) (err error) {
|
||||
response := rc.resp
|
||||
// net/http doesn't automatically decompress the response body if an
|
||||
// encoding has been specified by the user in the request, so in that case
|
||||
// we have to do it manually.
|
||||
|
||||
origBody := rc.resp.Body
|
||||
// wrap with decode if applicable
|
||||
wrapped, err := wrapDecodeReader(response)
|
||||
if err != nil {
|
||||
wrapped = origBody
|
||||
}
|
||||
// read response body to buffer
|
||||
_, err = body.ReadFrom(wrapped)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "gzip: invalid header") {
|
||||
// it's invalid gzip, but we will still use the original body
|
||||
_, err = body.ReadFrom(origBody)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not read response body after gzip error")
|
||||
}
|
||||
}
|
||||
if stringsutil.ContainsAny(err.Error(), "unexpected EOF", "read: connection reset by peer", "user canceled") {
|
||||
// keep partial body and continue (skip error) (add meta header in response for debugging)
|
||||
response.Header.Set("x-nuclei-ignore-error", err.Error())
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "could not read response body")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// wrapDecodeReader wraps a decompression reader around the response body if it's compressed
|
||||
// using gzip or deflate.
|
||||
func wrapDecodeReader(resp *http.Response) (rc io.ReadCloser, err error) {
|
||||
switch resp.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
rc, err = gzip.NewReader(resp.Body)
|
||||
case "deflate":
|
||||
rc, err = zlib.NewReader(resp.Body)
|
||||
default:
|
||||
rc = resp.Body
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// handle GBK encoding
|
||||
if isContentTypeGbk(resp.Header.Get("Content-Type")) {
|
||||
rc = io.NopCloser(transform.NewReader(rc, simplifiedchinese.GBK.NewDecoder()))
|
||||
}
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// isContentTypeGbk checks if the content-type header is gbk
|
||||
func isContentTypeGbk(contentType string) bool {
|
||||
contentType = strings.ToLower(contentType)
|
||||
return stringsutil.ContainsAny(contentType, "gbk", "gb2312", "gb18030")
|
||||
}
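A standalone sketch of the GBK handling above, using the same transform call on a small GBK-encoded byte sequence (the sample bytes are the GBK encoding of "你好"):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

func main() {
	gbk := []byte{0xC4, 0xE3, 0xBA, 0xC3} // "你好" encoded as GBK
	// same wrapping as wrapDecodeReader: a transform reader converts GBK to UTF-8
	r := transform.NewReader(bytes.NewReader(gbk), simplifiedchinese.GBK.NewDecoder())
	utf8, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(utf8)) // 你好
}
```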
|
52
lib/nuclei/pkg/protocols/http/httputils/response.go
Executable file
52
lib/nuclei/pkg/protocols/http/httputils/response.go
Executable file
@ -0,0 +1,52 @@
|
||||
package httputils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
protocolutil "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
|
||||
)
|
||||
|
||||
// DumpResponseIntoBuffer dumps a http response without allocating a new buffer
|
||||
// for the response body.
|
||||
func DumpResponseIntoBuffer(resp *http.Response, body bool, buff *bytes.Buffer) (err error) {
|
||||
if resp == nil {
|
||||
return fmt.Errorf("response is nil")
|
||||
}
|
||||
save := resp.Body
|
||||
savecl := resp.ContentLength
|
||||
|
||||
if !body {
|
||||
// For content length of zero. Make sure the body is an empty
|
||||
// reader, instead of returning error through failureToReadBody{}.
|
||||
if resp.ContentLength == 0 {
|
||||
resp.Body = emptyBody
|
||||
} else {
|
||||
resp.Body = failureToReadBody{}
|
||||
}
|
||||
} else if resp.Body == nil {
|
||||
resp.Body = emptyBody
|
||||
} else {
|
||||
save, resp.Body, err = drainBody(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = resp.Write(buff)
|
||||
if err == errNoBody {
|
||||
err = nil
|
||||
}
|
||||
resp.Body = save
|
||||
resp.ContentLength = savecl
|
||||
return
|
||||
}
|
||||
|
||||
// DrainResponseBody drains the response body and closes it.
|
||||
func DrainResponseBody(resp *http.Response) {
|
||||
defer resp.Body.Close()
|
||||
// don't reuse the connection and just close it if the body length is more than 2 * MaxBodyRead
|
||||
// to avoid DoS
|
||||
_, _ = io.CopyN(io.Discard, resp.Body, 2*protocolutil.MaxBodyRead)
|
||||
}
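A short usage sketch of DumpResponseIntoBuffer for the headers-only case; the response here is synthesized with httptest purely for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httputils"
)

func main() {
	// synthesize a response for illustration
	rec := httptest.NewRecorder()
	rec.Header().Set("Content-Type", "text/plain")
	rec.WriteHeader(http.StatusOK)
	rec.WriteString("body is skipped in this dump")
	resp := rec.Result()

	var buff bytes.Buffer
	// body=false dumps only the status line and headers into the reused buffer
	if err := httputils.DumpResponseIntoBuffer(resp, false, &buff); err != nil {
		panic(err)
	}
	fmt.Print(buff.String())

	httputils.DrainResponseBody(resp) // drain and close so the connection could be reused
}
```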
|
@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -32,18 +31,23 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/interactsh"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/tostring"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httpclientpool"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/httputils"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/signer"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/http/signerpool"
|
||||
protocolutil "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
|
||||
templateTypes "github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
convUtil "github.com/projectdiscovery/utils/conversion"
|
||||
"github.com/projectdiscovery/utils/reader"
|
||||
sliceutil "github.com/projectdiscovery/utils/slice"
|
||||
stringsutil "github.com/projectdiscovery/utils/strings"
|
||||
urlutil "github.com/projectdiscovery/utils/url"
|
||||
)
|
||||
|
||||
const defaultMaxWorkers = 150
|
||||
const (
|
||||
defaultMaxWorkers = 150
|
||||
)
|
||||
|
||||
// Type returns the type of the protocol request
|
||||
func (request *Request) Type() templateTypes.ProtocolType {
|
||||
@ -303,6 +307,7 @@ func (request *Request) executeFuzzingRule(input *contextargs.Context, previous
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
input.MetaInput = &contextargs.MetaInput{Input: generated.URL()}
|
||||
for _, rule := range request.Fuzzing {
|
||||
err = rule.Execute(&fuzz.ExecuteRuleInput{
|
||||
Input: input,
|
||||
@ -396,7 +401,7 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
|
||||
MatchFunc: request.Match,
|
||||
ExtractFunc: request.Extract,
|
||||
}
|
||||
allOASTUrls := getInteractshURLsFromEvent(event.InternalEvent)
|
||||
allOASTUrls := httputils.GetInteractshURLSFromEvent(event.InternalEvent)
|
||||
allOASTUrls = append(allOASTUrls, generatedHttpRequest.interactshURLs...)
|
||||
request.options.Interactsh.RequestEvent(sliceutil.Dedupe(allOASTUrls), requestData)
|
||||
gotMatches = request.options.Interactsh.AlreadyMatched(requestData)
|
||||
@ -476,11 +481,12 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
finalMap["ip"] = input.MetaInput.CustomIP
|
||||
}
|
||||
|
||||
for payloadName, payloadValue := range generatedRequest.dynamicValues {
|
||||
if data, err := expressions.Evaluate(types.ToString(payloadValue), finalMap); err == nil {
|
||||
generatedRequest.dynamicValues[payloadName] = data
|
||||
}
|
||||
}
|
||||
// we should never evaluate all variables of a template
|
||||
// for payloadName, payloadValue := range generatedRequest.dynamicValues {
|
||||
// if data, err := expressions.Evaluate(types.ToString(payloadValue), finalMap); err == nil {
|
||||
// generatedRequest.dynamicValues[payloadName] = data
|
||||
// }
|
||||
// }
|
||||
for payloadName, payloadValue := range generatedRequest.meta {
|
||||
if data, err := expressions.Evaluate(types.ToString(payloadValue), finalMap); err == nil {
|
||||
generatedRequest.meta[payloadName] = data
|
||||
@ -646,6 +652,10 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
}
|
||||
}
|
||||
}
|
||||
// globally wrap the response body reader
|
||||
if resp != nil && resp.Body != nil {
|
||||
resp.Body = protocolutil.NewLimitResponseBody(resp.Body)
|
||||
}
|
||||
if err != nil {
|
||||
// rawhttp doesn't support draining response bodies.
|
||||
if resp != nil && resp.Body != nil && generatedRequest.rawRequest == nil && !generatedRequest.original.Pipeline {
|
||||
@ -676,12 +686,6 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
callback(event)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if resp.StatusCode != http.StatusSwitchingProtocols {
|
||||
_, _ = io.CopyN(io.Discard, resp.Body, drainReqSize)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}()
|
||||
|
||||
var curlCommand string
|
||||
if !request.Unsafe && resp != nil && generatedRequest.request != nil && resp.Request != nil && !request.Race {
|
||||
@ -697,55 +701,39 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
request.options.Output.Request(request.options.TemplatePath, formedURL, request.Type().String(), err)
|
||||
|
||||
duration := time.Since(timeStart)
|
||||
|
||||
dumpedResponseHeaders, err := httputil.DumpResponse(resp, false)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not dump http response")
|
||||
// define max body read limit
|
||||
maxBodylimit := -1 // stick to default 4MB
|
||||
if request.MaxSize > 0 {
|
||||
maxBodylimit = request.MaxSize
|
||||
} else if request.options.Options.ResponseReadSize != 0 {
|
||||
maxBodylimit = request.options.Options.ResponseReadSize
|
||||
}
|
||||
|
||||
var dumpedResponse []redirectedResponse
|
||||
var gotData []byte
|
||||
// If the status code is HTTP 101, we should not proceed with reading body.
|
||||
if resp.StatusCode != http.StatusSwitchingProtocols {
|
||||
var bodyReader io.Reader
|
||||
if request.MaxSize != 0 {
|
||||
bodyReader = io.LimitReader(resp.Body, int64(request.MaxSize))
|
||||
} else if request.options.Options.ResponseReadSize != 0 {
|
||||
bodyReader = io.LimitReader(resp.Body, int64(request.options.Options.ResponseReadSize))
|
||||
} else {
|
||||
bodyReader = resp.Body
|
||||
}
|
||||
data, err := io.ReadAll(bodyReader)
|
||||
if err != nil {
|
||||
// Ignore body read due to server misconfiguration errors
|
||||
if stringsutil.ContainsAny(err.Error(), "gzip: invalid header") {
|
||||
gologger.Warning().Msgf("[%s] Server sent an invalid gzip header and it was not possible to read the uncompressed body for %s: %s", request.options.TemplateID, formedURL, err.Error())
|
||||
} else if !stringsutil.ContainsAny(err.Error(), "unexpected EOF", "user canceled") { // ignore EOF and random error
|
||||
return errors.Wrap(err, "could not read http body")
|
||||
// respChain is http response chain that reads response body
|
||||
// efficiently by reusing buffers and does all decoding and optimizations
|
||||
respChain := httputils.NewResponseChain(resp, int64(maxBodylimit))
|
||||
defer respChain.Close() // reuse buffers
|
||||
|
||||
// we only intend to log/save the final redirected response
|
||||
// i.e why we have to use sync.Once to ensure it's only done once
|
||||
var errx error
|
||||
onceFunc := sync.OnceFunc(func() {
|
||||
// if nuclei-project is enabled store the response if not previously done
|
||||
if request.options.ProjectFile != nil && !fromCache {
|
||||
if err := request.options.ProjectFile.Set(dumpedRequest, resp, respChain.Body().Bytes()); err != nil {
|
||||
errx = errors.Wrap(err, "could not store in project file")
|
||||
}
|
||||
}
|
||||
gotData = data
|
||||
resp.Body.Close()
|
||||
})
|
||||
|
||||
dumpedResponse, err = dumpResponseWithRedirectChain(resp, data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not read http response with redirect chain")
|
||||
}
|
||||
} else {
|
||||
dumpedResponse = []redirectedResponse{{resp: resp, fullResponse: dumpedResponseHeaders, headers: dumpedResponseHeaders}}
|
||||
}
|
||||
|
||||
// if nuclei-project is enabled store the response if not previously done
|
||||
if request.options.ProjectFile != nil && !fromCache {
|
||||
if err := request.options.ProjectFile.Set(dumpedRequest, resp, gotData); err != nil {
|
||||
return errors.Wrap(err, "could not store in project file")
|
||||
}
|
||||
}
|
||||
|
||||
for _, response := range dumpedResponse {
|
||||
if response.resp == nil {
|
||||
continue // Skip nil responses
|
||||
// evaluate responses continuously, in reverse order, until the first redirect request
|
||||
for respChain.Has() {
|
||||
// fill buffers, read response body and reuse connection
|
||||
if err := respChain.Fill(); err != nil {
|
||||
return errors.Wrap(err, "could not generate response chain")
|
||||
}
|
||||
// save response to projectfile
|
||||
onceFunc()
|
||||
matchedURL := input.MetaInput.Input
|
||||
if generatedRequest.rawRequest != nil {
|
||||
if generatedRequest.rawRequest.FullURL != "" {
|
||||
@ -758,17 +746,19 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
matchedURL = generatedRequest.request.URL.String()
|
||||
}
|
||||
// Give precedence to the final URL from response
|
||||
if response.resp.Request != nil {
|
||||
if responseURL := response.resp.Request.URL.String(); responseURL != "" {
|
||||
if respChain.Request() != nil {
|
||||
if responseURL := respChain.Request().URL.String(); responseURL != "" {
|
||||
matchedURL = responseURL
|
||||
}
|
||||
}
|
||||
finalEvent := make(output.InternalEvent)
|
||||
|
||||
outputEvent := request.responseToDSLMap(response.resp, input.MetaInput.Input, matchedURL, tostring.UnsafeToString(dumpedRequest), tostring.UnsafeToString(response.fullResponse), tostring.UnsafeToString(response.body), tostring.UnsafeToString(response.headers), duration, generatedRequest.meta)
|
||||
outputEvent := request.responseToDSLMap(respChain.Response(), input.MetaInput.Input, matchedURL, convUtil.String(dumpedRequest), respChain.FullResponse().String(), respChain.Body().String(), respChain.Headers().String(), duration, generatedRequest.meta)
|
||||
// add response fields to template context and merge templatectx variables to output event
|
||||
request.options.AddTemplateVars(input.MetaInput, request.Type(), request.ID, outputEvent)
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
if i := strings.LastIndex(hostname, ":"); i != -1 {
|
||||
hostname = hostname[:i]
|
||||
}
|
||||
@ -808,9 +798,9 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
event.UsesInteractsh = true
|
||||
}
|
||||
|
||||
responseContentType := resp.Header.Get("Content-Type")
|
||||
isResponseTruncated := request.MaxSize > 0 && len(gotData) >= request.MaxSize
|
||||
dumpResponse(event, request, response.fullResponse, formedURL, responseContentType, isResponseTruncated, input.MetaInput.Input)
|
||||
responseContentType := respChain.Response().Header.Get("Content-Type")
|
||||
isResponseTruncated := request.MaxSize > 0 && respChain.Body().Len() >= request.MaxSize
|
||||
dumpResponse(event, request, respChain.FullResponse().Bytes(), formedURL, responseContentType, isResponseTruncated, input.MetaInput.Input)
|
||||
|
||||
callback(event)
|
||||
|
||||
@ -818,8 +808,15 @@ func (request *Request) executeRequest(input *contextargs.Context, generatedRequ
|
||||
if (request.options.Options.StopAtFirstMatch || request.options.StopAtFirstMatch || request.StopAtFirstMatch) && event.HasResults() {
|
||||
return nil
|
||||
}
|
||||
// proceed with previous response
|
||||
// we evaluate operators recursively for each response
|
||||
// until we reach the first redirect response
|
||||
if !respChain.Previous() {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
// return project file save error if any
|
||||
return errx
|
||||
}
|
||||
|
||||
// handleSignature of the http request
|
||||
|
@ -1,118 +1,13 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"golang.org/x/text/transform"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/rawhttp"
|
||||
mapsutil "github.com/projectdiscovery/utils/maps"
|
||||
stringsutil "github.com/projectdiscovery/utils/strings"
|
||||
)
|
||||
|
||||
type redirectedResponse struct {
|
||||
headers []byte
|
||||
body []byte
|
||||
fullResponse []byte
|
||||
resp *http.Response
|
||||
}
|
||||
|
||||
// dumpResponseWithRedirectChain dumps a http response with the
|
||||
// complete http redirect chain.
|
||||
//
|
||||
// It preserves the order in which responses were given to requests
|
||||
// and returns the data to the user for matching and viewing in that order.
|
||||
//
|
||||
// Inspired from - https://github.com/ffuf/ffuf/issues/324#issuecomment-719858923
|
||||
func dumpResponseWithRedirectChain(resp *http.Response, body []byte) ([]redirectedResponse, error) {
|
||||
var response []redirectedResponse
|
||||
|
||||
respData, err := httputil.DumpResponse(resp, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
respObj := redirectedResponse{
|
||||
headers: respData,
|
||||
body: body,
|
||||
resp: resp,
|
||||
fullResponse: bytes.Join([][]byte{respData, body}, []byte{}),
|
||||
}
|
||||
if err := normalizeResponseBody(resp, &respObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response = append(response, respObj)
|
||||
|
||||
var redirectResp *http.Response
|
||||
if resp != nil && resp.Request != nil {
|
||||
redirectResp = resp.Request.Response
|
||||
}
|
||||
for redirectResp != nil {
|
||||
var body []byte
|
||||
|
||||
respData, err := httputil.DumpResponse(redirectResp, false)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if redirectResp.Body != nil {
|
||||
body, _ = io.ReadAll(redirectResp.Body)
|
||||
}
|
||||
respObj := redirectedResponse{
|
||||
headers: respData,
|
||||
body: body,
|
||||
resp: redirectResp,
|
||||
fullResponse: bytes.Join([][]byte{respData, body}, []byte{}),
|
||||
}
|
||||
if err := normalizeResponseBody(redirectResp, &respObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response = append(response, respObj)
|
||||
redirectResp = redirectResp.Request.Response
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// normalizeResponseBody performs normalization on the http response object.
|
||||
func normalizeResponseBody(resp *http.Response, response *redirectedResponse) error {
|
||||
var err error
|
||||
// net/http doesn't automatically decompress the response body if an
|
||||
// encoding has been specified by the user in the request so in case we have to
|
||||
// manually do it.
|
||||
dataOrig := response.body
|
||||
response.body, err = handleDecompression(resp, response.body)
|
||||
// in case of error use original data
|
||||
if err != nil {
|
||||
response.body = dataOrig
|
||||
}
|
||||
response.fullResponse = bytes.ReplaceAll(response.fullResponse, dataOrig, response.body)
|
||||
|
||||
// Decode gbk response content-types
|
||||
// gb18030 supersedes gb2312
|
||||
responseContentType := resp.Header.Get("Content-Type")
|
||||
if isContentTypeGbk(responseContentType) {
|
||||
response.fullResponse, err = decodeGBK(response.fullResponse)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not gbk decode")
|
||||
}
|
||||
|
||||
// the uncompressed body needs to be decoded to standard utf8
|
||||
response.body, err = decodeGBK(response.body)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not gbk decode")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dump creates a dump of the http request in form of a byte slice
|
||||
func dump(req *generatedRequest, reqURL string) ([]byte, error) {
|
||||
if req.request != nil {
|
||||
@ -121,60 +16,3 @@ func dump(req *generatedRequest, reqURL string) ([]byte, error) {
|
||||
rawHttpOptions := &rawhttp.Options{CustomHeaders: req.rawRequest.UnsafeHeaders, CustomRawBytes: req.rawRequest.UnsafeRawBytes}
|
||||
return rawhttp.DumpRequestRaw(req.rawRequest.Method, reqURL, req.rawRequest.Path, generators.ExpandMapValues(req.rawRequest.Headers), io.NopCloser(strings.NewReader(req.rawRequest.Data)), rawHttpOptions)
|
||||
}
|
||||
|
||||
// handleDecompression if the user specified a custom encoding (as golang transport doesn't do this automatically)
|
||||
func handleDecompression(resp *http.Response, bodyOrig []byte) (bodyDec []byte, err error) {
|
||||
if resp == nil {
|
||||
return bodyOrig, nil
|
||||
}
|
||||
|
||||
var reader io.ReadCloser
|
||||
switch resp.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
reader, err = gzip.NewReader(bytes.NewReader(bodyOrig))
|
||||
case "deflate":
|
||||
reader, err = zlib.NewReader(bytes.NewReader(bodyOrig))
|
||||
default:
|
||||
return bodyOrig, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
bodyDec, err = io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return bodyOrig, err
|
||||
}
|
||||
return bodyDec, nil
|
||||
}
|
||||
|
||||
// decodeGBK converts GBK to UTF-8
|
||||
func decodeGBK(s []byte) ([]byte, error) {
|
||||
I := bytes.NewReader(s)
|
||||
O := transform.NewReader(I, simplifiedchinese.GBK.NewDecoder())
|
||||
d, e := io.ReadAll(O)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// isContentTypeGbk checks if the content-type header is gbk
|
||||
func isContentTypeGbk(contentType string) bool {
|
||||
contentType = strings.ToLower(contentType)
|
||||
return stringsutil.ContainsAny(contentType, "gbk", "gb2312", "gb18030")
|
||||
}
|
||||
|
||||
// if template contains more than 1 request and matchers require requestcondition from
|
||||
// both requests , then we need to request for event from interactsh even if current request
|
||||
// doesnot use interactsh url in it
|
||||
func getInteractshURLsFromEvent(event map[string]interface{}) []string {
|
||||
interactshUrls := map[string]struct{}{}
|
||||
for k, v := range event {
|
||||
if strings.HasPrefix(k, "interactsh-url") {
|
||||
interactshUrls[types.ToString(v)] = struct{}{}
|
||||
}
|
||||
}
|
||||
return mapsutil.GetKeys(interactshUrls)
|
||||
}
|
||||
|
@ -61,7 +61,9 @@ type Request struct {
|
||||
// description: |
|
||||
// Code contains code to execute for the javascript request.
|
||||
Code string `yaml:"code,omitempty" json:"code,omitempty" jsonschema:"title=code to execute in javascript,description=Executes inline javascript code for the request"`
|
||||
|
||||
// description: |
|
||||
// Timeout in seconds is optional timeout for each javascript script execution (i.e init, pre-condition, code)
|
||||
Timeout int `yaml:"timeout,omitempty" json:"timeout,omitempty" jsonschema:"title=timeout for javascript execution,description=Timeout in seconds is optional timeout for entire javascript script execution"`
|
||||
// description: |
|
||||
// StopAtFirstMatch stops processing the request at first match.
|
||||
StopAtFirstMatch bool `yaml:"stop-at-first-match,omitempty" json:"stop-at-first-match,omitempty" jsonschema:"title=stop at first match,description=Stop the execution after a match is found"`
|
||||
@ -89,6 +91,10 @@ type Request struct {
|
||||
|
||||
// cache any variables that may be needed for operation.
|
||||
options *protocols.ExecutorOptions `yaml:"-" json:"-"`
|
||||
|
||||
preConditionCompiled *goja.Program `yaml:"-" json:"-"`
|
||||
|
||||
scriptCompiled *goja.Program `yaml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Compile compiles the request generators preparing any requests possible.
|
||||
@ -141,7 +147,9 @@ func (request *Request) Compile(options *protocols.ExecutorOptions) error {
|
||||
prettyPrint(request.TemplateID, buff.String())
|
||||
}
|
||||
|
||||
opts := &compiler.ExecuteOptions{}
|
||||
opts := &compiler.ExecuteOptions{
|
||||
Timeout: request.Timeout,
|
||||
}
|
||||
// register 'export' function to export variables from init code
|
||||
// these are saved in args and are available in pre-condition and request code
|
||||
opts.Callback = func(runtime *goja.Runtime) error {
|
||||
@ -192,13 +200,21 @@ func (request *Request) Compile(options *protocols.ExecutorOptions) error {
|
||||
},
|
||||
})
|
||||
}
|
||||
opts.Cleanup = func(runtime *goja.Runtime) {
|
||||
_ = runtime.GlobalObject().Delete("set")
|
||||
_ = runtime.GlobalObject().Delete("updatePayload")
|
||||
}
|
||||
|
||||
args := compiler.NewExecuteArgs()
|
||||
allVars := generators.MergeMaps(options.Variables.GetAll(), options.Options.Vars.AsMap(), request.options.Constants)
|
||||
// proceed with whatever args we have
|
||||
args.Args, _ = request.evaluateArgs(allVars, options, true)
|
||||
|
||||
result, err := request.options.JsCompiler.ExecuteWithOptions(request.Init, args, opts)
|
||||
initCompiled, err := goja.Compile("", request.Init, false)
|
||||
if err != nil {
|
||||
return errorutil.NewWithTag(request.TemplateID, "could not compile init code: %s", err)
|
||||
}
|
||||
result, err := request.options.JsCompiler.ExecuteWithOptions(initCompiled, args, opts)
|
||||
if err != nil {
|
||||
return errorutil.NewWithTag(request.TemplateID, "could not execute pre-condition: %s", err)
|
||||
}
|
||||
@ -213,6 +229,24 @@ func (request *Request) Compile(options *protocols.ExecutorOptions) error {
|
||||
}
|
||||
}
|
||||
|
||||
// compile pre-condition if any
|
||||
if request.PreCondition != "" {
|
||||
preConditionCompiled, err := goja.Compile("", request.PreCondition, false)
|
||||
if err != nil {
|
||||
return errorutil.NewWithTag(request.TemplateID, "could not compile pre-condition: %s", err)
|
||||
}
|
||||
request.preConditionCompiled = preConditionCompiled
|
||||
}
|
||||
|
||||
// compile actual source code
|
||||
if request.Code != "" {
|
||||
scriptCompiled, err := goja.Compile("", request.Code, false)
|
||||
if err != nil {
|
||||
return errorutil.NewWithTag(request.TemplateID, "could not compile javascript code: %s", err)
|
||||
}
|
||||
request.scriptCompiled = scriptCompiled
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -303,7 +337,7 @@ func (request *Request) ExecuteWithResults(target *contextargs.Context, dynamicV
|
||||
}
|
||||
argsCopy.TemplateCtx = templateCtx.GetAll()
|
||||
|
||||
result, err := request.options.JsCompiler.ExecuteWithOptions(request.PreCondition, argsCopy, nil)
|
||||
result, err := request.options.JsCompiler.ExecuteWithOptions(request.preConditionCompiled, argsCopy, &compiler.ExecuteOptions{Timeout: request.Timeout})
|
||||
if err != nil {
|
||||
return errorutil.NewWithTag(request.TemplateID, "could not execute pre-condition: %s", err)
|
||||
}
|
||||
@ -415,19 +449,27 @@ func (request *Request) executeRequestWithPayloads(hostPort string, input *conte
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
argsCopy.TemplateCtx = request.options.GetTemplateCtx(input.MetaInput).GetAll()
|
||||
|
||||
var requestData = []byte(request.Code)
|
||||
var interactshURLs []string
|
||||
if request.options.Interactsh != nil {
|
||||
var transformedData string
|
||||
transformedData, interactshURLs = request.options.Interactsh.Replace(string(request.Code), []string{})
|
||||
requestData = []byte(transformedData)
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
argsCopy.TemplateCtx = request.options.GetTemplateCtx(input.MetaInput).GetAll()
|
||||
} else {
|
||||
argsCopy.TemplateCtx = map[string]interface{}{}
|
||||
}
|
||||
|
||||
results, err := request.options.JsCompiler.ExecuteWithOptions(string(requestData), argsCopy, &compiler.ExecuteOptions{
|
||||
Pool: false,
|
||||
})
|
||||
var interactshURLs []string
|
||||
if request.options.Interactsh != nil {
|
||||
if argsCopy.Args != nil {
|
||||
for k, v := range argsCopy.Args {
|
||||
var urls []string
|
||||
v, urls = request.options.Interactsh.Replace(fmt.Sprint(v), []string{})
|
||||
if len(urls) > 0 {
|
||||
interactshURLs = append(interactshURLs, urls...)
|
||||
argsCopy.Args[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
results, err := request.options.JsCompiler.ExecuteWithOptions(request.scriptCompiled, argsCopy, &compiler.ExecuteOptions{Timeout: request.Timeout})
|
||||
if err != nil {
|
||||
// shouldn't fail even if it returned error instead create a failure event
|
||||
results = compiler.ExecuteResult{"success": false, "error": err.Error()}
|
||||
|
@ -136,7 +136,9 @@ func (request *Request) executeOnTarget(input *contextargs.Context, visited maps
|
||||
}
|
||||
variables := protocolutils.GenerateVariables(address, false, nil)
|
||||
// add template ctx variables to varMap
|
||||
variables = generators.MergeMaps(variables, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
variables = generators.MergeMaps(variables, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
variablesMap := request.options.Variables.Evaluate(variables)
|
||||
variables = generators.MergeMaps(variablesMap, variables, request.options.Constants)
|
||||
|
||||
@ -327,7 +329,9 @@ func (request *Request) executeRequestWithPayloads(variables map[string]interfac
|
||||
outputEvent := request.responseToDSLMap(reqBuilder.String(), string(final), response, input.MetaInput.Input, actualAddress)
|
||||
// add response fields to template context and merge templatectx variables to output event
|
||||
request.options.AddTemplateVars(input.MetaInput, request.Type(), request.ID, outputEvent)
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
outputEvent["ip"] = request.dialer.GetDialedIP(hostname)
|
||||
if request.options.StopAtFirstMatch {
|
||||
outputEvent["stop-at-first-match"] = true
|
||||
|
@ -89,7 +89,9 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, metadata
|
||||
outputEvent := request.responseToDSLMap(resp, data, data, data, tostring.UnsafeToString(dumpedResponse), tostring.UnsafeToString(body), utils.HeadersToString(resp.Header), 0, nil)
|
||||
// add response fields to template context and merge templatectx variables to output event
|
||||
request.options.AddTemplateVars(input.MetaInput, request.Type(), request.GetID(), outputEvent)
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
outputEvent = generators.MergeMaps(outputEvent, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
outputEvent["ip"] = ""
|
||||
for k, v := range previous {
|
||||
outputEvent[k] = v
|
||||
|
@ -126,6 +126,15 @@ func (e *ExecutorOptions) RemoveTemplateCtx(input *contextargs.MetaInput) {
|
||||
}
|
||||
}
|
||||
|
||||
// HasTemplateCtx returns true if template context exists for given input
|
||||
func (e *ExecutorOptions) HasTemplateCtx(input *contextargs.MetaInput) bool {
|
||||
scanId := input.GetScanHash(e.TemplateID)
|
||||
if e.templateCtxStore != nil {
|
||||
return e.templateCtxStore.Has(scanId)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetTemplateCtx returns template context for given input
|
||||
func (e *ExecutorOptions) GetTemplateCtx(input *contextargs.MetaInput) *contextargs.Context {
|
||||
scanId := input.GetScanHash(e.TemplateID)
|
||||
@ -133,6 +142,7 @@ func (e *ExecutorOptions) GetTemplateCtx(input *contextargs.MetaInput) *contexta
|
||||
if !ok {
|
||||
// if template context does not exist create new and add it to store and return it
|
||||
templateCtx = contextargs.New()
|
||||
templateCtx.MetaInput = input
|
||||
_ = e.templateCtxStore.Set(scanId, templateCtx)
|
||||
}
|
||||
return templateCtx
|
||||
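Several hunks in this change replace unconditional GetTemplateCtx(...).GetAll() merges with a HasTemplateCtx check first, so read-only paths no longer allocate a fresh template context as a side effect of asking for it. A generic sketch of that lazily created, check-before-get store (the types are illustrative, not nuclei's contextargs package):

```go
package main

import "fmt"

// ctxStore is an illustrative stand-in for the per-scan template context
// store: Get creates an entry on demand, Has only reports whether one exists.
type ctxStore struct {
	m map[string]map[string]interface{}
}

func (s *ctxStore) Has(scanID string) bool {
	_, ok := s.m[scanID]
	return ok
}

func (s *ctxStore) Get(scanID string) map[string]interface{} {
	if ctx, ok := s.m[scanID]; ok {
		return ctx
	}
	ctx := map[string]interface{}{} // created lazily, like GetTemplateCtx
	s.m[scanID] = ctx
	return ctx
}

func main() {
	store := &ctxStore{m: map[string]map[string]interface{}{}}

	out := map[string]interface{}{"status": 200}
	// Guarded merge: only touch the context if it already exists,
	// mirroring the HasTemplateCtx checks added in this change.
	if store.Has("scan-1") {
		for k, v := range store.Get("scan-1") {
			out[k] = v
		}
	}
	fmt.Println(out) // map[status:200], and no context was allocated
}
```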
@ -216,9 +226,19 @@ type Request interface {
|
||||
type OutputEventCallback func(result *output.InternalWrappedEvent)
|
||||
|
||||
func MakeDefaultResultEvent(request Request, wrapped *output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
// Note: operator result is generated if something was a successful match/extract/dynamic-extract
|
||||
// but results should not be generated if
|
||||
// 1. no match was found and some dynamic values were extracted
|
||||
// 2. if something was extracted (matchers exist but no match was found)
|
||||
if len(wrapped.OperatorsResult.DynamicValues) > 0 && !wrapped.OperatorsResult.Matched {
|
||||
return nil
|
||||
}
|
||||
// check if something was extracted (except dynamic values)
|
||||
extracted := len(wrapped.OperatorsResult.Extracts) > 0 || len(wrapped.OperatorsResult.OutputExtracts) > 0
|
||||
if extracted && len(wrapped.OperatorsResult.Operators.Matchers) > 0 && !wrapped.OperatorsResult.Matched {
|
||||
// if extracted and matchers exist but no match was found then don't generate result
|
||||
return nil
|
||||
}
|
||||
|
||||
results := make([]*output.ResultEvent, 0, len(wrapped.OperatorsResult.Matches)+1)
|
||||
|
||||
|
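The MakeDefaultResultEvent hunk above spells out two suppression rules: no result when only dynamic values were extracted without a match, and no result when matchers exist, nothing matched, and only extractors fired. The same decision restated as a small pure predicate (the boolean parameters are simplifications, not nuclei's operator types):

```go
package main

import "fmt"

// shouldEmitResult restates the filtering rules from MakeDefaultResultEvent:
//   - dynamic extractions alone (no match) produce no result
//   - extractions alongside unmatched matchers produce no result
func shouldEmitResult(matched, hasDynamicValues, extracted, hasMatchers bool) bool {
	if hasDynamicValues && !matched {
		return false
	}
	if extracted && hasMatchers && !matched {
		return false
	}
	return true
}

func main() {
	fmt.Println(shouldEmitResult(false, true, false, false)) // false: only dynamic values
	fmt.Println(shouldEmitResult(false, false, true, true))  // false: extract, but matchers unmatched
	fmt.Println(shouldEmitResult(true, true, true, true))    // true: matched
}
```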
@ -215,7 +215,11 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
|
||||
|
||||
hostnameVariables := protocolutils.GenerateDNSVariables(hostname)
|
||||
// add template context variables to varMap
|
||||
values := generators.MergeMaps(payloadValues, hostnameVariables, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
values := generators.MergeMaps(payloadValues, hostnameVariables)
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
values = generators.MergeMaps(values, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
|
||||
variablesMap := request.options.Variables.Evaluate(values)
|
||||
payloadValues = generators.MergeMaps(variablesMap, payloadValues, request.options.Constants)
|
||||
|
||||
@ -322,7 +326,9 @@ func (request *Request) ExecuteWithResults(input *contextargs.Context, dynamicVa
|
||||
}
|
||||
|
||||
// add response fields ^ to template context and merge templatectx variables to output event
|
||||
data = generators.MergeMaps(data, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
if request.options.HasTemplateCtx(input.MetaInput) {
|
||||
data = generators.MergeMaps(data, request.options.GetTemplateCtx(input.MetaInput).GetAll())
|
||||
}
|
||||
event := eventcreator.CreateEvent(request, data, requestOptions.Options.Debug || requestOptions.Options.DebugResponse)
|
||||
if requestOptions.Options.Debug || requestOptions.Options.DebugResponse || requestOptions.Options.StoreResponse {
|
||||
msg := fmt.Sprintf("[%s] Dumped SSL response for %s", requestOptions.TemplateID, input.MetaInput.Input)
|
||||
|
43
lib/nuclei/pkg/protocols/utils/reader.go
Executable file
@ -0,0 +1,43 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
MaxBodyRead = int64(1 << 22) // 4MB using shift operator
|
||||
)
|
||||
|
||||
var _ io.ReadCloser = &LimitResponseBody{}
|
||||
|
||||
type LimitResponseBody struct {
|
||||
io.Reader
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// NewLimitResponseBody wraps response body with a limit reader.
|
||||
// thus only allowing MaxBodyRead bytes to be read. i.e 4MB
|
||||
func NewLimitResponseBody(body io.ReadCloser) io.ReadCloser {
|
||||
return NewLimitResponseBodyWithSize(body, MaxBodyRead)
|
||||
}
|
||||
|
||||
// NewLimitResponseBodyWithSize wraps response body with a limit reader,
|
||||
// thus only allowing up to the given size in bytes to be read.
|
||||
func NewLimitResponseBodyWithSize(body io.ReadCloser, size int64) io.ReadCloser {
|
||||
if body == nil {
|
||||
return nil
|
||||
}
|
||||
if size == -1 {
|
||||
// stick to default 4MB
|
||||
size = MaxBodyRead
|
||||
}
|
||||
return &LimitResponseBody{
|
||||
Reader: io.LimitReader(body, size),
|
||||
Closer: body,
|
||||
}
|
||||
}
|
||||
|
||||
// LimitBodyRead limits the body read to MaxBodyRead bytes.
|
||||
func LimitBodyRead(r io.Reader) ([]byte, error) {
|
||||
return io.ReadAll(io.LimitReader(r, MaxBodyRead))
|
||||
}
|
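reader.go above is new: it caps how much of a response body will ever be read (4MB by default) by wrapping the body in an io.LimitReader while keeping the original Closer. A hedged usage sketch against net/http, assuming the package lives under the import path github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils as the rest of this diff suggests:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	// Assumed import path for the new reader.go shown above.
	protoutils "github.com/projectdiscovery/nuclei/v3/pkg/protocols/utils"
)

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// Wrap the body so at most MaxBodyRead (4MB) is ever read,
	// even if the server streams an unbounded response.
	body := protoutils.NewLimitResponseBody(resp.Body)
	defer body.Close()

	data, err := io.ReadAll(body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes (capped at %d)\n", len(data), protoutils.MaxBodyRead)
}
```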
@ -2,10 +2,11 @@ package jsonexporter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
)
|
||||
|
||||
type Exporter struct {
|
||||
@ -17,8 +18,8 @@ type Exporter struct {
|
||||
// Options contains the configuration options for JSON exporter client
|
||||
type Options struct {
|
||||
// File is the file to export found JSON result to
|
||||
File string `yaml:"file"`
|
||||
IncludeRawPayload bool `yaml:"include-raw-payload"`
|
||||
File string `yaml:"file"`
|
||||
OmitRaw bool `yaml:"omit-raw"`
|
||||
}
|
||||
|
||||
// New creates a new JSON exporter integration client based on options.
|
||||
@ -37,11 +38,7 @@ func (exporter *Exporter) Export(event *output.ResultEvent) error {
|
||||
exporter.mutex.Lock()
|
||||
defer exporter.mutex.Unlock()
|
||||
|
||||
// If the IncludeRawPayload is not set, then set the request and response to an empty string in the event to avoid
|
||||
// writing them to the list of events.
|
||||
// This will reduce the amount of storage as well as the fields being excluded from the resulting JSON output since
|
||||
// the property is set to "omitempty"
|
||||
if !exporter.options.IncludeRawPayload {
|
||||
if exporter.options.OmitRaw {
|
||||
event.Request = ""
|
||||
event.Response = ""
|
||||
}
|
||||
|
@ -2,10 +2,11 @@ package jsonl
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
)
|
||||
|
||||
type Exporter struct {
|
||||
@ -17,8 +18,8 @@ type Exporter struct {
|
||||
// Options contains the configuration options for JSONL exporter client
|
||||
type Options struct {
|
||||
// File is the file to export found JSONL result to
|
||||
File string `yaml:"file"`
|
||||
IncludeRawPayload bool `yaml:"include-raw-payload"`
|
||||
File string `yaml:"file"`
|
||||
OmitRaw bool `yaml:"omit-raw"`
|
||||
}
|
||||
|
||||
// New creates a new JSONL exporter integration client based on options.
|
||||
@ -37,11 +38,7 @@ func (exporter *Exporter) Export(event *output.ResultEvent) error {
|
||||
exporter.mutex.Lock()
|
||||
defer exporter.mutex.Unlock()
|
||||
|
||||
// If the IncludeRawPayload is not set, then set the request and response to an empty string in the event to avoid
|
||||
// writing them to the list of events.
|
||||
// This will reduce the amount of storage as well as the fields being excluded from the resulting JSONL output since
|
||||
// the property is set to "omitempty"
|
||||
if !exporter.options.IncludeRawPayload {
|
||||
if exporter.options.OmitRaw {
|
||||
event.Request = ""
|
||||
event.Response = ""
|
||||
}
|
||||
|
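Both the JSON and JSONL exporters flip the old opt-in IncludeRawPayload into an opt-out OmitRaw: when set, Request and Response are blanked before marshalling, and because the result struct tags those fields omitempty they drop out of the written output entirely. A self-contained illustration of that effect (the Result struct is a stand-in, not nuclei's output.ResultEvent):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Result is an illustrative stand-in for an exported event; the real
// struct tags its raw fields with omitempty in the same way.
type Result struct {
	TemplateID string `json:"template-id"`
	Request    string `json:"request,omitempty"`
	Response   string `json:"response,omitempty"`
}

func export(ev Result, omitRaw bool) string {
	if omitRaw {
		// Blanking the fields is enough: omitempty drops them from the JSON.
		ev.Request = ""
		ev.Response = ""
	}
	out, _ := json.Marshal(ev)
	return string(out)
}

func main() {
	ev := Result{TemplateID: "example-template", Request: "GET / HTTP/1.1", Response: "HTTP/1.1 200 OK"}
	fmt.Println(export(ev, false)) // full event with raw request/response
	fmt.Println(export(ev, true))  // {"template-id":"example-template"}
}
```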
@ -26,9 +26,9 @@ type Exporter struct {
|
||||
// Options contains the configuration options for GitHub issue tracker client
|
||||
type Options struct {
|
||||
// Directory is the directory to export found results to
|
||||
Directory string `yaml:"directory"`
|
||||
IncludeRawPayload bool `yaml:"include-raw-payload"`
|
||||
SortMode string `yaml:"sort-mode"`
|
||||
Directory string `yaml:"directory"`
|
||||
OmitRaw bool `yaml:"omit-raw"`
|
||||
SortMode string `yaml:"sort-mode"`
|
||||
}
|
||||
|
||||
// New creates a new markdown exporter integration client based on options.
|
||||
@ -56,15 +56,6 @@ func New(options *Options) (*Exporter, error) {
|
||||
|
||||
// Export exports a passed result event to markdown
|
||||
func (exporter *Exporter) Export(event *output.ResultEvent) error {
|
||||
// If the IncludeRawPayload is not set, then set the request and response to an empty string in the event to avoid
|
||||
// writing them to the list of events.
|
||||
// This will reduce the amount of storage as well as the fields being excluded from the markdown report output since
|
||||
// the property is set to "omitempty"
|
||||
if !exporter.options.IncludeRawPayload {
|
||||
event.Request = ""
|
||||
event.Response = ""
|
||||
}
|
||||
|
||||
// index file generation
|
||||
file, err := os.OpenFile(filepath.Join(exporter.directory, indexFileName), os.O_APPEND|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
@ -114,7 +105,7 @@ func (exporter *Exporter) Export(event *output.ResultEvent) error {
|
||||
dataBuilder.WriteString(util.CreateHeading3(format.Summary(event)))
|
||||
dataBuilder.WriteString("\n")
|
||||
dataBuilder.WriteString(util.CreateHorizontalLine())
|
||||
dataBuilder.WriteString(format.CreateReportDescription(event, util.MarkdownFormatter{}))
|
||||
dataBuilder.WriteString(format.CreateReportDescription(event, util.MarkdownFormatter{}, exporter.options.OmitRaw))
|
||||
data := dataBuilder.Bytes()
|
||||
|
||||
return os.WriteFile(filepath.Join(exporter.directory, subdirectory, filename), data, 0644)
|
||||
|
@ -34,7 +34,7 @@ func GetMatchedTemplateName(event *output.ResultEvent) string {
|
||||
return matchedTemplateName
|
||||
}
|
||||
|
||||
func CreateReportDescription(event *output.ResultEvent, formatter ResultFormatter) string {
|
||||
func CreateReportDescription(event *output.ResultEvent, formatter ResultFormatter, omitRaw bool) string {
|
||||
template := GetMatchedTemplateName(event)
|
||||
builder := &bytes.Buffer{}
|
||||
builder.WriteString(fmt.Sprintf("%s: %s matched at %s\n\n", formatter.MakeBold("Details"), formatter.MakeBold(template), event.Host))
|
||||
@ -51,20 +51,22 @@ func CreateReportDescription(event *output.ResultEvent, formatter ResultFormatte
|
||||
builder.WriteString("\n\n")
|
||||
builder.WriteString(CreateTemplateInfoTable(&event.Info, formatter))
|
||||
|
||||
if event.Request != "" {
|
||||
builder.WriteString(formatter.CreateCodeBlock("Request", types.ToHexOrString(event.Request), "http"))
|
||||
}
|
||||
if event.Response != "" {
|
||||
var responseString string
|
||||
// If the response is larger than 5 kb, truncate it before writing.
|
||||
maxKbSize := 5 * 1024
|
||||
if len(event.Response) > maxKbSize {
|
||||
responseString = event.Response[:maxKbSize]
|
||||
responseString += ".... Truncated ...."
|
||||
} else {
|
||||
responseString = event.Response
|
||||
if !omitRaw {
|
||||
if event.Request != "" {
|
||||
builder.WriteString(formatter.CreateCodeBlock("Request", types.ToHexOrString(event.Request), "http"))
|
||||
}
|
||||
if event.Response != "" {
|
||||
var responseString string
|
||||
// If the response is larger than 5 kb, truncate it before writing.
|
||||
maxKbSize := 5 * 1024
|
||||
if len(event.Response) > maxKbSize {
|
||||
responseString = event.Response[:maxKbSize]
|
||||
responseString += ".... Truncated ...."
|
||||
} else {
|
||||
responseString = event.Response
|
||||
}
|
||||
builder.WriteString(formatter.CreateCodeBlock("Response", responseString, "http"))
|
||||
}
|
||||
builder.WriteString(formatter.CreateCodeBlock("Response", responseString, "http"))
|
||||
}
|
||||
|
||||
if len(event.ExtractedResults) > 0 || len(event.Metadata) > 0 {
|
||||
|
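CreateReportDescription above now takes omitRaw and skips the request/response code blocks entirely when it is set; when raw output is kept, responses are still cut at 5KB before being written. The truncation rule on its own, as a tiny helper (the function name is illustrative):

```go
package main

import "fmt"

// truncateForReport mirrors the formatter's behaviour: bodies over maxLen
// bytes are cut and marked, smaller ones pass through unchanged.
func truncateForReport(response string, maxLen int) string {
	if len(response) > maxLen {
		return response[:maxLen] + ".... Truncated ...."
	}
	return response
}

func main() {
	fmt.Println(truncateForReport("short body", 5*1024))
	long := string(make([]byte, 10*1024))
	fmt.Println(len(truncateForReport(long, 5*1024))) // 5139: 5120 kept bytes plus the marker
}
```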
@ -39,4 +39,5 @@ type Options struct {
|
||||
JSONLExporter *jsonl.Options `yaml:"jsonl"`
|
||||
|
||||
HttpClient *retryablehttp.Client `yaml:"-"`
|
||||
OmitRaw bool `yaml:"-"`
|
||||
}
|
||||
|
@ -99,6 +99,7 @@ func New(options *Options, db string) (Client, error) {
|
||||
|
||||
if options.GitHub != nil {
|
||||
options.GitHub.HttpClient = options.HttpClient
|
||||
options.GitHub.OmitRaw = options.OmitRaw
|
||||
tracker, err := github.New(options.GitHub)
|
||||
if err != nil {
|
||||
return nil, errorutil.NewWithErr(err).Wrap(ErrReportingClientCreation)
|
||||
@ -107,6 +108,7 @@ func New(options *Options, db string) (Client, error) {
|
||||
}
|
||||
if options.GitLab != nil {
|
||||
options.GitLab.HttpClient = options.HttpClient
|
||||
options.GitLab.OmitRaw = options.OmitRaw
|
||||
tracker, err := gitlab.New(options.GitLab)
|
||||
if err != nil {
|
||||
return nil, errorutil.NewWithErr(err).Wrap(ErrReportingClientCreation)
|
||||
@ -115,6 +117,7 @@ func New(options *Options, db string) (Client, error) {
|
||||
}
|
||||
if options.Jira != nil {
|
||||
options.Jira.HttpClient = options.HttpClient
|
||||
options.Jira.OmitRaw = options.OmitRaw
|
||||
tracker, err := jira.New(options.Jira)
|
||||
if err != nil {
|
||||
return nil, errorutil.NewWithErr(err).Wrap(ErrReportingClientCreation)
|
||||
|
@ -3,6 +3,11 @@ package github
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-github/github"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
@ -11,10 +16,6 @@ import (
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
"github.com/projectdiscovery/retryablehttp-go"
|
||||
"golang.org/x/oauth2"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Integration is a client for an issue tracker integration
|
||||
@ -45,6 +46,7 @@ type Options struct {
|
||||
DuplicateIssueCheck bool `yaml:"duplicate-issue-check"`
|
||||
|
||||
HttpClient *retryablehttp.Client `yaml:"-"`
|
||||
OmitRaw bool `yaml:"-"`
|
||||
}
|
||||
|
||||
// New creates a new issue tracker integration client based on options.
|
||||
@ -80,7 +82,7 @@ func New(options *Options) (*Integration, error) {
|
||||
// CreateIssue creates an issue in the tracker
|
||||
func (i *Integration) CreateIssue(event *output.ResultEvent) (err error) {
|
||||
summary := format.Summary(event)
|
||||
description := format.CreateReportDescription(event, util.MarkdownFormatter{})
|
||||
description := format.CreateReportDescription(event, util.MarkdownFormatter{}, i.options.OmitRaw)
|
||||
labels := []string{}
|
||||
severityLabel := fmt.Sprintf("Severity: %s", event.Info.SeverityHolder.Severity.String())
|
||||
if i.options.SeverityAsLabel && severityLabel != "" {
|
||||
|
@ -37,6 +37,7 @@ type Options struct {
|
||||
DuplicateIssueCheck bool `yaml:"duplicate-issue-check" default:"false"`
|
||||
|
||||
HttpClient *retryablehttp.Client `yaml:"-"`
|
||||
OmitRaw bool `yaml:"-"`
|
||||
}
|
||||
|
||||
// New creates a new issue tracker integration client based on options.
|
||||
@ -62,7 +63,7 @@ func New(options *Options) (*Integration, error) {
|
||||
// CreateIssue creates an issue in the tracker
|
||||
func (i *Integration) CreateIssue(event *output.ResultEvent) error {
|
||||
summary := format.Summary(event)
|
||||
description := format.CreateReportDescription(event, util.MarkdownFormatter{})
|
||||
description := format.CreateReportDescription(event, util.MarkdownFormatter{}, i.options.OmitRaw)
|
||||
labels := []string{}
|
||||
severityLabel := fmt.Sprintf("Severity: %s", event.Info.SeverityHolder.Severity.String())
|
||||
if i.options.SeverityAsLabel && severityLabel != "" {
|
||||
|
@ -77,6 +77,7 @@ type Options struct {
|
||||
// that will be used to create the issue
|
||||
CustomFields map[string]interface{} `yaml:"custom-fields" json:"custom_fields"`
|
||||
StatusNot string `yaml:"status-not" json:"status_not"`
|
||||
OmitRaw bool `yaml:"-"`
|
||||
}
|
||||
|
||||
// New creates a new issue tracker integration client based on options.
|
||||
@ -154,7 +155,7 @@ func (i *Integration) CreateNewIssue(event *output.ResultEvent) error {
|
||||
}
|
||||
}
|
||||
fields := &jira.IssueFields{
|
||||
Description: format.CreateReportDescription(event, i),
|
||||
Description: format.CreateReportDescription(event, i, i.options.OmitRaw),
|
||||
Unknowns: customFields,
|
||||
Type: jira.IssueType{Name: i.options.IssueType},
|
||||
Project: jira.Project{Key: i.options.ProjectName},
|
||||
@ -164,7 +165,7 @@ func (i *Integration) CreateNewIssue(event *output.ResultEvent) error {
|
||||
if !i.options.Cloud {
|
||||
fields = &jira.IssueFields{
|
||||
Assignee: &jira.User{Name: i.options.AccountID},
|
||||
Description: format.CreateReportDescription(event, i),
|
||||
Description: format.CreateReportDescription(event, i, i.options.OmitRaw),
|
||||
Type: jira.IssueType{Name: i.options.IssueType},
|
||||
Project: jira.Project{Key: i.options.ProjectName},
|
||||
Summary: summary,
|
||||
@ -196,7 +197,7 @@ func (i *Integration) CreateIssue(event *output.ResultEvent) error {
|
||||
return err
|
||||
} else if issueID != "" {
|
||||
_, _, err = i.jira.Issue.AddComment(issueID, &jira.Comment{
|
||||
Body: format.CreateReportDescription(event, i),
|
||||
Body: format.CreateReportDescription(event, i, i.options.OmitRaw),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
@ -2,7 +2,9 @@ package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
@ -10,46 +12,52 @@ import (
|
||||
|
||||
type ScanContext struct {
|
||||
context.Context
|
||||
Input *contextargs.Context
|
||||
errors []error
|
||||
events []*output.InternalWrappedEvent
|
||||
// exported / configurable fields
|
||||
Input *contextargs.Context
|
||||
|
||||
// callbacks or hooks
|
||||
OnError func(error)
|
||||
OnResult func(e *output.InternalWrappedEvent)
|
||||
|
||||
// unexported state fields
|
||||
errors []error
|
||||
warnings []string
|
||||
events []*output.InternalWrappedEvent
|
||||
|
||||
// might not be required but better to sync
|
||||
m sync.Mutex
|
||||
}
|
||||
|
||||
// NewScanContext creates a new scan context using input
|
||||
func NewScanContext(input *contextargs.Context) *ScanContext {
|
||||
return &ScanContext{Input: input}
|
||||
}
|
||||
|
||||
// GenerateResult returns final results slice from all events
|
||||
func (s *ScanContext) GenerateResult() []*output.ResultEvent {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
return aggregateResults(s.events)
|
||||
}
|
||||
|
||||
func aggregateResults(events []*output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
var results []*output.ResultEvent
|
||||
for _, e := range events {
|
||||
results = append(results, e.Results...)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func joinErrors(errors []error) string {
|
||||
var errorMessages []string
|
||||
for _, e := range errors {
|
||||
errorMessages = append(errorMessages, e.Error())
|
||||
}
|
||||
return strings.Join(errorMessages, "; ")
|
||||
}
|
||||
|
||||
// LogEvent logs events to all events and triggers any callbacks
|
||||
func (s *ScanContext) LogEvent(e *output.InternalWrappedEvent) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
if e == nil {
|
||||
// do not log nil events
|
||||
return
|
||||
}
|
||||
if s.OnResult != nil {
|
||||
s.OnResult(e)
|
||||
}
|
||||
s.events = append(s.events, e)
|
||||
}
|
||||
|
||||
// LogError logs error to all events and triggers any callbacks
|
||||
func (s *ScanContext) LogError(err error) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
@ -68,3 +76,37 @@ func (s *ScanContext) LogError(err error) {
|
||||
e.InternalEvent["error"] = errorMessage
|
||||
}
|
||||
}
|
||||
|
||||
// LogWarning logs warning to all events
|
||||
func (s *ScanContext) LogWarning(format string, args ...any) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
val := fmt.Sprintf(format, args...)
|
||||
s.warnings = append(s.warnings, val)
|
||||
|
||||
for _, e := range s.events {
|
||||
if e.InternalEvent != nil {
|
||||
e.InternalEvent["warning"] = strings.Join(s.warnings, "; ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// aggregateResults aggregates results from multiple events
|
||||
func aggregateResults(events []*output.InternalWrappedEvent) []*output.ResultEvent {
|
||||
var results []*output.ResultEvent
|
||||
for _, e := range events {
|
||||
results = append(results, e.Results...)
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// joinErrors joins multiple errors and returns a single error string
|
||||
func joinErrors(errors []error) string {
|
||||
var errorMessages []string
|
||||
for _, e := range errors {
|
||||
if e != nil {
|
||||
errorMessages = append(errorMessages, e.Error())
|
||||
}
|
||||
}
|
||||
return strings.Join(errorMessages, "; ")
|
||||
}
|
||||
|
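scan.ScanContext is reworked above into a mutex-guarded collector with OnResult/OnError hooks, warning propagation, and GenerateResult aggregation, which is what the flow executor now logs into instead of printing directly. A stripped-down sketch of how a caller drives that shape (the collector type is illustrative, not the real scan package):

```go
package main

import (
	"fmt"
	"sync"
)

// collector is an illustrative stand-in for scan.ScanContext: it records
// events under a mutex and fires an optional callback per event.
type collector struct {
	mu       sync.Mutex
	OnResult func(string)
	events   []string
	errors   []error
}

func (c *collector) LogEvent(e string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.OnResult != nil {
		c.OnResult(e)
	}
	c.events = append(c.events, e)
}

func (c *collector) LogError(err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err != nil { // nil errors are ignored, as in LogError above
		c.errors = append(c.errors, err)
	}
}

// GenerateResult returns everything collected so far, like ScanContext.GenerateResult.
func (c *collector) GenerateResult() []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	return append([]string(nil), c.events...)
}

func main() {
	ctx := &collector{OnResult: func(e string) { fmt.Println("live result:", e) }}
	ctx.LogEvent("http matched on https://example.com")
	ctx.LogError(nil)
	fmt.Println("final:", ctx.GenerateResult())
}
```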
@ -183,8 +183,9 @@ func (template *Template) compileProtocolRequests(options *protocols.ExecutorOpt
|
||||
requests = append(requests, template.convertRequestToProtocolsRequest(template.RequestsJavascript)...)
|
||||
}
|
||||
}
|
||||
template.Executer = tmplexec.NewTemplateExecuter(requests, options)
|
||||
return nil
|
||||
var err error
|
||||
template.Executer, err = tmplexec.NewTemplateExecuter(requests, options)
|
||||
return err
|
||||
}
|
||||
|
||||
// convertRequestToProtocolsRequest is a convenience wrapper to convert
|
||||
@ -228,8 +229,13 @@ mainLoop:
|
||||
}
|
||||
if len(operatorsList) > 0 {
|
||||
options.Operators = operatorsList
|
||||
template.Executer = tmplexec.NewTemplateExecuter([]protocols.Request{&offlinehttp.Request{}}, options)
|
||||
return nil
|
||||
var err error
|
||||
template.Executer, err = tmplexec.NewTemplateExecuter([]protocols.Request{&offlinehttp.Request{}}, options)
|
||||
if err != nil {
|
||||
// it seems like flow executor cannot be used for offline http matching (ex:http(1) && http(2))
|
||||
return ErrIncompatibleWithOfflineMatching
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return ErrIncompatibleWithOfflineMatching
|
||||
|
@ -1698,7 +1698,7 @@ func init() {
|
||||
Value: "Matched is the input which was matched upon",
|
||||
},
|
||||
}
|
||||
JAVASCRIPTRequestDoc.Fields = make([]encoder.Doc, 9)
|
||||
JAVASCRIPTRequestDoc.Fields = make([]encoder.Doc, 10)
|
||||
JAVASCRIPTRequestDoc.Fields[0].Name = "id"
|
||||
JAVASCRIPTRequestDoc.Fields[0].Type = "string"
|
||||
JAVASCRIPTRequestDoc.Fields[0].Note = ""
|
||||
@ -1724,28 +1724,33 @@ func init() {
|
||||
JAVASCRIPTRequestDoc.Fields[4].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[4].Description = "Code contains code to execute for the javascript request."
|
||||
JAVASCRIPTRequestDoc.Fields[4].Comments[encoder.LineComment] = "Code contains code to execute for the javascript request."
|
||||
JAVASCRIPTRequestDoc.Fields[5].Name = "stop-at-first-match"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Type = "bool"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Name = "timeout"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Type = "int"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[5].Description = "StopAtFirstMatch stops processing the request at first match."
|
||||
JAVASCRIPTRequestDoc.Fields[5].Comments[encoder.LineComment] = "StopAtFirstMatch stops processing the request at first match."
|
||||
JAVASCRIPTRequestDoc.Fields[6].Name = "attack"
|
||||
JAVASCRIPTRequestDoc.Fields[6].Type = "generators.AttackTypeHolder"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Description = "Timeout in seconds is optional timeout for each javascript script execution (i.e init, pre-condition, code)"
|
||||
JAVASCRIPTRequestDoc.Fields[5].Comments[encoder.LineComment] = "Timeout in seconds is optional timeout for each javascript script execution (i.e init, pre-condition, code)"
|
||||
JAVASCRIPTRequestDoc.Fields[6].Name = "stop-at-first-match"
|
||||
JAVASCRIPTRequestDoc.Fields[6].Type = "bool"
|
||||
JAVASCRIPTRequestDoc.Fields[6].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[6].Description = "Attack is the type of payload combinations to perform.\n\nSniper is each payload once, pitchfork combines multiple payload sets and clusterbomb generates\npermutations and combinations for all payloads."
|
||||
JAVASCRIPTRequestDoc.Fields[6].Comments[encoder.LineComment] = "Attack is the type of payload combinations to perform."
|
||||
JAVASCRIPTRequestDoc.Fields[7].Name = "threads"
|
||||
JAVASCRIPTRequestDoc.Fields[7].Type = "int"
|
||||
JAVASCRIPTRequestDoc.Fields[6].Description = "StopAtFirstMatch stops processing the request at first match."
|
||||
JAVASCRIPTRequestDoc.Fields[6].Comments[encoder.LineComment] = "StopAtFirstMatch stops processing the request at first match."
|
||||
JAVASCRIPTRequestDoc.Fields[7].Name = "attack"
|
||||
JAVASCRIPTRequestDoc.Fields[7].Type = "generators.AttackTypeHolder"
|
||||
JAVASCRIPTRequestDoc.Fields[7].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[7].Description = "Payload concurreny i.e threads for sending requests."
|
||||
JAVASCRIPTRequestDoc.Fields[7].Comments[encoder.LineComment] = "Payload concurreny i.e threads for sending requests."
|
||||
|
||||
JAVASCRIPTRequestDoc.Fields[7].AddExample("Send requests using 10 concurrent threads", 10)
|
||||
JAVASCRIPTRequestDoc.Fields[8].Name = "payloads"
|
||||
JAVASCRIPTRequestDoc.Fields[8].Type = "map[string]interface{}"
|
||||
JAVASCRIPTRequestDoc.Fields[7].Description = "Attack is the type of payload combinations to perform.\n\nSniper is each payload once, pitchfork combines multiple payload sets and clusterbomb generates\npermutations and combinations for all payloads."
|
||||
JAVASCRIPTRequestDoc.Fields[7].Comments[encoder.LineComment] = "Attack is the type of payload combinations to perform."
|
||||
JAVASCRIPTRequestDoc.Fields[8].Name = "threads"
|
||||
JAVASCRIPTRequestDoc.Fields[8].Type = "int"
|
||||
JAVASCRIPTRequestDoc.Fields[8].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[8].Description = "Payloads contains any payloads for the current request.\n\nPayloads support both key-values combinations where a list\nof payloads is provided, or optionally a single file can also\nbe provided as payload which will be read on run-time."
|
||||
JAVASCRIPTRequestDoc.Fields[8].Comments[encoder.LineComment] = "Payloads contains any payloads for the current request."
|
||||
JAVASCRIPTRequestDoc.Fields[8].Description = "Payload concurreny i.e threads for sending requests."
|
||||
JAVASCRIPTRequestDoc.Fields[8].Comments[encoder.LineComment] = "Payload concurreny i.e threads for sending requests."
|
||||
|
||||
JAVASCRIPTRequestDoc.Fields[8].AddExample("Send requests using 10 concurrent threads", 10)
|
||||
JAVASCRIPTRequestDoc.Fields[9].Name = "payloads"
|
||||
JAVASCRIPTRequestDoc.Fields[9].Type = "map[string]interface{}"
|
||||
JAVASCRIPTRequestDoc.Fields[9].Note = ""
|
||||
JAVASCRIPTRequestDoc.Fields[9].Description = "Payloads contains any payloads for the current request.\n\nPayloads support both key-values combinations where a list\nof payloads is provided, or optionally a single file can also\nbe provided as payload which will be read on run-time."
|
||||
JAVASCRIPTRequestDoc.Fields[9].Comments[encoder.LineComment] = "Payloads contains any payloads for the current request."
|
||||
|
||||
HTTPSignatureTypeHolderDoc.Type = "http.SignatureTypeHolder"
|
||||
HTTPSignatureTypeHolderDoc.Comments[encoder.LineComment] = " SignatureTypeHolder is used to hold internal type of the signature"
|
||||
|
@ -6,11 +6,11 @@ import (
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/operators/common/dsl"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/helpers/writer"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/scan"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/tmplexec/flow"
|
||||
@ -24,6 +24,7 @@ type TemplateExecuter struct {
|
||||
options *protocols.ExecutorOptions
|
||||
engine TemplateEngine
|
||||
results *atomic.Bool
|
||||
program *goja.Program
|
||||
}
|
||||
|
||||
// Both executer & Executor are correct spellings (its open to interpretation)
|
||||
@ -31,7 +32,7 @@ type TemplateExecuter struct {
|
||||
var _ protocols.Executer = &TemplateExecuter{}
|
||||
|
||||
// NewTemplateExecuter creates a new request TemplateExecuter for list of requests
|
||||
func NewTemplateExecuter(requests []protocols.Request, options *protocols.ExecutorOptions) *TemplateExecuter {
|
||||
func NewTemplateExecuter(requests []protocols.Request, options *protocols.ExecutorOptions) (*TemplateExecuter, error) {
|
||||
isMultiProto := false
|
||||
lastProto := ""
|
||||
for _, request := range requests {
|
||||
@ -47,7 +48,11 @@ func NewTemplateExecuter(requests []protocols.Request, options *protocols.Execut
|
||||
// we use a dummy input here because goal of flow executor at this point is to just check
|
||||
// syntax and other things are correct before proceeding to actual execution
|
||||
// during execution new instance of flow will be created as it is tightly coupled with lot of executor options
|
||||
e.engine = flow.NewFlowExecutor(requests, contextargs.NewWithInput("dummy"), options, e.results)
|
||||
p, err := goja.Compile("flow.js", options.Flow, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not compile flow: %s", err)
|
||||
}
|
||||
e.program = p
|
||||
} else {
|
||||
// Review:
|
||||
// multiproto engine is only used if there is more than one protocol in template
|
||||
@ -58,8 +63,7 @@ func NewTemplateExecuter(requests []protocols.Request, options *protocols.Execut
|
||||
e.engine = generic.NewGenericEngine(requests, options, e.results)
|
||||
}
|
||||
}
|
||||
|
||||
return e
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Compile compiles the execution generators preparing any requests possible.
|
||||
@ -81,6 +85,10 @@ func (e *TemplateExecuter) Compile() error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if e.engine == nil && e.options.Flow != "" {
|
||||
// this is true for flow executor
|
||||
return nil
|
||||
}
|
||||
return e.engine.Compile()
|
||||
}
|
||||
|
||||
@ -117,6 +125,22 @@ func (e *TemplateExecuter) Execute(ctx *scan.ScanContext) (bool, error) {
|
||||
// something went wrong
|
||||
return
|
||||
}
|
||||
// check for internal true matcher event
|
||||
if event.HasOperatorResult() && event.OperatorsResult.Matched && event.OperatorsResult.Operators != nil {
|
||||
// note all matchers should have internal:true if it is a combination then print it
|
||||
allInternalMatchers := true
|
||||
for _, matcher := range event.OperatorsResult.Operators.Matchers {
|
||||
if allInternalMatchers && !matcher.Internal {
|
||||
allInternalMatchers = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allInternalMatchers {
|
||||
// this is an internal event and not meant to be printed
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If no results were found, and also interactsh is not being used
|
||||
// in that case we can skip it, otherwise we've to show failure in
|
||||
// case of matcher-status flag.
|
||||
@ -130,7 +154,7 @@ func (e *TemplateExecuter) Execute(ctx *scan.ScanContext) (bool, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
var err error
|
||||
var errx error
|
||||
|
||||
// Note: this is required for flow executor
|
||||
// flow executer is tightly coupled with lot of executor options
|
||||
@ -139,19 +163,24 @@ func (e *TemplateExecuter) Execute(ctx *scan.ScanContext) (bool, error) {
|
||||
// so in compile step earlier we compile it to validate javascript syntax and other things
|
||||
// and while executing we create new instance of flow executor everytime
|
||||
if e.options.Flow != "" {
|
||||
flowexec := flow.NewFlowExecutor(e.requests, ctx.Input, e.options, results)
|
||||
flowexec, err := flow.NewFlowExecutor(e.requests, ctx, e.options, results, e.program)
|
||||
if err != nil {
|
||||
ctx.LogError(err)
|
||||
return false, fmt.Errorf("could not create flow executor: %s", err)
|
||||
}
|
||||
if err := flowexec.Compile(); err != nil {
|
||||
ctx.LogError(err)
|
||||
return false, err
|
||||
}
|
||||
err = flowexec.ExecuteWithResults(ctx)
|
||||
errx = flowexec.ExecuteWithResults(ctx)
|
||||
} else {
|
||||
err = e.engine.ExecuteWithResults(ctx)
|
||||
errx = e.engine.ExecuteWithResults(ctx)
|
||||
}
|
||||
|
||||
if lastMatcherEvent != nil {
|
||||
writeFailureCallback(lastMatcherEvent, e.options.Options.MatcherStatus)
|
||||
}
|
||||
return results.Load(), err
|
||||
return results.Load(), errx
|
||||
}
|
||||
|
||||
// ExecuteWithResults executes the protocol requests and returns results instead of writing them.
|
||||
|
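NewTemplateExecuter now returns an error so a malformed flow is rejected while the template is being loaded: the flow script is compiled into a *goja.Program once in the constructor, and every Execute call builds a fresh flow executor around that shared program. A minimal sketch of that constructor shape (the executer type and scripts are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/dop251/goja"
)

// executer is an illustrative stand-in for TemplateExecuter: the flow
// program is compiled once in the constructor and reused per target.
type executer struct {
	program *goja.Program
}

// newExecuter mirrors the new constructor shape: a broken flow fails
// here, at template load time, instead of during a scan.
func newExecuter(flowSrc string) (*executer, error) {
	p, err := goja.Compile("flow.js", flowSrc, false)
	if err != nil {
		return nil, fmt.Errorf("could not compile flow: %s", err)
	}
	return &executer{program: p}, nil
}

// execute runs the shared compiled program in a fresh runtime per target.
func (e *executer) execute(target string) (bool, error) {
	vm := goja.New()
	if err := vm.Set("target", target); err != nil {
		return false, err
	}
	v, err := vm.RunProgram(e.program)
	if err != nil {
		return false, err
	}
	return v.ToBoolean(), nil
}

func main() {
	exec, err := newExecuter(`target.length > 0`)
	if err != nil {
		log.Fatal(err)
	}
	ok, err := exec.execute("https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("flow result:", ok)
}
```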
@ -9,11 +9,8 @@ import (
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/contextargs"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/generators"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/scan"
|
||||
templateTypes "github.com/projectdiscovery/nuclei/v3/pkg/templates/types"
|
||||
|
||||
@ -38,25 +35,28 @@ type ProtoOptions struct {
|
||||
|
||||
// FlowExecutor is a flow executor for executing a flow
|
||||
type FlowExecutor struct {
|
||||
input *contextargs.Context
|
||||
ctx *scan.ScanContext // scan context (includes target etc)
|
||||
options *protocols.ExecutorOptions
|
||||
|
||||
// javascript runtime reference and compiled program
|
||||
jsVM *goja.Runtime
|
||||
program *goja.Program // compiled js program
|
||||
lastEvent *output.InternalWrappedEvent // contains last event that was emitted
|
||||
program *goja.Program // compiled js program
|
||||
|
||||
// protocol requests and their callback functions
|
||||
allProtocols map[string][]protocols.Request
|
||||
protoFunctions map[string]func(call goja.FunctionCall) goja.Value // reqFunctions contains functions that allow executing requests/protocols from js
|
||||
protoFunctions map[string]func(call goja.FunctionCall, runtime *goja.Runtime) goja.Value // reqFunctions contains functions that allow executing requests/protocols from js
|
||||
|
||||
// logic related variables
|
||||
results *atomic.Bool
|
||||
allErrs mapsutil.SyncLockMap[string, error]
|
||||
// these are keys whose values are meant to be flatten before executing
|
||||
// a request ex: if dynamic extractor returns ["value"] it will be converted to "value"
|
||||
flattenKeys []string
|
||||
}
|
||||
|
||||
// NewFlowExecutor creates a new flow executor from a list of requests
|
||||
func NewFlowExecutor(requests []protocols.Request, input *contextargs.Context, options *protocols.ExecutorOptions, results *atomic.Bool) *FlowExecutor {
|
||||
// Note: Unlike other engines, for every target x template the flow needs to be compiled and executed every time
|
||||
// unlike other engines where we compile once and execute multiple times
|
||||
func NewFlowExecutor(requests []protocols.Request, ctx *scan.ScanContext, options *protocols.ExecutorOptions, results *atomic.Bool, program *goja.Program) (*FlowExecutor, error) {
|
||||
allprotos := make(map[string][]protocols.Request)
|
||||
for _, req := range requests {
|
||||
switch req.Type() {
|
||||
@ -80,8 +80,11 @@ func NewFlowExecutor(requests []protocols.Request, input *contextargs.Context, o
|
||||
allprotos[templateTypes.CodeProtocol.String()] = append(allprotos[templateTypes.CodeProtocol.String()], req)
|
||||
case templateTypes.JavascriptProtocol:
|
||||
allprotos[templateTypes.JavascriptProtocol.String()] = append(allprotos[templateTypes.JavascriptProtocol.String()], req)
|
||||
case templateTypes.OfflineHTTPProtocol:
|
||||
// offlinehttp is run in passive mode but templates are same so instead of using offlinehttp() we use http() in flow
|
||||
allprotos[templateTypes.HTTPProtocol.String()] = append(allprotos[templateTypes.OfflineHTTPProtocol.String()], req)
|
||||
default:
|
||||
gologger.Error().Msgf("invalid request type %s", req.Type().String())
|
||||
return nil, fmt.Errorf("invalid request type %s", req.Type().String())
|
||||
}
|
||||
}
|
||||
f := &FlowExecutor{
|
||||
@ -91,12 +94,12 @@ func NewFlowExecutor(requests []protocols.Request, input *contextargs.Context, o
|
||||
ReadOnly: atomic.Bool{},
|
||||
Map: make(map[string]error),
|
||||
},
|
||||
protoFunctions: map[string]func(call goja.FunctionCall) goja.Value{},
|
||||
protoFunctions: map[string]func(call goja.FunctionCall, runtime *goja.Runtime) goja.Value{},
|
||||
results: results,
|
||||
jsVM: protocolstate.NewJSRuntime(),
|
||||
input: input,
|
||||
ctx: ctx,
|
||||
program: program,
|
||||
}
|
||||
return f
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Compile compiles js program and registers all functions
|
||||
@ -105,7 +108,7 @@ func (f *FlowExecutor) Compile() error {
|
||||
f.results = new(atomic.Bool)
|
||||
}
|
||||
// load all variables and evaluate with existing data
|
||||
variableMap := f.options.Variables.Evaluate(f.options.GetTemplateCtx(f.input.MetaInput).GetAll())
|
||||
variableMap := f.options.Variables.Evaluate(f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll())
|
||||
// cli options
|
||||
optionVars := generators.BuildPayloadFromOptions(f.options.Options)
|
||||
// constants
|
||||
@ -118,14 +121,14 @@ func (f *FlowExecutor) Compile() error {
|
||||
if value, err := f.ReadDataFromFile(str); err == nil {
|
||||
allVars[k] = value
|
||||
} else {
|
||||
gologger.Warning().Msgf("could not load file '%s' for variable '%s': %s", str, k, err)
|
||||
f.ctx.LogWarning("could not load file '%s' for variable '%s': %s", str, k, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.options.GetTemplateCtx(f.input.MetaInput).Merge(allVars) // merge all variables into template context
|
||||
f.options.GetTemplateCtx(f.ctx.Input.MetaInput).Merge(allVars) // merge all variables into template context
|
||||
|
||||
// ---- define callback functions/objects----
|
||||
f.protoFunctions = map[string]func(call goja.FunctionCall) goja.Value{}
|
||||
f.protoFunctions = map[string]func(call goja.FunctionCall, runtime *goja.Runtime) goja.Value{}
|
||||
// iterate over all protocols and generate callback functions for each protocol
|
||||
for p, requests := range f.allProtocols {
|
||||
// for each protocol build a requestMap with reqID and protocol request
|
||||
@ -145,7 +148,7 @@ func (f *FlowExecutor) Compile() error {
|
||||
}
|
||||
// ---define hook that allows protocol/request execution from js-----
|
||||
// --- this is the actual callback that is executed when function is invoked in js----
|
||||
f.protoFunctions[proto] = func(call goja.FunctionCall) goja.Value {
|
||||
f.protoFunctions[proto] = func(call goja.FunctionCall, runtime *goja.Runtime) goja.Value {
|
||||
opts := &ProtoOptions{
|
||||
protoName: proto,
|
||||
}
|
||||
@ -155,34 +158,82 @@ func (f *FlowExecutor) Compile() error {
|
||||
opts.reqIDS = append(opts.reqIDS, types.ToString(value))
|
||||
}
|
||||
}
|
||||
return f.jsVM.ToValue(f.requestExecutor(reqMap, opts))
|
||||
// before executing any protocol function flatten tracked values
|
||||
if len(f.flattenKeys) > 0 {
|
||||
ctx := f.options.GetTemplateCtx(f.ctx.Input.MetaInput)
|
||||
for _, key := range f.flattenKeys {
|
||||
if value, ok := ctx.Get(key); ok {
|
||||
ctx.Set(key, flatten(value))
|
||||
}
|
||||
}
|
||||
}
|
||||
return runtime.ToValue(f.requestExecutor(runtime, reqMap, opts))
|
||||
}
|
||||
}
|
||||
return f.registerBuiltInFunctions()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecuteWithResults executes the flow and returns results
|
||||
func (f *FlowExecutor) ExecuteWithResults(ctx *scan.ScanContext) error {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
f.ctx.LogError(fmt.Errorf("panic occurred while executing target %v with flow: %v", ctx.Input.MetaInput.Input, e))
|
||||
gologger.Error().Label(f.options.TemplateID).Msgf("panic occurred while executing target %v with flow: %v", ctx.Input.MetaInput.Input, e)
|
||||
panic(e)
|
||||
}
|
||||
}()
|
||||
|
||||
f.input = ctx.Input
|
||||
f.ctx.Input = ctx.Input
|
||||
// -----Load all types of variables-----
|
||||
// add all input args to template context
|
||||
if f.input != nil && f.input.HasArgs() {
|
||||
f.input.ForEach(func(key string, value interface{}) {
|
||||
f.options.GetTemplateCtx(f.input.MetaInput).Set(key, value)
|
||||
if f.ctx.Input != nil && f.ctx.Input.HasArgs() {
|
||||
f.ctx.Input.ForEach(func(key string, value interface{}) {
|
||||
f.options.GetTemplateCtx(f.ctx.Input.MetaInput).Set(key, value)
|
||||
})
|
||||
}
|
||||
|
||||
// get a new runtime from pool
|
||||
runtime := GetJSRuntime(f.options.Options)
|
||||
defer PutJSRuntime(runtime) // put runtime back to pool
|
||||
defer func() {
|
||||
// remove set builtin
|
||||
_ = runtime.GlobalObject().Delete("set")
|
||||
_ = runtime.GlobalObject().Delete("template")
|
||||
for proto := range f.protoFunctions {
|
||||
_ = runtime.GlobalObject().Delete(proto)
|
||||
}
|
||||
|
||||
}()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
f.ctx.LogError(fmt.Errorf("panic occurred while executing flow: %v", r))
|
||||
}
|
||||
}()
|
||||
|
||||
if ctx.OnResult == nil {
|
||||
return fmt.Errorf("output callback cannot be nil")
|
||||
}
|
||||
// before running register set of builtins
|
||||
if err := runtime.Set("set", func(call goja.FunctionCall) goja.Value {
|
||||
varName := call.Argument(0).Export()
|
||||
varValue := call.Argument(1).Export()
|
||||
f.options.GetTemplateCtx(f.ctx.Input.MetaInput).Set(types.ToString(varName), varValue)
|
||||
return goja.Null()
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
// also register functions that allow executing protocols from js
|
||||
for proto, fn := range f.protoFunctions {
|
||||
if err := runtime.Set(proto, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// register template object
|
||||
if err := runtime.Set("template", f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// pass flow and execute the js vm and handle errors
|
||||
value, err := f.jsVM.RunProgram(f.program)
|
||||
_, err := runtime.RunProgram(f.program)
|
||||
if err != nil {
|
||||
ctx.LogError(err)
|
||||
return errorutil.NewWithErr(err).Msgf("failed to execute flow\n%v\n", f.options.Flow)
|
||||
@ -192,13 +243,7 @@ func (f *FlowExecutor) ExecuteWithResults(ctx *scan.ScanContext) error {
|
||||
ctx.LogError(runtimeErr)
|
||||
return errorutil.NewWithErr(runtimeErr).Msgf("got following errors while executing flow")
|
||||
}
|
||||
// this is where final result is generated/created
|
||||
ctx.LogEvent(f.lastEvent)
|
||||
if value.Export() != nil {
|
||||
f.results.Store(value.ToBoolean())
|
||||
} else {
|
||||
f.results.Store(true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
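ExecuteWithResults above no longer owns a long-lived jsVM: it borrows a runtime from a pool, registers the set/template builtins and the protocol functions for this run, and deletes those globals again before the runtime goes back to the pool. A condensed sketch of that borrow, register, clean-up cycle using a plain sync.Pool (the sizedwaitgroup concurrency cap is omitted; names are illustrative):

```go
package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/dop251/goja"
)

// vmPool hands out reusable goja runtimes, like GetJSRuntime/PutJSRuntime.
var vmPool = sync.Pool{New: func() interface{} { return goja.New() }}

func runFlow(program *goja.Program, templateCtx map[string]interface{}) error {
	vm := vmPool.Get().(*goja.Runtime)
	defer vmPool.Put(vm)
	// Per-run globals must be removed again, otherwise the next borrower
	// of this runtime would see another target's state.
	defer func() {
		_ = vm.GlobalObject().Delete("set")
		_ = vm.GlobalObject().Delete("template")
	}()

	if err := vm.Set("set", func(call goja.FunctionCall) goja.Value {
		templateCtx[call.Argument(0).String()] = call.Argument(1).Export()
		return goja.Null()
	}); err != nil {
		return err
	}
	if err := vm.Set("template", templateCtx); err != nil {
		return err
	}
	_, err := vm.RunProgram(program)
	return err
}

func main() {
	program, err := goja.Compile("flow.js", `set("token", "abc"); template["token"]`, false)
	if err != nil {
		log.Fatal(err)
	}
	ctx := map[string]interface{}{}
	if err := runFlow(program, ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ctx) // map[token:abc]
}
```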
@ -1,32 +1,27 @@
|
||||
package flow
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/logrusorgru/aurora"
|
||||
"github.com/projectdiscovery/gologger"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/output"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/utils/vardump"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/tmplexec/flow/builtin"
|
||||
"github.com/projectdiscovery/nuclei/v3/pkg/types"
|
||||
mapsutil "github.com/projectdiscovery/utils/maps"
|
||||
)
|
||||
|
||||
// contains all internal/unexported methods of flow
|
||||
|
||||
// requestExecutor executes a protocol/request and returns true if any matcher was found
|
||||
func (f *FlowExecutor) requestExecutor(reqMap mapsutil.Map[string, protocols.Request], opts *ProtoOptions) bool {
|
||||
func (f *FlowExecutor) requestExecutor(runtime *goja.Runtime, reqMap mapsutil.Map[string, protocols.Request], opts *ProtoOptions) bool {
|
||||
defer func() {
|
||||
// evaluate all variables after execution of each protocol
|
||||
variableMap := f.options.Variables.Evaluate(f.options.GetTemplateCtx(f.input.MetaInput).GetAll())
|
||||
f.options.GetTemplateCtx(f.input.MetaInput).Merge(variableMap) // merge all variables into template context
|
||||
variableMap := f.options.Variables.Evaluate(f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll())
|
||||
f.options.GetTemplateCtx(f.ctx.Input.MetaInput).Merge(variableMap) // merge all variables into template context
|
||||
|
||||
// to avoid polling update template variables everytime we execute a protocol
|
||||
var m map[string]interface{} = f.options.GetTemplateCtx(f.input.MetaInput).GetAll()
|
||||
_ = f.jsVM.Set("template", m)
|
||||
var m map[string]interface{} = f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll()
|
||||
_ = runtime.Set("template", m)
|
||||
}()
|
||||
matcherStatus := &atomic.Bool{} // due to interactsh matcher polling logic this needs to be atomic bool
|
||||
// if no id is passed execute all requests in sequence
|
||||
@ -34,7 +29,7 @@ func (f *FlowExecutor) requestExecutor(reqMap mapsutil.Map[string, protocols.Req
|
||||
// execution logic for http()/dns() etc
|
||||
for index := range f.allProtocols[opts.protoName] {
|
||||
req := f.allProtocols[opts.protoName][index]
|
||||
err := req.ExecuteWithResults(f.input, output.InternalEvent(f.options.GetTemplateCtx(f.input.MetaInput).GetAll()), nil, f.getProtoRequestCallback(req, matcherStatus, opts))
|
||||
err := req.ExecuteWithResults(f.ctx.Input, output.InternalEvent(f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll()), nil, f.protocolResultCallback(req, matcherStatus, opts))
|
||||
if err != nil {
|
||||
// save all errors in a map with id as key
|
||||
// its less likely that there will be race condition but just in case
|
||||
@ -44,7 +39,7 @@ func (f *FlowExecutor) requestExecutor(reqMap mapsutil.Map[string, protocols.Req
|
||||
}
|
||||
err = f.allErrs.Set(opts.protoName+":"+id, err)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("failed to store flow runtime errors got %v", err)
|
||||
f.ctx.LogError(fmt.Errorf("failed to store flow runtime errors got %v", err))
|
||||
}
|
||||
return matcherStatus.Load()
|
||||
}
|
||||
@ -56,36 +51,38 @@ func (f *FlowExecutor) requestExecutor(reqMap mapsutil.Map[string, protocols.Req
|
||||
for _, id := range opts.reqIDS {
|
||||
req, ok := reqMap[id]
|
||||
if !ok {
|
||||
gologger.Error().Msgf("[%v] invalid request id '%s' provided", f.options.TemplateID, id)
|
||||
f.ctx.LogError(fmt.Errorf("[%v] invalid request id '%s' provided", f.options.TemplateID, id))
|
||||
// compile error
|
||||
if err := f.allErrs.Set(opts.protoName+":"+id, ErrInvalidRequestID.Msgf(f.options.TemplateID, id)); err != nil {
|
||||
gologger.Error().Msgf("failed to store flow runtime errors got %v", err)
|
||||
f.ctx.LogError(fmt.Errorf("failed to store flow runtime errors got %v", err))
|
||||
}
|
||||
return matcherStatus.Load()
|
||||
}
|
||||
err := req.ExecuteWithResults(f.input, output.InternalEvent(f.options.GetTemplateCtx(f.input.MetaInput).GetAll()), nil, f.getProtoRequestCallback(req, matcherStatus, opts))
|
||||
err := req.ExecuteWithResults(f.ctx.Input, output.InternalEvent(f.options.GetTemplateCtx(f.ctx.Input.MetaInput).GetAll()), nil, f.protocolResultCallback(req, matcherStatus, opts))
|
||||
if err != nil {
|
||||
index := id
|
||||
err = f.allErrs.Set(opts.protoName+":"+index, err)
|
||||
if err != nil {
|
||||
gologger.Error().Msgf("failed to store flow runtime errors got %v", err)
|
||||
f.ctx.LogError(fmt.Errorf("failed to store flow runtime errors got %v", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return matcherStatus.Load()
|
||||
}
|
||||
|
||||
// getProtoRequestCallback returns a callback that is executed
|
||||
// protocolResultCallback returns a callback that is executed
|
||||
// after execution of each protocol request
|
||||
func (f *FlowExecutor) getProtoRequestCallback(req protocols.Request, matcherStatus *atomic.Bool, opts *ProtoOptions) func(result *output.InternalWrappedEvent) {
|
||||
func (f *FlowExecutor) protocolResultCallback(req protocols.Request, matcherStatus *atomic.Bool, opts *ProtoOptions) func(result *output.InternalWrappedEvent) {
|
||||
return func(result *output.InternalWrappedEvent) {
|
||||
if result != nil {
|
||||
f.results.CompareAndSwap(false, true)
|
||||
f.lastEvent = result
|
||||
// Note: flow specific implicit behaviours should be handled here
|
||||
// before logging the event
|
||||
f.ctx.LogEvent(result)
|
||||
// export dynamic values from operators (i.e internal:true)
|
||||
// and add it to template context
|
||||
// this is a conflicting behaviour with iterate-all
|
||||
if result.HasOperatorResult() {
|
||||
f.results.CompareAndSwap(false, true)
|
||||
// this is to handle case where there is any operator result (matcher or extractor)
|
||||
matcherStatus.CompareAndSwap(false, result.OperatorsResult.Matched)
|
||||
if !result.OperatorsResult.Matched && !hasMatchers(req.GetCompiledOperators()) {
|
||||
@ -95,7 +92,14 @@ func (f *FlowExecutor) getProtoRequestCallback(req protocols.Request, matcherSta
|
||||
}
|
||||
if len(result.OperatorsResult.DynamicValues) > 0 {
|
||||
for k, v := range result.OperatorsResult.DynamicValues {
|
||||
f.options.GetTemplateCtx(f.input.MetaInput).Set(k, v)
|
||||
// if length of v is 1 then remove slice and convert it to single value
|
||||
if len(v) == 1 {
|
||||
// add it to flatten keys list so it will be flattened to a string later
|
||||
f.flattenKeys = append(f.flattenKeys, k)
|
||||
}
|
||||
// always preserve extracted value type
|
||||
f.options.GetTemplateCtx(f.ctx.Input.MetaInput).Set(k, v)
|
||||
|
||||
}
|
||||
}
|
||||
} else if !result.HasOperatorResult() && !hasOperators(req.GetCompiledOperators()) {
|
||||
@ -107,99 +111,3 @@ func (f *FlowExecutor) getProtoRequestCallback(req protocols.Request, matcherSta
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// registerBuiltInFunctions registers all built in functions for the flow
func (f *FlowExecutor) registerBuiltInFunctions() error {
	// currently we register following builtin functions
	// log -> log to stdout with [JS] prefix should only be used for debugging
	// set -> set a variable in template context
	// proto(arg ...String) <- this is generic syntax of how a protocol/request binding looks in js
	// we only register only those protocols that are available in template

	// we also register a map datatype called template with all template variables
	// template -> all template variables are available in js template object

	if err := f.jsVM.Set("log", func(call goja.FunctionCall) goja.Value {
		// TODO: verify string interpolation and handle multiple args
		arg := call.Argument(0).Export()
		switch value := arg.(type) {
		case string:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), value)
		case map[string]interface{}:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), vardump.DumpVariables(value))
		default:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), value)
		}
		return goja.Null()
	}); err != nil {
		return err
	}

	if err := f.jsVM.Set("set", func(call goja.FunctionCall) goja.Value {
		varName := call.Argument(0).Export()
		varValue := call.Argument(1).Export()
		f.options.GetTemplateCtx(f.input.MetaInput).Set(types.ToString(varName), varValue)
		return goja.Null()
	}); err != nil {
		return err
	}

	// iterate provides global iterator function by handling null values or strings
	if err := f.jsVM.Set("iterate", func(call goja.FunctionCall) goja.Value {
		allVars := []any{}
		for _, v := range call.Arguments {
			if v.Export() == nil {
				continue
			}
			if v.ExportType().Kind() == reflect.Slice {
				// convert []datatype to []interface{}
				// since it cannot be type asserted to []interface{} directly
				rfValue := reflect.ValueOf(v.Export())
				for i := 0; i < rfValue.Len(); i++ {
					allVars = append(allVars, rfValue.Index(i).Interface())
				}
			} else {
				allVars = append(allVars, v.Export())
			}
		}
		return f.jsVM.ToValue(allVars)
	}); err != nil {
		return err
	}

	// add a builtin dedupe object
	if err := f.jsVM.Set("Dedupe", func(call goja.ConstructorCall) *goja.Object {
		d := builtin.NewDedupe(f.jsVM)
		obj := call.This
		// register these methods
		_ = obj.Set("Add", d.Add)
		_ = obj.Set("Values", d.Values)
		return nil
	}); err != nil {
		return err
	}

	var m = f.options.GetTemplateCtx(f.input.MetaInput).GetAll()
	if m == nil {
		m = map[string]interface{}{}
	}

	if err := f.jsVM.Set("template", m); err != nil {
		// all template variables are available in js template object
		return err
	}

	// register all protocols
	for name, fn := range f.protoFunctions {
		if err := f.jsVM.Set(name, fn); err != nil {
			return err
		}
	}

	program, err := goja.Compile("flow", f.options.Flow, false)
	if err != nil {
		return err
	}
	f.program = program
	return nil
}
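For context on how such builtins end up callable from a template's `flow` script, here is a minimal, self-contained sketch of the same goja pattern. The `log` body below is illustrative only and not the exact nuclei implementation:

```go
package main

import (
	"fmt"

	"github.com/dop251/goja"
)

func main() {
	vm := goja.New()

	// register a "log" builtin on the runtime, mirroring the pattern above
	_ = vm.Set("log", func(call goja.FunctionCall) goja.Value {
		fmt.Printf("[JS] %v\n", call.Argument(0).Export())
		return goja.Null()
	})

	// a tiny "flow"-style script that calls the builtin
	if _, err := vm.RunString(`log("hello from the flow")`); err != nil {
		panic(err)
	}
}
```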
@ -21,3 +21,20 @@ func hasOperators(all []*operators.Operators) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func flatten(v interface{}) interface{} {
|
||||
switch v := v.(type) {
|
||||
case []interface{}:
|
||||
if len(v) == 1 {
|
||||
return v[0]
|
||||
}
|
||||
return v
|
||||
case []string:
|
||||
if len(v) == 1 {
|
||||
return v[0]
|
||||
}
|
||||
return v
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
94
lib/nuclei/pkg/tmplexec/flow/vm.go
Executable file
@ -0,0 +1,94 @@
package flow

import (
	"reflect"
	"sync"

	"github.com/dop251/goja"
	"github.com/logrusorgru/aurora"
	"github.com/projectdiscovery/gologger"
	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/protocolstate"
	"github.com/projectdiscovery/nuclei/v3/pkg/protocols/common/utils/vardump"
	"github.com/projectdiscovery/nuclei/v3/pkg/tmplexec/flow/builtin"
	"github.com/projectdiscovery/nuclei/v3/pkg/types"
	"github.com/remeh/sizedwaitgroup"
)

type jsWaitGroup struct {
	sync.Once
	sg sizedwaitgroup.SizedWaitGroup
}

var jsPool = &jsWaitGroup{}

// GetJSRuntime returns a new JS runtime from pool
func GetJSRuntime(opts *types.Options) *goja.Runtime {
	jsPool.Do(func() {
		if opts.JsConcurrency < 100 {
			opts.JsConcurrency = 100
		}
		jsPool.sg = sizedwaitgroup.New(opts.JsConcurrency)
	})
	jsPool.sg.Add()
	return gojapool.Get().(*goja.Runtime)
}

// PutJSRuntime returns a JS runtime to pool
func PutJSRuntime(runtime *goja.Runtime) {
	defer jsPool.sg.Done()
	gojapool.Put(runtime)
}

// js runtime pool using sync.Pool
var gojapool = &sync.Pool{
	New: func() interface{} {
		runtime := protocolstate.NewJSRuntime()
		registerBuiltins(runtime)
		return runtime
	},
}

func registerBuiltins(runtime *goja.Runtime) {
	_ = runtime.Set("log", func(call goja.FunctionCall) goja.Value {
		// TODO: verify string interpolation and handle multiple args
		arg := call.Argument(0).Export()
		switch value := arg.(type) {
		case string:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), value)
		case map[string]interface{}:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), vardump.DumpVariables(value))
		default:
			gologger.DefaultLogger.Print().Msgf("[%v] %v", aurora.BrightCyan("JS"), value)
		}
		return call.Argument(0) // return the same value
	})

	_ = runtime.Set("iterate", func(call goja.FunctionCall) goja.Value {
		allVars := []any{}
		for _, v := range call.Arguments {
			if v.Export() == nil {
				continue
			}
			if v.ExportType().Kind() == reflect.Slice {
				// convert []datatype to []interface{}
				// since it cannot be type asserted to []interface{} directly
				rfValue := reflect.ValueOf(v.Export())
				for i := 0; i < rfValue.Len(); i++ {
					allVars = append(allVars, rfValue.Index(i).Interface())
				}
			} else {
				allVars = append(allVars, v.Export())
			}
		}
		return runtime.ToValue(allVars)
	})

	_ = runtime.Set("Dedupe", func(call goja.ConstructorCall) *goja.Object {
		d := builtin.NewDedupe(runtime)
		obj := call.This
		// register these methods
		_ = obj.Set("Add", d.Add)
		_ = obj.Set("Values", d.Values)
		return nil
	})
}
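A hedged sketch of how a caller could borrow and return one of these pooled runtimes; the wrapper function, its arguments, and the script are illustrative (inside nuclei the flow executor does this itself):

```go
package main

import (
	"github.com/projectdiscovery/nuclei/v3/pkg/tmplexec/flow"
	"github.com/projectdiscovery/nuclei/v3/pkg/types"
)

// runFlowSnippet is an illustrative helper: borrow a pooled runtime,
// run a tiny script, then return the runtime to the pool.
func runFlowSnippet(opts *types.Options, script string) error {
	rt := flow.GetJSRuntime(opts) // blocks once opts.JsConcurrency runtimes are checked out
	defer flow.PutJSRuntime(rt)   // release the slot and put the runtime back into the sync.Pool

	_, err := rt.RunString(script)
	return err
}

func main() {
	// note: in a real program nuclei's protocolstate may need to be initialized first
	opts := &types.Options{JsConcurrency: 120} // illustrative value
	if err := runFlowSnippet(opts, `log("borrowed runtime")`); err != nil {
		panic(err)
	}
}
```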
@ -366,6 +366,10 @@ type Options struct {
	EnableCodeTemplates bool
	// Disables cloud upload
	EnableCloudUpload bool
	// ScanID is the scan ID to use for cloud upload
	ScanID string
	// JsConcurrency is the number of concurrent js routines to run
	JsConcurrency int
}

// ShouldLoadResume resume file
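The new `JsConcurrency` knob is simply an exported field on `Options`; a hedged sketch of setting it before handing options to the engine (the value is illustrative):

```go
package main

import "github.com/projectdiscovery/nuclei/v3/pkg/types"

func main() {
	// illustrative: cap the number of concurrent flow JS runtimes;
	// GetJSRuntime raises values below 100 back up to 100
	opts := &types.Options{JsConcurrency: 120}
	_ = opts
}
```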
26
lib/nuclei/pkg/utils/expand/expand.go
Executable file
@ -0,0 +1,26 @@
package expand

import (
	"github.com/projectdiscovery/mapcidr"
	"github.com/projectdiscovery/mapcidr/asn"
)

// Expands CIDR to IPs
func CIDR(value string) []string {
	var ips []string
	ipsCh, _ := mapcidr.IPAddressesAsStream(value)
	for ip := range ipsCh {
		ips = append(ips, ip)
	}
	return ips
}

// Expand ASN to IPs
func ASN(value string) []string {
	var ips []string
	cidrs, _ := asn.GetCIDRsForASNNum(value)
	for _, cidr := range cidrs {
		ips = append(ips, CIDR(cidr.String())...)
	}
	return ips
}
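A hedged usage sketch of the new helpers; the CIDR and AS number below are arbitrary examples, and the import path is assumed from the nuclei module layout shown above:

```go
package main

import (
	"fmt"

	"github.com/projectdiscovery/nuclei/v3/pkg/utils/expand"
)

func main() {
	// expand a small CIDR into its individual addresses
	for _, ip := range expand.CIDR("192.168.1.0/30") {
		fmt.Println(ip)
	}

	// expand an ASN (resolving its announced CIDRs needs network access)
	ips := expand.ASN("AS14421")
	fmt.Println(len(ips), "addresses")
}
```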
@ -10,6 +10,7 @@ import (
	"os"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/DataDog/gostackparse"
@ -20,28 +21,44 @@ import (
// Agent is an agent for monitoring hanging programs
type Agent struct {
	cancel context.CancelFunc
	lastStack []string
	callbacks []Callback

	goroutineCount int
	currentIteration int // number of times we've checked hang

	lock sync.Mutex
}

const defaultMonitorIteration = 6

// NewStackMonitor returns a new stack monitor instance
func NewStackMonitor(interval time.Duration) context.CancelFunc {
func NewStackMonitor() *Agent {
	return &Agent{}
}

// Callback when crash is detected and stack trace is saved to disk
type Callback func(dumpID string) error

// RegisterCallback adds a callback to perform additional operations before bailing out.
func (s *Agent) RegisterCallback(callback Callback) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.callbacks = append(s.callbacks, callback)
}

func (s *Agent) Start(interval time.Duration) context.CancelFunc {
	ctx, cancel := context.WithCancel(context.Background())
	ticker := time.NewTicker(interval)

	monitor := &Agent{cancel: cancel}
	go func() {
		for {
			select {
			case <-ctx.Done():
				ticker.Stop()
			case <-ticker.C:
				monitor.monitorWorker()
				s.monitorWorker(cancel)
			default:
				continue
			}
@ -51,7 +68,7 @@ func NewStackMonitor(interval time.Duration) context.CancelFunc {
}

// monitorWorker is a worker for monitoring running goroutines
func (s *Agent) monitorWorker() {
func (s *Agent) monitorWorker(cancel context.CancelFunc) {
	current := runtime.NumGoroutine()
	if current != s.goroutineCount {
		s.goroutineCount = current
@ -77,12 +94,24 @@ func (s *Agent) monitorWorker() {
		s.currentIteration = 0
		return
	}
	s.cancel()
	stackTraceFile := fmt.Sprintf("nuclei-stacktrace-%s.dump", xid.New().String())

	cancel()
	dumpID := xid.New().String()
	stackTraceFile := fmt.Sprintf("nuclei-stacktrace-%s.dump", dumpID)
	gologger.Error().Msgf("Detected hanging goroutine (count=%d/%d) = %s\n", current, s.goroutineCount, stackTraceFile)
	if err := os.WriteFile(stackTraceFile, currentStack, permissionutil.ConfigFilePermission); err != nil {
		gologger.Error().Msgf("Could not write stack trace for goroutines: %s\n", err)
	}

	s.lock.Lock()
	callbacks := s.callbacks
	s.lock.Unlock()
	for _, callback := range callbacks {
		if err := callback(dumpID); err != nil {
			gologger.Error().Msgf("Stack monitor callback error: %s\n", err)
		}
	}

	os.Exit(1) // exit forcefully if we've been stuck
}
}
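With this change, NewStackMonitor no longer takes an interval and returns an `*Agent` instead of a cancel func, so callers start the ticker explicitly. A minimal sketch of the new wiring follows; the import path, interval, and callback body are assumptions for illustration, not part of this diff:

```go
package main

import (
	"time"

	"github.com/projectdiscovery/gologger"
	"github.com/projectdiscovery/nuclei/v3/pkg/utils/monitor"
)

func main() {
	// construct the agent, register optional callbacks, then start the ticker
	stackMonitor := monitor.NewStackMonitor()
	stackMonitor.RegisterCallback(func(dumpID string) error {
		gologger.Info().Msgf("hang detected, stack dump id: %s", dumpID)
		return nil
	})

	cancel := stackMonitor.Start(10 * time.Second)
	defer cancel()

	// ... long-running scan work would go here ...
}
```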