Merge pull request #57 from chainreactors/dev

merge v1.0.2
M09Ic 2024-08-20 16:53:55 +08:00 committed by GitHub
commit bf6d1c5f0b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 38 additions and 19 deletions

View File

@@ -326,30 +326,30 @@ func (opt *Option) NewRunner() (*Runner, error) {
         r.Active = true
         pkg.EnableAllFingerEngine = true
         pkg.Extractors["recon"] = pkg.ExtractRegexps["pentest"]
-        r.IsCheck = false
+        r.bruteMod = true
         opt.AppendRule = append(opt.AppendRule, "filebak")
     }
     if opt.FileBak {
-        r.IsCheck = false
+        r.bruteMod = true
         opt.AppendRule = append(opt.AppendRule, "filebak")
     }
     if opt.Common {
-        r.IsCheck = false
+        r.bruteMod = true
         r.AppendWords = append(r.AppendWords, mask.SpecialWords["common_file"]...)
     }
     if opt.Active {
-        r.IsCheck = false
+        r.bruteMod = true
         r.AppendWords = append(r.AppendWords, pkg.ActivePath...)
     }
     if opt.Crawl {
-        r.IsCheck = false
+        r.bruteMod = true
     }
     opt.PrintPlugin()
-    if r.IsCheck == false {
+    if r.bruteMod {
         logs.Log.Important("enabling brute mod, because of enabled brute plugin")
     }
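
The plugin switches above (FileBak, Common, Active, Crawl) no longer clear r.IsCheck directly; they only raise the new r.bruteMod flag, which Runner.Prepare resolves in one place (see the runner.go hunk further below). A minimal, self-contained sketch of this deferred-flag pattern, using hypothetical trimmed-down Option/Runner types rather than the real spray types:

package main

import "fmt"

// Hypothetical, trimmed-down types; only meant to illustrate the pattern.
type Option struct {
	FileBak, Common, Active, Crawl bool
}

type Runner struct {
	bruteMod bool // raised by any brute-style plugin option
	IsCheck  bool // resolved once, later, in Prepare
}

func (opt *Option) NewRunner() *Runner {
	r := &Runner{IsCheck: true}
	// Each option only records the intent; none of them touches IsCheck here.
	if opt.FileBak || opt.Common || opt.Active || opt.Crawl {
		r.bruteMod = true
	}
	return r
}

func (r *Runner) Prepare() {
	// Single place where an enabled brute plugin disables check mode.
	if r.bruteMod {
		r.IsCheck = false
	}
}

func main() {
	r := (&Option{Crawl: true}).NewRunner()
	r.Prepare()
	fmt.Println("check mode:", r.IsCheck) // check mode: false
}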

View File

@@ -51,7 +51,7 @@ func NewBrutePool(ctx context.Context, config *Config) (*BrutePool, error) {
             additionCh: make(chan *Unit, config.Thread),
             closeCh: make(chan struct{}),
             processCh: make(chan *pkg.Baseline, config.Thread),
-            wg: sync.WaitGroup{},
+            wg: &sync.WaitGroup{},
         },
         base: u.Scheme + "://" + u.Host,
         isDir: strings.HasSuffix(u.Path, "/"),
@@ -196,7 +196,7 @@ func (pool *BrutePool) Upgrade(bl *pkg.Baseline) error {
     return nil
 }
-func (pool *BrutePool) Run(offset, limit int) {
+func (pool *BrutePool) Run(ctx context.Context, offset, limit int) {
     pool.Worder.Run()
     if pool.Active {
         pool.wg.Add(1)
@@ -279,7 +279,7 @@ Loop:
             }
         case <-pool.closeCh:
             break Loop
-        case <-pool.ctx.Done():
+        case <-ctx.Done():
             break Loop
         case <-pool.ctx.Done():
             break Loop
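
With BrutePool.Run now taking a context.Context, the select loop can stop either through the pool's own close channel or when the caller cancels. A rough, self-contained sketch of a loop that watches both a caller-supplied ctx and an internal close channel (hypothetical names, not the actual BrutePool fields):

package main

import (
	"context"
	"fmt"
	"time"
)

// run drains work until the caller cancels ctx or closeCh is closed.
func run(ctx context.Context, work <-chan int, closeCh <-chan struct{}) {
Loop:
	for {
		select {
		case w, ok := <-work:
			if !ok {
				break Loop
			}
			fmt.Println("unit:", w)
		case <-closeCh:
			break Loop // internal shutdown signal
		case <-ctx.Done():
			break Loop // caller-side cancellation
		}
	}
}

func main() {
	work := make(chan int, 3)
	work <- 1
	work <- 2
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	run(ctx, work, make(chan struct{})) // exits when ctx times out
}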

View File

@@ -29,7 +29,7 @@ func NewCheckPool(ctx context.Context, config *Config) (*CheckPool, error) {
             Timeout: time.Duration(config.Timeout) * time.Second,
             ProxyAddr: config.ProxyAddr,
         }),
-        wg: sync.WaitGroup{},
+        wg: &sync.WaitGroup{},
         additionCh: make(chan *Unit, 1024),
         closeCh: make(chan struct{}),
         processCh: make(chan *pkg.Baseline, config.Thread),
@@ -50,21 +50,35 @@ type CheckPool struct {
 func (pool *CheckPool) Run(ctx context.Context, offset, limit int) {
     pool.Worder.Run()
+    var done bool
+    // Spawn a monitoring goroutine: check done every 100ms; once done is set, wait for in-flight requests, then close closeCh so the select case on closeCh in Loop breaks and Run exits.
+    go func() {
+        for {
+            if done {
+                pool.wg.Wait()
+                close(pool.closeCh)
+                return
+            }
+            time.Sleep(100 * time.Millisecond)
+        }
+    }()
 Loop:
     for {
         select {
         case u, ok := <-pool.Worder.C:
             if !ok {
-                break Loop
+                done = true
+                continue
             }
             if pool.reqCount < offset {
                 pool.reqCount++
-                break Loop
+                continue
             }
             if pool.reqCount > limit {
-                break Loop
+                continue
             }
             pool.wg.Add(1)
@@ -82,7 +96,7 @@ Loop:
             break Loop
         }
     }
-    pool.wg.Wait()
     pool.Close()
 }
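
Instead of breaking out of the loop as soon as the wordlist channel closes, the new CheckPool.Run sets a done flag and lets a monitoring goroutine wait for in-flight requests before closing closeCh. A self-contained sketch of that shutdown pattern follows; it uses an atomic.Bool rather than the plain bool in the diff to keep the example race-free, and the pool fields are reduced to local variables:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var (
		wg      sync.WaitGroup
		done    atomic.Bool // plain bool in the original; atomic here to keep the sketch race-free
		closeCh = make(chan struct{})
		work    = make(chan int, 2)
	)
	work <- 1
	work <- 2
	close(work)

	// Monitor: poll the done flag every 100ms; once set, wait for workers,
	// then close closeCh so the select below breaks out of Loop.
	go func() {
		for {
			if done.Load() {
				wg.Wait()
				close(closeCh)
				return
			}
			time.Sleep(100 * time.Millisecond)
		}
	}()

Loop:
	for {
		select {
		case w, ok := <-work:
			if !ok {
				// Wordlist exhausted: flag it and keep looping until closeCh closes.
				done.Store(true)
				continue
			}
			wg.Add(1)
			go func(w int) {
				defer wg.Done()
				fmt.Println("checked:", w)
			}(w)
		case <-closeCh:
			break Loop
		}
	}
	fmt.Println("pool closed")
}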
@@ -127,7 +141,11 @@ func (pool *CheckPool) Invoke(v interface{}) {
         pool.doUpgrade(bl)
     } else {
         bl = pkg.NewBaseline(req.URI(), req.Host(), resp)
+        bl.ReqDepth = unit.depth
+        bl.Collect()
+        if bl.Status == 400 {
+            pool.doUpgrade(bl)
+        }
     }
-    bl.ReqDepth = unit.depth
     bl.Source = unit.source
@@ -141,9 +159,6 @@ func (pool *CheckPool) Handler() {
         if bl.RedirectURL != "" {
             pool.doRedirect(bl, bl.ReqDepth)
             pool.putToOutput(bl)
-        } else if bl.Status == 400 {
-            pool.doUpgrade(bl)
-            pool.putToOutput(bl)
         } else {
             params := map[string]interface{}{
                 "current": bl,

View File

@@ -29,7 +29,7 @@ type BasePool struct {
     failedCount int
     additionCh chan *Unit
     closeCh chan struct{}
-    wg sync.WaitGroup
+    wg *sync.WaitGroup
 }
 func (pool *BasePool) doRedirect(bl *pkg.Baseline, depth int) {
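
The wg field in BasePool changes from a sync.WaitGroup value to a pointer. A WaitGroup must not be copied once in use (go vet's copylocks check reports this), and holding a pointer keeps every copy of the pool struct counting on the same instance. A small, self-contained illustration with a hypothetical pool type:

package main

import (
	"fmt"
	"sync"
)

// pool carries a shared *sync.WaitGroup; copying pool still points at the same counter.
type pool struct {
	wg *sync.WaitGroup
}

func main() {
	p := pool{wg: &sync.WaitGroup{}}
	q := p // a copy, e.g. an embedded base pool handed around by value

	for i := 0; i < 3; i++ {
		q.wg.Add(1) // counts on the shared WaitGroup
		go func(i int) {
			defer q.wg.Done()
			fmt.Println("worker", i)
		}(i)
	}

	p.wg.Wait() // sees the Adds made through q; a value field would not
	fmt.Println("all workers finished")
}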

View File

@@ -35,6 +35,7 @@ type Runner struct {
     outputCh chan *pkg.Baseline
     fuzzyCh chan *pkg.Baseline
     bar *mpb.Bar
+    bruteMod bool
     IsCheck bool
     Pools *ants.PoolWithFunc
     PoolName map[string]bool
@ -111,6 +112,9 @@ func (r *Runner) AppendFunction(fn func(string) []string) {
}
func (r *Runner) Prepare(ctx context.Context) error {
if r.bruteMod {
r.IsCheck = false
}
r.OutputHandler()
var err error
if r.IsCheck {
@@ -207,7 +211,7 @@ func (r *Runner) Prepare(ctx context.Context) error {
             }
         }
-        brutePool.Run(brutePool.Statistor.Offset, limit)
+        brutePool.Run(ctx, brutePool.Statistor.Offset, limit)
         if brutePool.IsFailed && len(brutePool.FailedBaselines) > 0 {
             // If we exited because errors accumulated, end points at where the first error occurred, so that resume does not skip a large number of targets