Mirror of https://github.com/chainreactors/spray.git (synced 2025-09-15 19:50:18 +00:00)

refactor cmd ui.

commit 3f4094d89e
parent 40a1f90601
@@ -160,7 +160,7 @@ func Spray() {
         }()
     }()

-    if runner.CheckOnly {
+    if runner.IsCheck {
         runner.RunWithCheck(ctx)
     } else {
         runner.Run(ctx)
@@ -101,7 +101,7 @@ mode:
   # Bool, skip error break
   force: false
   # Bool, check only
-  check-only: false
+  default: false
   # Bool, no scope
   no-scope: false
   # String, custom scope, e.g.: --scope *.example.com
@@ -132,7 +132,7 @@ mode:
   unique: false
   # Int, retry count
   retry: 0
-  distance: 5
+  sim-distance: 5
 misc:
   # String, path/host spray
   mod: path
@@ -50,17 +50,18 @@ type InputOptions struct {
     URL []string `short:"u" long:"url" description:"Strings, input baseurl, e.g.: http://google.com"`
     URLFile string `short:"l" long:"list" description:"File, input filename"`
     PortRange string `short:"p" long:"port" description:"String, input port range, e.g.: 80,8080-8090,db"`
-    CIDRs string `long:"cidr" description:"String, input cidr, e.g.: 1.1.1.1/24 "`
+    CIDRs string `short:"i" long:"cidr" description:"String, input cidr, e.g.: 1.1.1.1/24 "`
     RawFile string `long:"raw" description:"File, input raw request filename"`
     Dictionaries []string `short:"d" long:"dict" description:"Files, Multi,dict files, e.g.: -d 1.txt -d 2.txt" config:"dictionaries"`
-    NoDict bool `long:"no-dict" description:"Bool, no dictionary" config:"no-dict"`
+    //NoDict bool `long:"no-dict" description:"Bool, no dictionary" config:"no-dict"`
+    DefaultDict bool `short:"D" long:"default" description:"Bool, use default dictionary" config:"default"`
     Word string `short:"w" long:"word" description:"String, word generate dsl, e.g.: -w test{?ld#4}" config:"word"`
     Rules []string `short:"r" long:"rules" description:"Files, rule files, e.g.: -r rule1.txt -r rule2.txt" config:"rules"`
     AppendRule []string `long:"append-rule" description:"Files, when found valid path , use append rule generator new word with current path" config:"append-rules"`
     FilterRule string `long:"filter-rule" description:"String, filter rule, e.g.: --rule-filter '>8 <4'" config:"filter-rule"`
     AppendFile []string `long:"append-file" description:"Files, when found valid path , use append file new word with current path" config:"append-files"`
     Offset int `long:"offset" description:"Int, wordlist offset"`
     Limit int `long:"limit" description:"Int, wordlist limit, start with offset. e.g.: --offset 1000 --limit 100"`
 }

 type FunctionOptions struct {
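
The struct tags above follow the conventions of github.com/jessevdk/go-flags; the parser setup itself is not part of this diff, so the following is only a minimal sketch of how the changed options would behave under that assumption. The option names come from the diff; the program, targets, and output are illustrative.

// Sketch only: shows the new -i/--cidr short flag and the -D/--default switch
// that replaces the removed --no-dict. Not spray's actual code.
package main

import (
	"fmt"
	"os"

	"github.com/jessevdk/go-flags"
)

type InputOptions struct {
	CIDRs        string   `short:"i" long:"cidr" description:"String, input cidr, e.g.: 1.1.1.1/24"`
	Dictionaries []string `short:"d" long:"dict" description:"Files, dict files"`
	DefaultDict  bool     `short:"D" long:"default" description:"Bool, use default dictionary"`
}

func main() {
	var opt InputOptions
	// e.g. `prog -i 1.1.1.1/24 -D -d words.txt`
	if _, err := flags.ParseArgs(&opt, os.Args[1:]); err != nil {
		os.Exit(1)
	}
	fmt.Printf("cidr=%q default-dict=%v dicts=%v\n", opt.CIDRs, opt.DefaultDict, opt.Dictionaries)
}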
@@ -118,9 +119,9 @@ type PluginOptions struct {
 }

 type ModeOptions struct {
     RateLimit int `long:"rate-limit" default:"0" description:"Int, request rate limit (rate/s), e.g.: --rate-limit 100" config:"rate-limit"`
     Force bool `long:"force" description:"Bool, skip error break" config:"force"`
-    CheckOnly bool `long:"check-only" description:"Bool, check only" config:"check-only"`
+    //CheckOnly bool `long:"check-only" description:"Bool, check only" config:"check-only"`
     NoScope bool `long:"no-scope" description:"Bool, no scope" config:"no-scope"`
     Scope []string `long:"scope" description:"String, custom scope, e.g.: --scope *.example.com" config:"scope"`
     Recursive string `long:"recursive" default:"current.IsDir()" description:"String,custom recursive rule, e.g.: --recursive current.IsDir()" config:"recursive"`
@@ -136,7 +137,7 @@ type ModeOptions struct {
     UniqueStatus string `long:"unique-status" default:"403,200,404" description:"Strings (comma split), custom unique status" config:"unique-status"`
     Unique bool `long:"unique" description:"Bool, unique response" config:"unique"`
     RetryCount int `long:"retry" default:"0" description:"Int, retry count" config:"retry"`
-    SimhashDistance int `long:"distance" default:"5" config:"distance"`
+    SimhashDistance int `long:"sim-distance" default:"5" config:"sim-distance"`
 }

 type MiscOptions struct {
@@ -200,7 +201,7 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
         r.ClientType = ihttp.STANDARD
     }

-    if opt.Threads == DefaultThreads && opt.CheckOnly {
+    if opt.Threads == DefaultThreads && len(opt.Dictionaries) == 0 {
         r.Threads = 1000
     }

@@ -291,22 +292,28 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
     }

     // prepare word
-    dicts := make([][]string, len(opt.Dictionaries))
-    if len(opt.Dictionaries) == 0 && opt.Word == "" && !opt.NoDict {
-        dicts = append(dicts, pkg.LoadDefaultDict())
-        logs.Log.Warn("not set any dictionary, use default dictionary: https://github.com/maurosoria/dirsearch/blob/master/db/dicc.txt")
-    } else {
-        for i, f := range opt.Dictionaries {
-            dicts[i], err = loadFileToSlice(f)
-            if opt.ResumeFrom != "" {
-                dictCache[f] = dicts[i]
-            }
-            if err != nil {
-                return nil, err
-            }
-
-            logs.Log.Logf(pkg.LogVerbose, "Loaded %d word from %s", len(dicts[i]), f)
-        }
-    }
+    var dicts [][]string
+    if opt.DefaultDict {
+        dicts = append(dicts, pkg.LoadDefaultDict())
+        logs.Log.Info("not set any dictionary, use default dictionary: https://github.com/maurosoria/dirsearch/blob/master/db/dicc.txt")
+    }
+    for i, f := range opt.Dictionaries {
+        dict, err := loadFileToSlice(f)
+        if err != nil {
+            return nil, err
+        }
+        if err != nil {
+            return nil, err
+        }
+        dicts = append(dicts, dict)
+        if opt.ResumeFrom != "" {
+            dictCache[f] = dicts[i]
+        }
+
+        logs.Log.Logf(pkg.LogVerbose, "Loaded %d word from %s", len(dicts[i]), f)
+    }
+    if len(dicts) == 0 {
+        r.IsCheck = true
+    }

     if opt.Word == "" {
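
The reworked block drops the old NoDict/else structure: the built-in dictionary is loaded only when -D/--default is set, every -d file is appended to dicts, and when nothing at all was loaded the runner switches itself to check-only mode (r.IsCheck = true). A standalone sketch of that flow is shown below; loadFileToSlice and the default wordlist are simplified stand-ins, not spray's real helpers.

// Standalone sketch of the new dictionary-selection flow (not spray's code).
package main

import (
	"fmt"
	"os"
	"strings"
)

// stand-in for pkg.LoadDefaultDict()
var defaultDict = []string{"index.php", "admin", ".git/HEAD"}

// stand-in for spray's loadFileToSlice: whitespace-separated words.
func loadFileToSlice(filename string) ([]string, error) {
	content, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return strings.Fields(string(content)), nil
}

// prepareDicts mirrors the diff: optional default dict, then every -d file,
// and a check-only fallback when no wordlist was loaded at all.
func prepareDicts(useDefault bool, files []string) (dicts [][]string, isCheck bool, err error) {
	if useDefault {
		dicts = append(dicts, defaultDict)
	}
	for _, f := range files {
		dict, err := loadFileToSlice(f)
		if err != nil {
			return nil, false, err
		}
		dicts = append(dicts, dict)
	}
	return dicts, len(dicts) == 0, nil
}

func main() {
	dicts, isCheck, err := prepareDicts(false, os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("loaded %d dicts, check-only: %v\n", len(dicts), isCheck)
}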
@@ -30,7 +30,7 @@ type Config struct {
     ProcessCh chan *pkg.Baseline
     OutputCh chan *pkg.Baseline
     FuzzyCh chan *pkg.Baseline
-    OutLocker *sync.WaitGroup
+    Outwg *sync.WaitGroup
     RateLimit int
     CheckPeriod int
     ErrPeriod int32
@@ -153,12 +153,12 @@ func (pool *BasePool) putToOutput(bl *pkg.Baseline) {
     if bl.IsValid || bl.IsFuzzy {
         bl.Collect()
     }
-    pool.OutLocker.Add(1)
+    pool.Outwg.Add(1)
     pool.OutputCh <- bl
 }

 func (pool *BasePool) putToFuzzy(bl *pkg.Baseline) {
-    pool.OutLocker.Add(1)
+    pool.Outwg.Add(1)
     bl.IsFuzzy = true
     pool.FuzzyCh <- bl
 }
@@ -14,7 +14,6 @@ import (
     "github.com/vbauerster/mpb/v8"
     "github.com/vbauerster/mpb/v8/decor"
     "sync"
-    "time"
 )

 var (
@@ -36,7 +35,7 @@ type Runner struct {
     outputCh chan *pkg.Baseline
     fuzzyCh chan *pkg.Baseline
     bar *mpb.Bar
-    finished int
+    IsCheck bool
     Pools *ants.PoolWithFunc
     PoolName map[string]bool
     Tasks chan *Task
@@ -73,7 +72,7 @@ func (r *Runner) PrepareConfig() *pool.Config {
         Mod: pool.ModMap[r.Mod],
         OutputCh: r.outputCh,
         FuzzyCh: r.fuzzyCh,
-        OutLocker: r.outwg,
+        Outwg: r.outwg,
         Fuzzy: r.Fuzzy,
         CheckPeriod: r.CheckPeriod,
         ErrPeriod: int32(r.ErrPeriod),
@@ -113,15 +112,15 @@ func (r *Runner) AppendFunction(fn func(string) []string) {

 func (r *Runner) Prepare(ctx context.Context) error {
     var err error
-    if r.CheckOnly {
+    if r.IsCheck {
         // check only, similar to httpx
         r.Pools, err = ants.NewPoolWithFunc(1, func(i interface{}) {
             config := r.PrepareConfig()

-            pool, err := pool.NewCheckPool(ctx, config)
+            checkPool, err := pool.NewCheckPool(ctx, config)
             if err != nil {
                 logs.Log.Error(err.Error())
-                pool.Cancel()
+                checkPool.Cancel()
                 r.poolwg.Done()
                 return
             }
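
In this hunk and the ones below, the local variable is renamed from pool to checkPool/brutePool, presumably because a variable named pool shadows the imported pool package for the rest of the closure. A minimal illustration of that pitfall with a standard-library package (unrelated to spray's code, just the same shadowing effect):

// Shows how a local variable shadows an imported package of the same name,
// which is what the pool -> checkPool/brutePool rename avoids.
package main

import (
	"fmt"
	"sort"
)

func main() {
	nums := []int{3, 1, 2}
	sort.Ints(nums) // here "sort" still refers to the package

	sort := nums // from now on "sort" is a local []int; the package is shadowed
	// sort.Ints(nums) // would no longer compile: sort.Ints undefined
	fmt.Println(sort)
}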
@@ -133,10 +132,10 @@ func (r *Runner) Prepare(ctx context.Context) error {
                 }
                 close(ch)
             }()
-            pool.Worder = words.NewWorderWithChan(ch)
-            pool.Worder.Fns = r.Fns
-            pool.Bar = pkg.NewBar("check", r.Count-r.Offset, pool.Statistor, r.Progress)
-            pool.Run(ctx, r.Offset, r.Count)
+            checkPool.Worder = words.NewWorderWithChan(ch)
+            checkPool.Worder.Fns = r.Fns
+            checkPool.Bar = pkg.NewBar("check", r.Count-r.Offset, checkPool.Statistor, r.Progress)
+            checkPool.Run(ctx, r.Offset, r.Count)
             r.poolwg.Done()
         })
     } else {
@@ -162,57 +161,57 @@ func (r *Runner) Prepare(ctx context.Context) error {
             config := r.PrepareConfig()
             config.BaseURL = t.baseUrl

-            pool, err := pool.NewBrutePool(ctx, config)
+            brutePool, err := pool.NewBrutePool(ctx, config)
             if err != nil {
                 logs.Log.Error(err.Error())
-                pool.Cancel()
+                brutePool.Cancel()
                 r.Done()
                 return
             }
             if t.origin != nil && len(r.Wordlist) == 0 {
                 // for tasks restored from a resume checkpoint, word, dict and rule are set automatically, at lower priority than the command-line flags
-                pool.Statistor = pkg.NewStatistorFromStat(t.origin.Statistor)
-                pool.Worder, err = t.origin.InitWorder(r.Fns)
+                brutePool.Statistor = pkg.NewStatistorFromStat(t.origin.Statistor)
+                brutePool.Worder, err = t.origin.InitWorder(r.Fns)
                 if err != nil {
                     logs.Log.Error(err.Error())
                     r.Done()
                     return
                 }
-                pool.Statistor.Total = t.origin.sum
+                brutePool.Statistor.Total = t.origin.sum
             } else {
-                pool.Statistor = pkg.NewStatistor(t.baseUrl)
-                pool.Worder = words.NewWorder(r.Wordlist)
-                pool.Worder.Fns = r.Fns
-                pool.Worder.Rules = r.Rules.Expressions
+                brutePool.Statistor = pkg.NewStatistor(t.baseUrl)
+                brutePool.Worder = words.NewWorder(r.Wordlist)
+                brutePool.Worder.Fns = r.Fns
+                brutePool.Worder.Rules = r.Rules.Expressions
             }

             var limit int
-            if pool.Statistor.Total > r.Limit && r.Limit != 0 {
+            if brutePool.Statistor.Total > r.Limit && r.Limit != 0 {
                 limit = r.Limit
             } else {
-                limit = pool.Statistor.Total
+                limit = brutePool.Statistor.Total
             }
-            pool.Bar = pkg.NewBar(config.BaseURL, limit-pool.Statistor.Offset, pool.Statistor, r.Progress)
-            logs.Log.Importantf("[pool] task: %s, total %d words, %d threads, proxy: %s", pool.BaseURL, limit-pool.Statistor.Offset, pool.Thread, pool.ProxyAddr)
-            err = pool.Init()
+            brutePool.Bar = pkg.NewBar(config.BaseURL, limit-brutePool.Statistor.Offset, brutePool.Statistor, r.Progress)
+            logs.Log.Importantf("[pool] task: %s, total %d words, %d threads, proxy: %s", brutePool.BaseURL, limit-brutePool.Statistor.Offset, brutePool.Thread, brutePool.ProxyAddr)
+            err = brutePool.Init()
             if err != nil {
-                pool.Statistor.Error = err.Error()
+                brutePool.Statistor.Error = err.Error()
                 if !r.Force {
                     // if force is not enabled, a failed init closes the pool
-                    pool.Close()
-                    r.PrintStat(pool)
+                    brutePool.Close()
+                    r.PrintStat(brutePool)
                     r.Done()
                     return
                 }
             }

-            pool.Run(pool.Statistor.Offset, limit)
+            brutePool.Run(brutePool.Statistor.Offset, limit)

-            if pool.IsFailed && len(pool.FailedBaselines) > 0 {
+            if brutePool.IsFailed && len(brutePool.FailedBaselines) > 0 {
                 // if the run exited because of accumulated errors, End points at the first error so that resume does not skip a large number of targets
-                pool.Statistor.End = pool.FailedBaselines[0].Number
+                brutePool.Statistor.End = brutePool.FailedBaselines[0].Number
             }
-            r.PrintStat(pool)
+            r.PrintStat(brutePool)
             r.Done()
         })
     }
@@ -224,28 +223,6 @@ func (r *Runner) Prepare(ctx context.Context) error {
     return nil
 }

-func (r *Runner) AddRecursive(bl *pkg.Baseline) {
-    // recursive new task
-    task := &Task{
-        baseUrl: bl.UrlString,
-        depth: bl.RecuDepth + 1,
-        origin: NewOrigin(pkg.NewStatistor(bl.UrlString)),
-    }
-
-    r.AddPool(task)
-}
-
-func (r *Runner) AddPool(task *Task) {
-    // recursive new task
-    if _, ok := r.PoolName[task.baseUrl]; ok {
-        logs.Log.Importantf("already added pool, skip %s", task.baseUrl)
-        return
-    }
-    task.depth++
-    r.poolwg.Add(1)
-    r.Pools.Invoke(task)
-}
-
 func (r *Runner) Run(ctx context.Context) {
 Loop:
     for {
@@ -294,13 +271,29 @@ Loop:
         }
     }

-    for {
-        if len(r.outputCh) == 0 {
-            break
-        }
-    }
-
-    time.Sleep(100 * time.Millisecond) // delay 100ms to wait for all data to be processed
+    r.outwg.Wait()
+}
+
+func (r *Runner) AddRecursive(bl *pkg.Baseline) {
+    // recursive new task
+    task := &Task{
+        baseUrl: bl.UrlString,
+        depth: bl.RecuDepth + 1,
+        origin: NewOrigin(pkg.NewStatistor(bl.UrlString)),
+    }
+
+    r.AddPool(task)
+}
+
+func (r *Runner) AddPool(task *Task) {
+    // recursive new task
+    if _, ok := r.PoolName[task.baseUrl]; ok {
+        logs.Log.Importantf("already added pool, skip %s", task.baseUrl)
+        return
+    }
+    task.depth++
+    r.poolwg.Add(1)
+    r.Pools.Invoke(task)
 }

 func (r *Runner) addBar(total int) {
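
With the Outwg/outwg WaitGroup, every producer calls Add(1) in putToOutput/putToFuzzy before sending a baseline, the output goroutine calls Done() once the result has actually been handled, and Run can block on outwg.Wait() instead of polling len(r.outputCh) and sleeping 100ms, which is also why the "time" import is dropped above. A self-contained sketch of that pattern with generic names (not spray's types):

// WaitGroup-based shutdown instead of polling a channel length plus a sleep.
package main

import (
	"fmt"
	"sync"
)

func main() {
	outputCh := make(chan string, 100)
	var outwg sync.WaitGroup

	// consumer: marks each item done only after it has been written out
	go func() {
		for item := range outputCh {
			fmt.Println("output:", item)
			outwg.Done()
		}
	}()

	// producers: Add(1) before every send, mirroring putToOutput/putToFuzzy
	var poolwg sync.WaitGroup
	for i := 0; i < 3; i++ {
		poolwg.Add(1)
		go func(id int) {
			defer poolwg.Done()
			for j := 0; j < 5; j++ {
				outwg.Add(1)
				outputCh <- fmt.Sprintf("pool-%d result-%d", id, j)
			}
		}(i)
	}

	poolwg.Wait() // all pools have finished submitting results
	outwg.Wait()  // all submitted results have been written; no fixed sleep needed
	close(outputCh)
}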
@@ -329,7 +322,6 @@ func (r *Runner) Done() {
     if r.bar != nil {
         r.bar.Increment()
     }
-    r.finished++
     r.poolwg.Done()
 }
