Implement resume from breakpoint (断点续传)

M09Ic 2022-12-02 19:59:15 +08:00
parent 023e316518
commit f9c5a71258
5 changed files with 126 additions and 72 deletions
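
In outline, the change works like this: the runner now consumes a queue of Task values (a base URL plus a wordlist offset), each pool tracks its progress in a Statistor that is appended to the stat file as one JSON line when the pool finishes, and the new -r/--resume-from flag reads that file back so every target restarts at Offset + ReqNumber. A minimal sketch of the resume path, pieced together from the code in this diff (the helper name buildResumedTasks is illustrative and not part of the commit; it assumes the internal package context where Task and pkg.ReadStatistors live):

    // Rebuild the task list from a previously written stat file.
    func buildResumedTasks(resumeFrom string, total int) ([]*Task, error) {
        stats, err := pkg.ReadStatistors(resumeFrom) // one Statistor per JSON line
        if err != nil {
            return nil, err
        }
        var tasks []*Task
        for _, stat := range stats {
            // Continue each target where the interrupted run stopped:
            // requests already sent are added to the original offset.
            tasks = append(tasks, &Task{
                baseUrl: stat.BaseUrl,
                offset:  stat.Offset + stat.ReqNumber,
                total:   total,
            })
        }
        return tasks, nil
    }

Each rebuilt task is then dispatched over taskCh exactly like a freshly loaded URL.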

View File

@@ -19,8 +19,13 @@ func Spray() {
}
return
}
var runner *internal.Runner
if option.ResumeFrom != "" {
runner, err = option.PrepareRunner()
} else {
runner, err = option.PrepareRunner()
}
runner, err := option.PrepareRunner()
if err != nil {
logs.Log.Errorf(err.Error())
return

View File

@@ -25,6 +25,7 @@ type Option struct {
}
type InputOptions struct {
ResumeFrom string `short:"r" long:"resume-from"`
URL string `short:"u" long:"url" description:"String, input baseurl (separated by commas), e.g.: http://google.com, http://baidu.com"`
URLFile string `short:"l" long:"list" description:"File, input filename"`
Offset int `long:"offset" description:"Int, wordlist offset"`
@@ -76,6 +77,7 @@ type MiscOptions struct {
Threads int `short:"t" long:"thread" default:"20" description:"Int, number of threads per pool (seconds)"`
Debug bool `long:"debug" description:"Bool, output debug info"`
Quiet bool `short:"q" long:"quiet" description:"Bool, Quiet"`
NoBar bool `long:"no-bar"`
Mod string `short:"m" long:"mod" default:"path" choice:"path" choice:"host" description:"String, path/host spray"`
Client string `short:"c" long:"client" default:"auto" choice:"fast" choice:"standard" choice:"auto" description:"String, Client type"`
}
@@ -94,8 +96,8 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
Timeout: opt.Timeout,
Deadline: opt.Deadline,
Offset: opt.Offset,
Limit: opt.Limit,
urlCh: make(chan string),
Total: opt.Limit,
taskCh: make(chan *Task),
OutputCh: make(chan *pkg.Baseline, 100),
FuzzyCh: make(chan *pkg.Baseline, 100),
Fuzzy: opt.Fuzzy,
@@ -124,13 +126,13 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
if opt.Debug {
logs.Log.Level = logs.Debug
}
if !opt.Quiet {
r.Progress.Start()
logs.Log.Writer = r.Progress.Bypass()
} else {
if opt.Quiet {
logs.Log.Quiet = true
}
if opt.Quiet || opt.NoBar {
r.Progress.Start()
logs.Log.Writer = r.Progress.Bypass()
}
if opt.SimhashDistance != 0 {
pkg.Distance = uint8(opt.SimhashDistance)
}
@@ -172,34 +174,6 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
}
}
// prepare url
var urls []string
var file *os.File
urlfrom := opt.URLFile
if opt.URL != "" {
urls = append(urls, opt.URL)
urlfrom = "cmd"
} else if opt.URLFile != "" {
file, err = os.Open(opt.URLFile)
if err != nil {
return nil, err
}
} else if pkg.HasStdin() {
file = os.Stdin
urlfrom = "stdin"
}
if file != nil {
content, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
urls = strings.Split(strings.TrimSpace(string(content)), "\n")
}
r.URLList = urls
logs.Log.Importantf("Loaded %d urls from %s", len(urls), urlfrom)
// prepare word
dicts := make([][]string, len(opt.Dictionaries))
for i, f := range opt.Dictionaries {
@@ -244,20 +218,69 @@ func (opt *Option) PrepareRunner() (*Runner, error) {
return nil, err
}
logs.Log.Importantf("Parsed %d words by %s", len(r.Wordlist), opt.Word)
pkg.DefaultStatistor.Total = len(r.Wordlist)
pkg.DefaultStatistor.Word = opt.Word
pkg.DefaultStatistor.Dictionaries = opt.Dictionaries
pkg.DefaultStatistor = pkg.Statistor{
Word: opt.Word,
WordCount: len(r.Wordlist),
Dictionaries: opt.Dictionaries,
Offset: opt.Offset,
}
if r.Limit == 0 {
if r.CheckOnly {
r.Limit = len(r.URLList)
} else {
r.Limit = len(r.Wordlist)
r.Total = len(r.Wordlist)
if opt.Limit != 0 {
if total := r.Offset + opt.Limit; total < r.Total {
r.Total = total
}
}
// prepare task
var tasks []*Task
var taskfrom string
if opt.ResumeFrom != "" {
stats, err := pkg.ReadStatistors(opt.ResumeFrom)
if err != nil {
return nil, err
}
taskfrom = "resume " + opt.ResumeFrom
for _, stat := range stats {
tasks = append(tasks, &Task{baseUrl: stat.BaseUrl, offset: stat.Offset + stat.ReqNumber, total: r.Total})
}
} else {
r.Limit = r.Offset + opt.Limit
var file *os.File
var urls []string
if opt.URL != "" {
urls = append(urls, opt.URL)
tasks = append(tasks, &Task{baseUrl: opt.URL, offset: opt.Offset, total: r.Total})
taskfrom = "cmd"
} else if opt.URLFile != "" {
file, err = os.Open(opt.URLFile)
if err != nil {
return nil, err
}
taskfrom = opt.URLFile
} else if pkg.HasStdin() {
file = os.Stdin
taskfrom = "stdin"
}
if file != nil {
content, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
urls := strings.Split(strings.TrimSpace(string(content)), "\n")
for _, u := range urls {
tasks = append(tasks, &Task{baseUrl: strings.TrimSpace(u), offset: opt.Offset, total: r.Total})
}
}
if opt.CheckOnly {
r.URLList = urls
r.Total = len(r.URLList)
}
}
r.Tasks = tasks
logs.Log.Importantf("Loaded %d urls from %s", len(tasks), taskfrom)
if opt.Uppercase {
r.Fns = append(r.Fns, strings.ToUpper)
}
@@ -361,7 +384,7 @@ func loadFileToSlice(filename string) ([]string, error) {
return nil, err
}
ss = strings.Split(string(content), "\n")
ss = strings.Split(strings.TrimSpace(string(content)), "\n")
// normalize the CR/LF line-ending difference between Windows and Linux
for i, word := range ss {
@@ -395,3 +418,9 @@ func IntsContains(s []int, e int) bool {
}
return false
}
type Task struct {
baseUrl string
offset int
total int
}
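
The window a task covers follows from the Total/Offset/Limit handling above: Total starts at the full wordlist length and is only shrunk to Offset+Limit when a limit is set, and each pool later runs from its task's offset up to that total. A worked example with illustrative numbers (not taken from the commit):

    // Wordlist of 20000 entries, started with --offset 1000 and --limit 4000.
    total := 20000 // r.Total = len(r.Wordlist)
    offset, limit := 1000, 4000
    if limit != 0 {
        if capped := offset + limit; capped < total {
            total = capped // r.Total = 5000: words 1000..4999 are in scope
        }
    }
    // If that run is interrupted after 1500 requests, --resume-from rebuilds
    // the task with offset = 1000 + 1500 = 2500 and the same total of 5000.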

View File

@@ -73,7 +73,7 @@ func NewPool(ctx context.Context, config *pkg.Config) (*Pool, error) {
}
p, _ := ants.NewPoolWithFunc(config.Thread, func(i interface{}) {
pool.Statistor.ReqNumber++
pool.Statistor.Total++
unit := i.(*Unit)
req, err := pool.genReq(unit.path)
if err != nil {
@@ -287,7 +287,6 @@ func (p *Pool) addRedirect(bl *pkg.Baseline, reCount int) {
}
func (p *Pool) Run(ctx context.Context, offset, limit int) {
p.Statistor.Offset = offset
Loop:
for {
select {
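
The counter change above is what makes that arithmetic work: the pool records its starting offset once when Run begins and increments ReqNumber for every request it dispatches, so Offset + ReqNumber in the saved Statistor is always the next wordlist index to try. A simplified accounting sketch (the real dispatch goes through the ants worker pool shown above, not a plain loop):

    p.Statistor.Offset = offset // recorded once at the start of Run
    for i := offset; i < limit; i++ {
        p.Statistor.ReqNumber++ // one increment per dispatched request
        // ... build and send the request for wordlist[i]
    }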

View File

@@ -11,7 +11,6 @@ import (
"github.com/gosuri/uiprogress"
"github.com/panjf2000/ants/v2"
"net/http"
"strings"
"sync"
"time"
)
@@ -24,11 +23,12 @@ var (
)
type Runner struct {
urlCh chan string
taskCh chan *Task
poolwg sync.WaitGroup
bar *uiprogress.Bar
finished int
Tasks []*Task
URLList []string
Wordlist []string
Headers http.Header
@@ -50,7 +50,7 @@ type Runner struct {
Force bool
Progress *uiprogress.Progress
Offset int
Limit int
Total int
Deadline int
CheckPeriod int
ErrPeriod int
@@ -84,6 +84,7 @@ func (r *Runner) PrepareConfig() *pkg.Config {
func (r *Runner) Prepare(ctx context.Context) error {
var err error
if r.CheckOnly {
// check-only mode, similar to httpx
r.Pools, err = ants.NewPoolWithFunc(1, func(i interface{}) {
config := r.PrepareConfig()
config.Wordlist = r.URLList
@@ -94,31 +95,31 @@ func (r *Runner) Prepare(ctx context.Context) error {
r.poolwg.Done()
return
}
pool.bar = pkg.NewBar("check", r.Limit-r.Offset, r.Progress)
pool.Run(ctx, r.Offset, r.Limit)
pool.bar = pkg.NewBar("check", r.Total-r.Offset, r.Progress)
pool.Run(ctx, r.Offset, r.Total)
r.poolwg.Done()
})
} else {
go func() {
for _, u := range r.URLList {
r.urlCh <- strings.TrimSpace(u)
for _, t := range r.Tasks {
r.taskCh <- t
}
close(r.urlCh)
close(r.taskCh)
}()
if len(r.URLList) > 0 {
r.bar = r.Progress.AddBar(len(r.URLList))
if len(r.Tasks) > 0 {
r.bar = r.Progress.AddBar(len(r.Tasks))
r.bar.PrependCompleted()
r.bar.PrependFunc(func(b *uiprogress.Bar) string {
return fmt.Sprintf("total progressive: %d/%d ", r.finished, len(r.URLList))
return fmt.Sprintf("total progressive: %d/%d ", r.finished, len(r.Tasks))
})
r.bar.AppendElapsed()
}
r.Pools, err = ants.NewPoolWithFunc(r.PoolSize, func(i interface{}) {
u := i.(string)
t := i.(*Task)
config := r.PrepareConfig()
config.BaseURL = u
config.BaseURL = t.baseUrl
config.Wordlist = r.Wordlist
pool, err := NewPool(ctx, config)
if err != nil {
@@ -128,7 +129,7 @@ func (r *Runner) Prepare(ctx context.Context) error {
return
}
pool.bar = pkg.NewBar(u, r.Limit-r.Offset, r.Progress)
pool.bar = pkg.NewBar(config.BaseURL, t.total-t.offset, r.Progress)
err = pool.Init()
if err != nil {
logs.Log.Error(err.Error())
@@ -140,12 +141,11 @@ func (r *Runner) Prepare(ctx context.Context) error {
}
}
pool.Run(ctx, r.Offset, r.Limit)
pool.Run(ctx, t.offset, t.total)
logs.Log.Important(pool.Statistor.String())
logs.Log.Important(pool.Statistor.Detail())
if r.StatFile != nil {
r.StatFile.SafeWrite(pool.Statistor.Json() + "\n")
r.StatFile.SafeWrite(pool.Statistor.Json())
r.StatFile.SafeSync()
}
r.Done()
@@ -167,18 +167,17 @@ Loop:
case <-ctx.Done():
logs.Log.Error("cancel with deadline")
break Loop
case u, ok := <-r.urlCh:
case t, ok := <-r.taskCh:
if !ok {
break Loop
}
r.poolwg.Add(1)
r.Pools.Invoke(u)
r.Pools.Invoke(t)
}
}
r.poolwg.Wait()
time.Sleep(100) // delay 100ms, wait until all data has been processed
//time.Sleep(100 * time.Millisecond) // delay 100ms, wait until all data has been processed
for {
if len(r.OutputCh) == 0 {
close(r.OutputCh)
@@ -192,7 +191,7 @@ Loop:
break
}
}
time.Sleep(100) // delay 100ms, wait until all data has been processed
time.Sleep(100 * time.Millisecond) // delay 100ms, wait until all data has been processed
}
func (r *Runner) RunWithCheck(ctx context.Context) {
@@ -225,7 +224,7 @@ Loop:
}
}
time.Sleep(100) // delay 100ms, wait until all data has been processed
time.Sleep(100 * time.Millisecond) // delay 100ms, wait until all data has been processed
}
func (r *Runner) Done() {

View File

@@ -1,8 +1,10 @@
package pkg
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
@@ -33,6 +35,7 @@ type Statistor struct {
Total int `json:"total"`
StartTime int64 `json:"start_time"`
EndTime int64 `json:"end_time"`
WordCount int `json:"word_count"`
Word string `json:"word"`
Dictionaries []string `json:"dictionaries"`
}
@@ -71,5 +74,24 @@ func (stat *Statistor) Json() string {
if err != nil {
return err.Error()
}
return string(content)
return string(content) + "\n"
}
func ReadStatistors(filename string) (Statistors, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var stats Statistors
for _, line := range bytes.Split(content, []byte("\n")) {
var stat Statistor
err := json.Unmarshal(line, &stat)
if err != nil {
return nil, err
}
stats = append(stats, stat)
}
return stats, nil
}
type Statistors []Statistor
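
Taken together, the stat file is a JSON-lines log: every finished pool appends one newline-terminated object through Json(), and ReadStatistors splits the file on newlines and unmarshals each line back into a Statistor, which -r/--resume-from then turns into tasks. The sketch below is an equivalent standalone reader that additionally skips blank lines (such as the one left by the trailing "\n" from Json()); it is a variation for illustration, not the code in this commit:

    func readStatistorsLenient(filename string) (Statistors, error) {
        content, err := ioutil.ReadFile(filename)
        if err != nil {
            return nil, err
        }
        var stats Statistors
        for _, line := range bytes.Split(content, []byte("\n")) {
            if len(bytes.TrimSpace(line)) == 0 {
                continue // tolerate empty lines instead of failing to unmarshal them
            }
            var stat Statistor
            if err := json.Unmarshal(line, &stat); err != nil {
                return nil, err
            }
            stats = append(stats, stat)
        }
        return stats, nil
    }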