113 |
114 |
115 |
116 |
117 |
118 |
119 |
132 |
143 |
144 |
145 |
146 | `
147 | )
148 |
149 | // colorizeResults returns a new slice with HTMLColor attribute
150 | func colorizeResults(results []ffuf.Result) []ffuf.Result {
151 | newResults := make([]ffuf.Result, 0)
152 |
153 | for _, r := range results {
154 | result := r
155 | result.HTMLColor = "black"
156 |
157 | s := result.StatusCode
158 |
159 | if s >= 200 && s <= 299 {
160 | result.HTMLColor = "#adea9e"
161 | }
162 |
163 | if s >= 300 && s <= 399 {
164 | result.HTMLColor = "#bbbbe6"
165 | }
166 |
167 | if s >= 400 && s <= 499 {
168 | result.HTMLColor = "#d2cb7e"
169 | }
170 |
171 | if s >= 500 && s <= 599 {
172 | result.HTMLColor = "#de8dc1"
173 | }
174 |
175 | newResults = append(newResults, result)
176 | }
177 |
178 | return newResults
179 | }
180 |
181 | func writeHTML(filename string, config *ffuf.Config, results []ffuf.Result) error {
182 | results = colorizeResults(results)
183 |
184 | ti := time.Now()
185 |
186 | keywords := make([]string, 0)
187 | for _, inputprovider := range config.InputProviders {
188 | keywords = append(keywords, inputprovider.Keyword)
189 | }
190 |
191 | outHTML := htmlFileOutput{
192 | CommandLine: config.CommandLine,
193 | Time: ti.Format(time.RFC3339),
194 | Results: results,
195 | Keys: keywords,
196 | }
197 |
198 | f, err := os.Create(filename)
199 | if err != nil {
200 | return err
201 | }
202 | defer f.Close()
203 |
204 | templateName := "output.html"
205 | t := template.New(templateName).Delims("{{", "}}")
206 | _, err = t.Parse(htmlTemplate)
207 | if err != nil {
208 | return err
209 | }
210 | err = t.Execute(f, outHTML)
211 | return err
212 | }
213 |
--------------------------------------------------------------------------------
/pkg/input/input.go:
--------------------------------------------------------------------------------
1 | package input
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/ffuf/ffuf/pkg/ffuf"
7 | )
8 |
// MainInputProvider wraps the individual internal input providers (wordlists
// and/or commands) behind a single cursor; it is what NewInputProvider
// returns as the job's ffuf.InputProvider.
type MainInputProvider struct {
	// Providers holds one internal input provider per configured keyword.
	Providers []ffuf.InternalInputProvider
	// Config is the run configuration (input mode, configured inputs, ...).
	Config *ffuf.Config
	// position is the current overall iteration position (see Next / Position).
	position int
	// msbIterator is the index of the "most significant" provider during
	// clusterbomb iteration (see clusterbombValue).
	msbIterator int
}
15 |
16 | func NewInputProvider(conf *ffuf.Config) (ffuf.InputProvider, ffuf.Multierror) {
17 | validmode := false
18 | errs := ffuf.NewMultierror()
19 | for _, mode := range []string{"clusterbomb", "pitchfork", "sniper"} {
20 | if conf.InputMode == mode {
21 | validmode = true
22 | }
23 | }
24 | if !validmode {
25 | errs.Add(fmt.Errorf("Input mode (-mode) %s not recognized", conf.InputMode))
26 | return &MainInputProvider{}, errs
27 | }
28 | mainip := MainInputProvider{Config: conf, msbIterator: 0}
29 | // Initialize the correct inputprovider
30 | for _, v := range conf.InputProviders {
31 | err := mainip.AddProvider(v)
32 | if err != nil {
33 | errs.Add(err)
34 | }
35 | }
36 | return &mainip, errs
37 | }
38 |
39 | func (i *MainInputProvider) AddProvider(provider ffuf.InputProviderConfig) error {
40 | if provider.Name == "command" {
41 | newcomm, _ := NewCommandInput(provider.Keyword, provider.Value, i.Config)
42 | i.Providers = append(i.Providers, newcomm)
43 | } else {
44 | // Default to wordlist
45 | newwl, err := NewWordlistInput(provider.Keyword, provider.Value, i.Config)
46 | if err != nil {
47 | return err
48 | }
49 | i.Providers = append(i.Providers, newwl)
50 | }
51 | return nil
52 | }
53 |
// ActivateKeywords enables / disables wordlists based on list of active keywords
func (i *MainInputProvider) ActivateKeywords(kws []string) {
	for _, p := range i.Providers {
		if sliceContains(kws, p.Keyword()) {
			// NOTE(review): elsewhere in this file Active() is used as a
			// boolean query (e.g. `if !p.Active()` in pitchforkValue), but
			// here its return value is discarded. Confirm against the
			// InternalInputProvider interface whether this call actually
			// re-enables the provider or whether an Enable()-style call was
			// intended.
			p.Active()
		} else {
			p.Disable()
		}
	}
}
64 |
// Position will return the current position of progress
func (i *MainInputProvider) Position() int {
	// Plain accessor; position is advanced by Next() and cleared by Reset().
	return i.position
}
69 |
70 | // SetPosition will reset the MainInputProvider to a specific position
71 | func (i *MainInputProvider) SetPosition(pos int) {
72 | if i.Config.InputMode == "clusterbomb" || i.Config.InputMode == "sniper" {
73 | i.setclusterbombPosition(pos)
74 | } else {
75 | i.setpitchforkPosition(pos)
76 | }
77 | }
78 |
79 | // Keywords returns a slice of all keywords in the inputprovider
80 | func (i *MainInputProvider) Keywords() []string {
81 | kws := make([]string, 0)
82 | for _, p := range i.Providers {
83 | kws = append(kws, p.Keyword())
84 | }
85 | return kws
86 | }
87 |
88 | // Next will increment the cursor position, and return a boolean telling if there's inputs left
89 | func (i *MainInputProvider) Next() bool {
90 | if i.position >= i.Total() {
91 | return false
92 | }
93 | i.position++
94 | return true
95 | }
96 |
97 | // Value returns a map of inputs for keywords
98 | func (i *MainInputProvider) Value() map[string][]byte {
99 | retval := make(map[string][]byte)
100 | if i.Config.InputMode == "clusterbomb" || i.Config.InputMode == "sniper" {
101 | retval = i.clusterbombValue()
102 | }
103 | if i.Config.InputMode == "pitchfork" {
104 | retval = i.pitchforkValue()
105 | }
106 | return retval
107 | }
108 |
109 | // Reset resets all the inputproviders and counters
110 | func (i *MainInputProvider) Reset() {
111 | for _, p := range i.Providers {
112 | p.ResetPosition()
113 | }
114 | i.position = 0
115 | i.msbIterator = 0
116 | }
117 |
// pitchforkValue returns a map of keyword:value pairs including all inputs.
// This mode will iterate through wordlists in lockstep.
func (i *MainInputProvider) pitchforkValue() map[string][]byte {
	values := make(map[string][]byte)
	for _, p := range i.Providers {
		if !p.Active() {
			// The inputprovider is disabled
			continue
		}
		if !p.Next() {
			// Loop to beginning if the inputprovider has been exhausted
			p.ResetPosition()
		}
		// Read the current value, then advance this provider's own cursor so
		// every active provider moves forward together on each call.
		values[p.Keyword()] = p.Value()
		p.IncrementPosition()
	}
	return values
}
136 |
137 | func (i *MainInputProvider) setpitchforkPosition(pos int) {
138 | for _, p := range i.Providers {
139 | p.SetPosition(pos)
140 | }
141 | }
142 |
// clusterbombValue returns map of keyword:value pairs including all inputs.
// this mode will iterate through all possible combinations.
func (i *MainInputProvider) clusterbombValue() map[string][]byte {
	values := make(map[string][]byte)
	// Should we signal the next InputProvider in the slice to increment
	signalNext := false
	first := true
	index := 0
	for _, p := range i.Providers {
		if !p.Active() {
			// Disabled providers neither contribute values nor advance index.
			continue
		}
		if signalNext {
			// The previous provider wrapped around; carry into this one.
			p.IncrementPosition()
			signalNext = false
		}
		if !p.Next() {
			// No more inputs in this inputprovider
			if index == i.msbIterator {
				// Reset all previous wordlists and increment the msb counter
				i.msbIterator += 1
				i.clusterbombIteratorReset()
				// Start again
				return i.clusterbombValue()
			}
			p.ResetPosition()
			signalNext = true
		}
		values[p.Keyword()] = p.Value()
		if first {
			// Only the first active ("least significant") provider advances
			// on every call; the rest move via the carry logic above.
			p.IncrementPosition()
			first = false
		}
		index += 1
	}
	return values
}
180 |
// setclusterbombPosition rewinds the provider and then replays the
// clusterbomb iteration up to the requested position. The per-provider
// cursors and msbIterator cannot be computed directly, so the sequence is
// re-generated via Next()/Value().
func (i *MainInputProvider) setclusterbombPosition(pos int) {
	i.Reset()
	if pos > i.Total() {
		// noop
		return
	}
	// NOTE(review): the loop stops at pos-1; confirm the consumer's next
	// Next()/Value() call is expected to land exactly on pos.
	for i.position < pos-1 {
		i.Next()
		i.Value()
	}
}
192 |
// clusterbombIteratorReset rewinds all active providers below the current
// most-significant index and advances the provider at that index by one,
// implementing the "carry" when lower providers are exhausted.
func (i *MainInputProvider) clusterbombIteratorReset() {
	// index counts only active providers, mirroring clusterbombValue.
	index := 0
	for _, p := range i.Providers {
		if !p.Active() {
			continue
		}
		if index < i.msbIterator {
			p.ResetPosition()
		}
		if index == i.msbIterator {
			p.IncrementPosition()
		}
		index += 1
	}
}
208 |
209 | // Total returns the amount of input combinations available
210 | func (i *MainInputProvider) Total() int {
211 | count := 0
212 | if i.Config.InputMode == "pitchfork" {
213 | for _, p := range i.Providers {
214 | if !p.Active() {
215 | continue
216 | }
217 | if p.Total() > count {
218 | count = p.Total()
219 | }
220 | }
221 | }
222 | if i.Config.InputMode == "clusterbomb" || i.Config.InputMode == "sniper" {
223 | count = 1
224 | for _, p := range i.Providers {
225 | if !p.Active() {
226 | continue
227 | }
228 | count = count * p.Total()
229 | }
230 | }
231 | return count
232 | }
233 |
// sliceContains is a helper function that returns true if a string is included in a string slice
func sliceContains(sslice []string, str string) bool {
	for idx := 0; idx < len(sslice); idx++ {
		if sslice[idx] == str {
			return true
		}
	}
	return false
}
243 |
--------------------------------------------------------------------------------
/pkg/ffuf/request_test.go:
--------------------------------------------------------------------------------
1 | package ffuf
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
// TestBaseRequest checks that BaseRequest copies method, URL, headers and
// body data from a Config into the resulting Request.
func TestBaseRequest(t *testing.T) {
	headers := make(map[string]string)
	headers["foo"] = "bar"
	headers["baz"] = "wibble"
	headers["Content-Type"] = "application/json"

	data := "{\"quote\":\"I'll still be here tomorrow to high five you yesterday, my friend. Peace.\"}"

	// Note: Config carries Data as a string, Request as []byte.
	expectedreq := Request{Method: "POST", Url: "http://example.com/aaaa", Headers: headers, Data: []byte(data)}
	config := Config{Method: "POST", Url: "http://example.com/aaaa", Headers: headers, Data: data}
	basereq := BaseRequest(&config)

	if !reflect.DeepEqual(basereq, expectedreq) {
		t.Errorf("BaseRequest does not return a struct with expected values")
	}

}
25 |
// TestCopyRequest checks that CopyRequest produces a deep-equal copy of a
// fully populated Request, including headers, input map, position and raw body.
func TestCopyRequest(t *testing.T) {
	headers := make(map[string]string)
	headers["foo"] = "bar"
	headers["omg"] = "bbq"

	data := "line=Is+that+where+creativity+comes+from?+From+sad+biz?"

	input := make(map[string][]byte)
	input["matthew"] = []byte("If you are the head that floats atop the §ziggurat§, then the stairs that lead to you must be infinite.")

	basereq := Request{Method: "POST",
		Host: "testhost.local",
		Url: "http://example.com/aaaa",
		Headers: headers,
		Data: []byte(data),
		Input: input,
		Position: 2,
		Raw: "We're not oil and water, we're oil and vinegar! It's good. It's yummy.",
	}

	copiedreq := CopyRequest(&basereq)

	if !reflect.DeepEqual(basereq, copiedreq) {
		t.Errorf("CopyRequest does not return an equal struct")
	}
}
52 |
53 | func TestSniperRequests(t *testing.T) {
54 | headers := make(map[string]string)
55 | headers["foo"] = "§bar§"
56 | headers["§omg§"] = "bbq"
57 |
58 | testreq := Request{
59 | Method: "§POST§",
60 | Url: "http://example.com/aaaa?param=§lemony§",
61 | Headers: headers,
62 | Data: []byte("line=§yo yo, it's grease§"),
63 | }
64 |
65 | requests := SniperRequests(&testreq, "§")
66 |
67 | if len(requests) != 5 {
68 | t.Errorf("SniperRequests returned an incorrect number of requests")
69 | }
70 |
71 | headers = make(map[string]string)
72 | headers["foo"] = "bar"
73 | headers["omg"] = "bbq"
74 |
75 | var expected Request
76 | expected = Request{ // Method
77 | Method: "FUZZ",
78 | Url: "http://example.com/aaaa?param=lemony",
79 | Headers: headers,
80 | Data: []byte("line=yo yo, it's grease"),
81 | }
82 |
83 | pass := false
84 | for _, req := range requests {
85 | if reflect.DeepEqual(req, expected) {
86 | pass = true
87 | }
88 | }
89 |
90 | if !pass {
91 | t.Errorf("SniperRequests does not return expected values (Method)")
92 | }
93 |
94 | expected = Request{ // URL
95 | Method: "POST",
96 | Url: "http://example.com/aaaa?param=FUZZ",
97 | Headers: headers,
98 | Data: []byte("line=yo yo, it's grease"),
99 | }
100 |
101 | pass = false
102 | for _, req := range requests {
103 | if reflect.DeepEqual(req, expected) {
104 | pass = true
105 | }
106 | }
107 |
108 | if !pass {
109 | t.Errorf("SniperRequests does not return expected values (Url)")
110 | }
111 |
112 | expected = Request{ // Data
113 | Method: "POST",
114 | Url: "http://example.com/aaaa?param=lemony",
115 | Headers: headers,
116 | Data: []byte("line=FUZZ"),
117 | }
118 |
119 | pass = false
120 | for _, req := range requests {
121 | if reflect.DeepEqual(req, expected) {
122 | pass = true
123 | }
124 | }
125 |
126 | if !pass {
127 | t.Errorf("SniperRequests does not return expected values (Data)")
128 | }
129 |
130 | headers = make(map[string]string)
131 | headers["foo"] = "FUZZ"
132 | headers["omg"] = "bbq"
133 |
134 | expected = Request{ // Header value
135 | Method: "POST",
136 | Url: "http://example.com/aaaa?param=lemony",
137 | Headers: headers,
138 | Data: []byte("line=yo yo, it's grease"),
139 | }
140 |
141 | pass = false
142 | for _, req := range requests {
143 | if reflect.DeepEqual(req, expected) {
144 | pass = true
145 | }
146 | }
147 |
148 | if !pass {
149 | t.Errorf("SniperRequests does not return expected values (Header value)")
150 | }
151 |
152 | headers = make(map[string]string)
153 | headers["foo"] = "bar"
154 | headers["FUZZ"] = "bbq"
155 |
156 | expected = Request{ // Header key
157 | Method: "POST",
158 | Url: "http://example.com/aaaa?param=lemony",
159 | Headers: headers,
160 | Data: []byte("line=yo yo, it's grease"),
161 | }
162 |
163 | pass = false
164 | for _, req := range requests {
165 | if reflect.DeepEqual(req, expected) {
166 | pass = true
167 | }
168 | }
169 |
170 | if !pass {
171 | t.Errorf("SniperRequests does not return expected values (Header key)")
172 | }
173 |
174 | }
175 |
// TestTemplateLocations checks that templateLocations returns the offsets of
// the "§" delimiters in a string: two offsets for a delimited span, one for
// an unpaired delimiter.
func TestTemplateLocations(t *testing.T) {
	test := "this is my 1§template locator§ test"
	arr := templateLocations("§", test)
	// The expected offsets (12/29 and 0/17 below) are rune-based, not
	// byte-based: "§" occupies two bytes in UTF-8.
	expected := []int{12, 29}
	if !reflect.DeepEqual(arr, expected) {
		t.Errorf("templateLocations does not return expected values")
	}

	test2 := "§template locator§"
	arr = templateLocations("§", test2)
	expected = []int{0, 17}
	if !reflect.DeepEqual(arr, expected) {
		t.Errorf("templateLocations does not return expected values")
	}

	// A single unpaired delimiter yields exactly one offset.
	if len(templateLocations("§", "te§st2")) != 1 {
		t.Errorf("templateLocations does not return expected values")
	}
}
195 |
// TestInjectKeyword checks that injectKeyword replaces the span between two
// offsets with the keyword, and that invalid offsets (negative, reversed, or
// past the end of the input) leave the input unchanged.
func TestInjectKeyword(t *testing.T) {
	input := "§Greetings, creator§"
	offsetTuple := templateLocations("§", input)
	expected := "FUZZ"

	result := injectKeyword(input, "FUZZ", offsetTuple[0], offsetTuple[1])
	if result != expected {
		t.Errorf("injectKeyword returned unexpected result: " + result)
	}

	// Negative start offset must be rejected.
	if injectKeyword(input, "FUZZ", -32, 44) != input {
		t.Errorf("injectKeyword offset validation failed")
	}

	// End offset before start offset must be rejected.
	if injectKeyword(input, "FUZZ", 12, 2) != input {
		t.Errorf("injectKeyword offset validation failed")
	}

	// End offset beyond the end of the input must be rejected.
	if injectKeyword(input, "FUZZ", 0, 25) != input {
		t.Errorf("injectKeyword offset validation failed")
	}

	input = "id=§a§&sort=desc"
	offsetTuple = templateLocations("§", input)
	expected = "id=FUZZ&sort=desc"

	result = injectKeyword(input, "FUZZ", offsetTuple[0], offsetTuple[1])
	if result != expected {
		t.Errorf("injectKeyword returned unexpected result: " + result)
	}

	input = "feature=aaa&thingie=bbb&array[§0§]=baz"
	offsetTuple = templateLocations("§", input)
	expected = "feature=aaa&thingie=bbb&array[FUZZ]=baz"

	result = injectKeyword(input, "FUZZ", offsetTuple[0], offsetTuple[1])
	if result != expected {
		t.Errorf("injectKeyword returned unexpected result: " + result)
	}
}
236 |
// TestScrubTemplates checks that scrubTemplates strips all "§" delimiters in
// place from a request's method, URL, header keys/values and body data.
func TestScrubTemplates(t *testing.T) {
	headers := make(map[string]string)
	headers["foo"] = "§bar§"
	headers["§omg§"] = "bbq"

	testreq := Request{Method: "§POST§",
		Url: "http://example.com/aaaa?param=§lemony§",
		Headers: headers,
		Data: []byte("line=§yo yo, it's grease§"),
	}

	// Expected shape after scrubbing: same content, delimiters removed.
	headers = make(map[string]string)
	headers["foo"] = "bar"
	headers["omg"] = "bbq"

	expectedreq := Request{Method: "POST",
		Url: "http://example.com/aaaa?param=lemony",
		Headers: headers,
		Data: []byte("line=yo yo, it's grease"),
	}

	scrubTemplates(&testreq, "§")

	if !reflect.DeepEqual(testreq, expectedreq) {
		t.Errorf("scrubTemplates does not return expected values")
	}
}
264 |
--------------------------------------------------------------------------------
/pkg/ffuf/autocalibration.go:
--------------------------------------------------------------------------------
1 | package ffuf
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "math/rand"
7 | "strconv"
8 | "time"
9 | )
10 |
// autoCalibrationStrings builds the calibration payload groups, keyed by a
// descriptive group name. With no user-supplied strings, randomized "admin",
// ".htaccess" and plain-random payloads are generated (plus directory
// variants when the strategy is "advanced"); otherwise the user-supplied
// strings form a single "custom" group.
func (j *Job) autoCalibrationStrings() map[string][]string {
	// NOTE(review): rand.Seed is deprecated as of Go 1.20 (the global source
	// self-seeds there) and re-seeding on every call is unnecessary —
	// confirm the minimum toolchain version before removing.
	rand.Seed(time.Now().UnixNano())
	cInputs := make(map[string][]string)
	if len(j.Config.AutoCalibrationStrings) < 1 {
		// Two payloads per group, with different random-suffix lengths.
		cInputs["basic_admin"] = append(cInputs["basic_admin"], "admin"+RandomString(16))
		cInputs["basic_admin"] = append(cInputs["basic_admin"], "admin"+RandomString(8))
		cInputs["htaccess"] = append(cInputs["htaccess"], ".htaccess"+RandomString(16))
		cInputs["htaccess"] = append(cInputs["htaccess"], ".htaccess"+RandomString(8))
		cInputs["basic_random"] = append(cInputs["basic_random"], RandomString(16))
		cInputs["basic_random"] = append(cInputs["basic_random"], RandomString(8))
		if j.Config.AutoCalibrationStrategy == "advanced" {
			// Add directory tests and .htaccess too
			cInputs["admin_dir"] = append(cInputs["admin_dir"], "admin"+RandomString(16)+"/")
			cInputs["admin_dir"] = append(cInputs["admin_dir"], "admin"+RandomString(8)+"/")
			cInputs["random_dir"] = append(cInputs["random_dir"], RandomString(16)+"/")
			cInputs["random_dir"] = append(cInputs["random_dir"], RandomString(8)+"/")
		}
	} else {
		cInputs["custom"] = append(cInputs["custom"], j.Config.AutoCalibrationStrings...)
	}
	return cInputs
}
33 |
// calibrationRequest prepares and executes one calibration request with the
// given inputs. The response is returned without error only when it would
// also be matched by the current matchers; preparation/execution failures
// are reported to the output provider, counted and logged, then returned.
func (j *Job) calibrationRequest(inputs map[string][]byte) (Response, error) {
	basereq := BaseRequest(j.Config)
	req, err := j.Runner.Prepare(inputs, &basereq)
	if err != nil {
		// Report through the output provider, bump the job error counter and
		// also log; the error is additionally returned to the caller.
		j.Output.Error(fmt.Sprintf("Encountered an error while preparing autocalibration request: %s\n", err))
		j.incError()
		log.Printf("%s", err)
		return Response{}, err
	}
	resp, err := j.Runner.Execute(&req)
	if err != nil {
		j.Output.Error(fmt.Sprintf("Encountered an error while executing autocalibration request: %s\n", err))
		j.incError()
		log.Printf("%s", err)
		return Response{}, err
	}
	// Only calibrate on responses that would be matched otherwise
	if j.isMatch(resp) {
		return resp, nil
	}
	return resp, fmt.Errorf("Response wouldn't be matched")
}
56 |
// CalibrateForHost runs autocalibration for a specific host
func (j *Job) CalibrateForHost(host string, baseinput map[string][]byte) error {
	if j.Config.MatcherManager.CalibratedForDomain(host) {
		return nil
	}
	if baseinput[j.Config.AutoCalibrationKeyword] == nil {
		return fmt.Errorf("Autocalibration keyword \"%s\" not found in the request.", j.Config.AutoCalibrationKeyword)
	}
	cStrings := j.autoCalibrationStrings()
	// Work on a copy so the caller's input map is left untouched.
	input := make(map[string][]byte)
	for k, v := range baseinput {
		input[k] = v
	}
	for _, v := range cStrings {
		responses := make([]Response, 0)
		for _, cs := range v {
			input[j.Config.AutoCalibrationKeyword] = []byte(cs)
			resp, err := j.calibrationRequest(input)
			if err != nil {
				// Failed or unmatched responses are skipped.
				continue
			}
			responses = append(responses, resp)
			// NOTE(review): calibrateFilters runs inside the per-string loop
			// on a growing response slice, whereas Calibrate() calls it once
			// per group after collecting all responses — confirm this
			// incremental calibration is intentional.
			err = j.calibrateFilters(responses, true)
			if err != nil {
				j.Output.Error(fmt.Sprintf("%s", err))
			}
		}
	}
	j.Config.MatcherManager.SetCalibratedForHost(host, true)
	return nil
}
88 |
89 | // CalibrateResponses returns slice of Responses for randomly generated filter autocalibration requests
90 | func (j *Job) Calibrate(input map[string][]byte) error {
91 | if j.Config.MatcherManager.Calibrated() {
92 | return nil
93 | }
94 | cInputs := j.autoCalibrationStrings()
95 |
96 | for _, v := range cInputs {
97 | responses := make([]Response, 0)
98 | for _, cs := range v {
99 | input[j.Config.AutoCalibrationKeyword] = []byte(cs)
100 | resp, err := j.calibrationRequest(input)
101 | if err != nil {
102 | continue
103 | }
104 | responses = append(responses, resp)
105 | }
106 | _ = j.calibrateFilters(responses, false)
107 | }
108 | j.Config.MatcherManager.SetCalibrated(true)
109 | return nil
110 | }
111 |
112 | // CalibrateIfNeeded runs a self-calibration task for filtering options (if needed) by requesting random resources and
113 | //
114 | // configuring the filters accordingly
115 | func (j *Job) CalibrateIfNeeded(host string, input map[string][]byte) error {
116 | j.calibMutex.Lock()
117 | defer j.calibMutex.Unlock()
118 | if !j.Config.AutoCalibration {
119 | return nil
120 | }
121 | if j.Config.AutoCalibrationPerHost {
122 | return j.CalibrateForHost(host, input)
123 | }
124 | return j.Calibrate(input)
125 | }
126 |
127 | func (j *Job) calibrateFilters(responses []Response, perHost bool) error {
128 | // Work down from the most specific common denominator
129 | if len(responses) > 0 {
130 | // Content length
131 | baselineSize := responses[0].ContentLength
132 | sizeMatch := true
133 | for _, r := range responses {
134 | if baselineSize != r.ContentLength {
135 | sizeMatch = false
136 | }
137 | }
138 | if sizeMatch {
139 | if perHost {
140 | // Check if already filtered
141 | for _, f := range j.Config.MatcherManager.FiltersForDomain(HostURLFromRequest(*responses[0].Request)) {
142 | match, _ := f.Filter(&responses[0])
143 | if match {
144 | // Already filtered
145 | return nil
146 | }
147 | }
148 | _ = j.Config.MatcherManager.AddPerDomainFilter(HostURLFromRequest(*responses[0].Request), "size", strconv.FormatInt(baselineSize, 10))
149 | return nil
150 | } else {
151 | // Check if already filtered
152 | for _, f := range j.Config.MatcherManager.GetFilters() {
153 | match, _ := f.Filter(&responses[0])
154 | if match {
155 | // Already filtered
156 | return nil
157 | }
158 | }
159 | _ = j.Config.MatcherManager.AddFilter("size", strconv.FormatInt(baselineSize, 10), false)
160 | return nil
161 | }
162 | }
163 |
164 | // Content words
165 | baselineWords := responses[0].ContentWords
166 | wordsMatch := true
167 | for _, r := range responses {
168 | if baselineWords != r.ContentWords {
169 | wordsMatch = false
170 | }
171 | }
172 | if wordsMatch {
173 | if perHost {
174 | // Check if already filtered
175 | for _, f := range j.Config.MatcherManager.FiltersForDomain(HostURLFromRequest(*responses[0].Request)) {
176 | match, _ := f.Filter(&responses[0])
177 | if match {
178 | // Already filtered
179 | return nil
180 | }
181 | }
182 | _ = j.Config.MatcherManager.AddPerDomainFilter(HostURLFromRequest(*responses[0].Request), "word", strconv.FormatInt(baselineWords, 10))
183 | return nil
184 | } else {
185 | // Check if already filtered
186 | for _, f := range j.Config.MatcherManager.GetFilters() {
187 | match, _ := f.Filter(&responses[0])
188 | if match {
189 | // Already filtered
190 | return nil
191 | }
192 | }
193 | _ = j.Config.MatcherManager.AddFilter("word", strconv.FormatInt(baselineWords, 10), false)
194 | return nil
195 | }
196 | }
197 |
198 | // Content lines
199 | baselineLines := responses[0].ContentLines
200 | linesMatch := true
201 | for _, r := range responses {
202 | if baselineLines != r.ContentLines {
203 | linesMatch = false
204 | }
205 | }
206 | if linesMatch {
207 | if perHost {
208 | // Check if already filtered
209 | for _, f := range j.Config.MatcherManager.FiltersForDomain(HostURLFromRequest(*responses[0].Request)) {
210 | match, _ := f.Filter(&responses[0])
211 | if match {
212 | // Already filtered
213 | return nil
214 | }
215 | }
216 | _ = j.Config.MatcherManager.AddPerDomainFilter(HostURLFromRequest(*responses[0].Request), "line", strconv.FormatInt(baselineLines, 10))
217 | return nil
218 | } else {
219 | // Check if already filtered
220 | for _, f := range j.Config.MatcherManager.GetFilters() {
221 | match, _ := f.Filter(&responses[0])
222 | if match {
223 | // Already filtered
224 | return nil
225 | }
226 | }
227 | _ = j.Config.MatcherManager.AddFilter("line", strconv.FormatInt(baselineLines, 10), false)
228 | return nil
229 | }
230 | }
231 | }
232 | return fmt.Errorf("No common filtering values found")
233 | }
234 |
--------------------------------------------------------------------------------
/pkg/interactive/termhandler.go:
--------------------------------------------------------------------------------
1 | package interactive
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "strconv"
7 | "strings"
8 | "time"
9 |
10 | "github.com/ffuf/ffuf/pkg/ffuf"
11 | )
12 |
// interactive couples a running ffuf Job with the paused state of the
// interactive terminal session controlling it.
type interactive struct {
	// Job is the ffuf job being controlled.
	Job *ffuf.Job
	// paused is true while the job is halted and the prompt is shown.
	paused bool
}
17 |
18 | func Handle(job *ffuf.Job) error {
19 | i := interactive{job, false}
20 | tty, err := termHandle()
21 | if err != nil {
22 | return err
23 | }
24 | defer tty.Close()
25 | inreader := bufio.NewScanner(tty)
26 | inreader.Split(bufio.ScanLines)
27 | for inreader.Scan() {
28 | i.handleInput(inreader.Bytes())
29 | }
30 | return nil
31 | }
32 |
33 | func (i *interactive) handleInput(in []byte) {
34 | instr := string(in)
35 | args := strings.Split(strings.TrimSpace(instr), " ")
36 | if len(args) == 1 && args[0] == "" {
37 | // Enter pressed - toggle interactive state
38 | i.paused = !i.paused
39 | if i.paused {
40 | i.Job.Pause()
41 | time.Sleep(500 * time.Millisecond)
42 | i.printBanner()
43 | } else {
44 | i.Job.Resume()
45 | }
46 | } else {
47 | switch args[0] {
48 | case "?":
49 | i.printHelp()
50 | case "help":
51 | i.printHelp()
52 | case "resume":
53 | i.paused = false
54 | i.Job.Resume()
55 | case "restart":
56 | i.Job.Reset(false)
57 | i.paused = false
58 | i.Job.Output.Info("Restarting the current ffuf job!")
59 | i.Job.Resume()
60 | case "show":
61 | for _, r := range i.Job.Output.GetCurrentResults() {
62 | i.Job.Output.PrintResult(r)
63 | }
64 | case "savejson":
65 | if len(args) < 2 {
66 | i.Job.Output.Error("Please define the filename")
67 | } else if len(args) > 2 {
68 | i.Job.Output.Error("Too many arguments for \"savejson\"")
69 | } else {
70 | err := i.Job.Output.SaveFile(args[1], "json")
71 | if err != nil {
72 | i.Job.Output.Error(fmt.Sprintf("%s", err))
73 | } else {
74 | i.Job.Output.Info("Output file successfully saved!")
75 | }
76 | }
77 | case "fc":
78 | if len(args) < 2 {
79 | i.Job.Output.Error("Please define a value for status code filter, or \"none\" for removing it")
80 | } else if len(args) > 2 {
81 | i.Job.Output.Error("Too many arguments for \"fc\"")
82 | } else {
83 | i.updateFilter("status", args[1], true)
84 | i.Job.Output.Info("New status code filter value set")
85 | }
86 | case "afc":
87 | if len(args) < 2 {
88 | i.Job.Output.Error("Please define a value to append to status code filter")
89 | } else if len(args) > 2 {
90 | i.Job.Output.Error("Too many arguments for \"afc\"")
91 | } else {
92 | i.appendFilter("status", args[1])
93 | i.Job.Output.Info("New status code filter value set")
94 | }
95 | case "fl":
96 | if len(args) < 2 {
97 | i.Job.Output.Error("Please define a value for line count filter, or \"none\" for removing it")
98 | } else if len(args) > 2 {
99 | i.Job.Output.Error("Too many arguments for \"fl\"")
100 | } else {
101 | i.updateFilter("line", args[1], true)
102 | i.Job.Output.Info("New line count filter value set")
103 | }
104 | case "afl":
105 | if len(args) < 2 {
106 | i.Job.Output.Error("Please define a value to append to line count filter")
107 | } else if len(args) > 2 {
108 | i.Job.Output.Error("Too many arguments for \"afl\"")
109 | } else {
110 | i.appendFilter("line", args[1])
111 | i.Job.Output.Info("New line count filter value set")
112 | }
113 | case "fw":
114 | if len(args) < 2 {
115 | i.Job.Output.Error("Please define a value for word count filter, or \"none\" for removing it")
116 | } else if len(args) > 2 {
117 | i.Job.Output.Error("Too many arguments for \"fw\"")
118 | } else {
119 | i.updateFilter("word", args[1], true)
120 | i.Job.Output.Info("New word count filter value set")
121 | }
122 | case "afw":
123 | if len(args) < 2 {
124 | i.Job.Output.Error("Please define a value to append to word count filter")
125 | } else if len(args) > 2 {
126 | i.Job.Output.Error("Too many arguments for \"afw\"")
127 | } else {
128 | i.appendFilter("word", args[1])
129 | i.Job.Output.Info("New word count filter value set")
130 | }
131 | case "fs":
132 | if len(args) < 2 {
133 | i.Job.Output.Error("Please define a value for response size filter, or \"none\" for removing it")
134 | } else if len(args) > 2 {
135 | i.Job.Output.Error("Too many arguments for \"fs\"")
136 | } else {
137 | i.updateFilter("size", args[1], true)
138 | i.Job.Output.Info("New response size filter value set")
139 | }
140 | case "afs":
141 | if len(args) < 2 {
142 | i.Job.Output.Error("Please define a value to append to size filter")
143 | } else if len(args) > 2 {
144 | i.Job.Output.Error("Too many arguments for \"afs\"")
145 | } else {
146 | i.appendFilter("size", args[1])
147 | i.Job.Output.Info("New response size filter value set")
148 | }
149 | case "ft":
150 | if len(args) < 2 {
151 | i.Job.Output.Error("Please define a value for response time filter, or \"none\" for removing it")
152 | } else if len(args) > 2 {
153 | i.Job.Output.Error("Too many arguments for \"ft\"")
154 | } else {
155 | i.updateFilter("time", args[1], true)
156 | i.Job.Output.Info("New response time filter value set")
157 | }
158 | case "aft":
159 | if len(args) < 2 {
160 | i.Job.Output.Error("Please define a value to append to response time filter")
161 | } else if len(args) > 2 {
162 | i.Job.Output.Error("Too many arguments for \"aft\"")
163 | } else {
164 | i.appendFilter("time", args[1])
165 | i.Job.Output.Info("New response time filter value set")
166 | }
167 | case "queueshow":
168 | i.printQueue()
169 | case "queuedel":
170 | if len(args) < 2 {
171 | i.Job.Output.Error("Please define the index of a queued job to remove. Use \"queueshow\" for listing of jobs.")
172 | } else if len(args) > 2 {
173 | i.Job.Output.Error("Too many arguments for \"queuedel\"")
174 | } else {
175 | i.deleteQueue(args[1])
176 | }
177 | case "queueskip":
178 | i.Job.SkipQueue()
179 | i.Job.Output.Info("Skipping to the next queued job")
180 | case "rate":
181 | if len(args) < 2 {
182 | i.Job.Output.Error("Please define the new rate")
183 | } else if len(args) > 2 {
184 | i.Job.Output.Error("Too many arguments for \"rate\"")
185 | } else {
186 | newrate, err := strconv.Atoi(args[1])
187 | if err != nil {
188 | i.Job.Output.Error(fmt.Sprintf("Could not adjust rate: %s", err))
189 | } else {
190 | i.Job.Rate.ChangeRate(newrate)
191 | }
192 | }
193 |
194 | default:
195 | if i.paused {
196 | i.Job.Output.Warning(fmt.Sprintf("Unknown command: \"%s\". Enter \"help\" for a list of available commands", args[0]))
197 | } else {
198 | i.Job.Output.Error("NOPE")
199 | }
200 | }
201 | }
202 |
203 | if i.paused {
204 | i.printPrompt()
205 | }
206 | }
207 |
208 | func (i *interactive) refreshResults() {
209 | results := make([]ffuf.Result, 0)
210 | filters := i.Job.Config.MatcherManager.GetFilters()
211 | for _, filter := range filters {
212 | for _, res := range i.Job.Output.GetCurrentResults() {
213 | fakeResp := &ffuf.Response{
214 | StatusCode: res.StatusCode,
215 | ContentLines: res.ContentLength,
216 | ContentWords: res.ContentWords,
217 | ContentLength: res.ContentLength,
218 | }
219 | filterOut, _ := filter.Filter(fakeResp)
220 | if !filterOut {
221 | results = append(results, res)
222 | }
223 | }
224 | }
225 | i.Job.Output.SetCurrentResults(results)
226 | }
227 |
228 | func (i *interactive) updateFilter(name, value string, replace bool) {
229 | if value == "none" {
230 | i.Job.Config.MatcherManager.RemoveFilter(name)
231 | } else {
232 | _ = i.Job.Config.MatcherManager.AddFilter(name, value, replace)
233 | }
234 | i.refreshResults()
235 | }
236 |
// appendFilter adds value to the named filter, keeping any existing values
// (delegates to updateFilter with replace=false).
func (i *interactive) appendFilter(name, value string) {
	i.updateFilter(name, value, false)
}
240 |
241 | func (i *interactive) printQueue() {
242 | if len(i.Job.QueuedJobs()) > 0 {
243 | i.Job.Output.Raw("Queued jobs:\n")
244 | for index, job := range i.Job.QueuedJobs() {
245 | postfix := ""
246 | if index == 0 {
247 | postfix = " (active job)"
248 | }
249 | i.Job.Output.Raw(fmt.Sprintf(" [%d] : %s%s\n", index, job.Url, postfix))
250 | }
251 | } else {
252 | i.Job.Output.Info("Job queue is empty")
253 | }
254 | }
255 |
256 | func (i *interactive) deleteQueue(in string) {
257 | index, err := strconv.Atoi(in)
258 | if err != nil {
259 | i.Job.Output.Warning(fmt.Sprintf("Not a number: %s", in))
260 | } else {
261 | if index < 0 || index > len(i.Job.QueuedJobs())-1 {
262 | i.Job.Output.Warning("No such queued job. Use \"queueshow\" to list the jobs in queue")
263 | } else if index == 0 {
264 | i.Job.Output.Warning("Cannot delete the currently running job. Use \"queueskip\" to advance to the next one")
265 | } else {
266 | i.Job.DeleteQueueItem(index)
267 | i.Job.Output.Info("Job successfully deleted!")
268 | }
269 | }
270 | }
// printBanner announces entry into interactive mode and how to leave it.
func (i *interactive) printBanner() {
	i.Job.Output.Raw("entering interactive mode\ntype \"help\" for a list of commands, or ENTER to resume.\n")
}
274 |
// printPrompt prints the interactive mode command prompt.
func (i *interactive) printPrompt() {
	i.Job.Output.Raw("> ")
}
278 |
// printHelp prints the interactive-mode command reference, annotating
// each filter command (and the rate command) with its currently active
// configuration where one exists.
func (i *interactive) printHelp() {
	var fc, fl, fs, ft, fw string
	for name, filter := range i.Job.Config.MatcherManager.GetFilters() {
		switch name {
		case "status":
			fc = "(active: " + filter.Repr() + ")"
		case "line":
			fl = "(active: " + filter.Repr() + ")"
		case "word":
			fw = "(active: " + filter.Repr() + ")"
		case "size":
			fs = "(active: " + filter.Repr() + ")"
		case "time":
			ft = "(active: " + filter.Repr() + ")"
		}
	}
	rate := fmt.Sprintf("(active: %d)", i.Job.Config.Rate)
	// The %s placeholders below are filled positionally by the Sprintf
	// call at the end; keep the argument order in sync with this text.
	help := `
available commands:
 afc [value] - append to status code filter %s
 fc [value] - (re)configure status code filter %s
 afl [value] - append to line count filter %s
 fl [value] - (re)configure line count filter %s
 afw [value] - append to word count filter %s
 fw [value] - (re)configure word count filter %s
 afs [value] - append to size filter %s
 fs [value] - (re)configure size filter %s
 aft [value] - append to time filter %s
 ft [value] - (re)configure time filter %s
 rate [value] - adjust rate of requests per second %s
 queueshow - show job queue
 queuedel [number] - delete a job in the queue
 queueskip - advance to the next queued job
 restart - restart and resume the current ffuf job
 resume - resume current ffuf job (or: ENTER)
 show - show results for the current job
 savejson [filename] - save current matches to a file
 help - you are looking at it
`
	i.Job.Output.Raw(fmt.Sprintf(help, fc, fc, fl, fl, fw, fw, fs, fs, ft, ft, rate))
}
320 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## Changelog
2 | - master
3 | - New
4 | - Added a new, dynamic keyword `FFUFHASH` that generates hash from job configuration and wordlist position to map blind payloads back to the initial request.
5 | - New command line parameter for searching a hash: `-search FFUFHASH`
6 | - Changed
7 | - Multiline output prints out alphabetically sorted by keyword
8 | - Default configuration directories now follow `XDG_CONFIG_HOME` variable (less spam in your home directory)
9 | - Fixed issue with autocalibration of line & words filter
10 | - Made JSON (`-json`) output format take precedence over quiet output mode, to allow JSON output without the banner etc
11 |
12 |
13 | - v1.5.0
14 | - New
15 | - New autocalibration options: `-ach`, `-ack` and `-acs`. Revamped the whole autocalibration process
16 | - Configurable modes for matchers and filters (CLI flags: `fmode` and `mmode`): "and" and "or"
17 | - Changed
18 |
19 | - v1.4.1
20 | - New
21 | - Changed
22 | - Fixed a bug with recursion, introduced in the 1.4.0 release
23 | - Recursion now works better with multiple wordlists, disabling unnecessary wordlists for queued jobs where needed
24 |
25 | - v1.4.0
26 | - New
27 | - Added response time logging and filtering
28 | - Added a CLI flag to specify TLS SNI value
29 | - Added full line colors
30 | - Added `-json` to emit newline delimited JSON output
31 | - Added 500 Internal Server Error to list of status codes matched by default
32 | - Changed
33 | - Fixed an issue where output file was created regardless of `-or`
34 | - Fixed an issue where output (often a lot of it) would be printed after entering interactive mode
35 | - Fixed an issue when reading wordlist files from ffufrc
36 | - Fixed an issue where `-of all` option only creates one output file (instead of all formats)
37 | - Fixed an issue where redirection to the same domain in recursive mode dropped port info from URL
38 | - Added HTTP2 support
39 |
40 | - v1.3.1
41 | - New
42 | - Added a CLI flag to disable the interactive mode
43 | - Changed
44 | - Do not read the last newline in the end of the raw request file when using -request
45 | - Fixed an issue with storing the matches for recursion jobs
46 | - Fixed the way the "size" is calculated, it should match content-length now
47 | - Fixed an issue with header canonicalization when a keyword was just a part of the header name
48 | - Fixed output writing so it doesn't silently fail if it needs to create directories recursively
49 |
50 | - v1.3.0
51 | - New
52 | - All output file formats now include the `Content-Type`.
53 | - New CLI flag `-recursion-strategy` that allows adding new queued recursion jobs for non-redirect responses.
54 | - Ability to enter interactive mode by pressing `ENTER` during the ffuf execution. The interactive mode allows
55 | user to change filters, manage recursion queue, save snapshot of matches to a file etc.
56 | - Changed
57 | - Fix a badchar in progress output
58 |
59 | - v1.2.1
60 | - Changed
61 | - Fixed a build breaking bug in `input-shell` parameter
62 |
63 | - v1.2.0
64 | - New
65 | - Added 405 Method Not Allowed to list of status codes matched by default.
66 | - New CLI flag `-rate` to set maximum rate of requests per second. The adjustment is dynamic.
67 | - New CLI flag `-config` to define a configuration file with preconfigured settings for the job.
68 | - Ffuf now reads a default configuration file `$HOME/.ffufrc` upon startup. Options set in this file
69 | are overwritten by the ones provided on CLI.
70 | - Change banner logging to stderr instead of stdout.
71 | - New CLI flag `-or` to avoid creating result files if we didn't get any.
72 | - New CLI flag `-input-shell` to set the shell to be used by `input-cmd`
73 |
74 | - Changed
75 | - Pre-flight errors are now displayed also after the usage text to prevent the need to scroll through backlog.
76 | - Cancelling via SIGINT (Ctrl-C) is now more responsive
77 | - Fixed issue where a thread would hang due to TCP errors
78 | - Fixed the issue where the option -ac was overwriting existing filters. Now auto-calibration will add them where needed.
79 | - The `-w` flag now accepts comma delimited values in the form of `file1:W1,file2:W2`.
80 | - Links in the HTML report are now clickable
81 | - Fixed panic during wordlist flag parsing in Windows systems.
82 |
83 | - v1.1.0
84 | - New
85 | - New CLI flag `-maxtime-job` to set max. execution time per job.
86 | - Changed behaviour of `-maxtime`, can now be used for entire process.
87 | - A new flag `-ignore-body` so ffuf does not fetch the response content. Default value=false.
88 | - Added the wordlists to the header information.
89 | - Added support to output "all" formats (specify the path/filename sans file extension and ffuf will add the appropriate suffix for the filetype)
90 |
91 | - Changed
92 | - Fixed a bug related to the autocalibration feature making the random seed initialization also to take place before autocalibration needs it.
93 | - Added tls renegotiation flag to fix #193 in http.Client
94 | - Fixed HTML report to display select/combo-box for rows per page (and increased default from 10 to 250 rows).
95 | - Added Host information to JSON output file
96 | - Fixed request method when supplying request file
97 | - Fixed crash with 3XX responses that weren't redirects (304 Not Modified, 300 Multiple Choices etc)
98 |
99 | - v1.0.2
100 | - Changed
101 | - Write POST request data properly to file when ran with `-od`.
- Fixed a bug by using header canonicalization related to HTTP headers being case insensitive.
103 | - Properly handle relative redirect urls with `-recursion`
104 | - Calculate req/sec correctly for when using recursion
105 | - When `-request` is used, allow the user to override URL using `-u`
106 |
107 | - v1.0.1
108 | - Changed
109 | - Fixed a bug where regex matchers and filters would fail if `-od` was used to store the request & response contents.
110 |
111 | - v1.0
112 | - New
113 | - New CLI flag `-ic` to ignore comments from wordlist.
114 | - New CLI flags `-request` to specify the raw request file to build the actual request from and `-request-proto` to define the new request format.
115 | - New CLI flag `-od` (output directory) to enable writing requests and responses for matched results to a file for postprocessing or debugging purposes.
116 | - New CLI flag `-maxtime` to limit the running time of ffuf
117 | - New CLI flags `-recursion` and `-recursion-depth` to control recursive ffuf jobs if directories are found. This requires the `-u` to end with FUZZ keyword.
118 | - New CLI flag `-replay-proxy` to replay matched requests using a custom proxy.
119 | - Changed
120 | - Limit the use of `-e` (extensions) to a single keyword: FUZZ
121 | - Regexp matching and filtering (-mr/-fr) allow using keywords in patterns
122 | - Take 429 responses into account when -sa (stop on all error cases) is used
123 | - Remove -k flag support, convert to dummy flag #134
124 | - Write configuration to output JSON
125 | - Better help text.
126 | - If any matcher is set, ignore -mc default value.
127 |
128 | - v0.12
129 | - New
130 | - Added a new flag to select a multi wordlist operation mode: `--mode`, possible values: `clusterbomb` and `pitchfork`.
131 | - Added a new output file format eJSON, for always base64 encoding the input data.
132 | - Redirect location is always shown in the output files (when using `-o`)
133 | - Full URL is always shown in the output files (when using `-o`)
134 | - HTML output format got [DataTables](https://datatables.net/) support allowing realtime searches, sorting by column etc.
135 | - New CLI flag `-v` for verbose output. Including full URL, and redirect location.
- SIGTERM monitoring, in order to catch keyboard interrupts and such, to be able to write `-o` files before exiting.
137 | - Changed
138 | - Fixed a bug in the default multi wordlist mode
139 | - Fixed JSON output regression, where all the input data was always encoded in base64
- `--debug-log` now correctly logs connection errors
141 | - Removed `-l` flag in favor of `-v`
142 | - More verbose information in banner shown in startup.
143 |
144 | - v0.11
145 | - New
146 |
147 | - New CLI flag: -l, shows target location of redirect responses
- New CLI flag: -acc, custom auto-calibration strings
149 | - New CLI flag: -debug-log, writes the debug logging to the specified file.
150 | - New CLI flags -ml and -fl, filters/matches line count in response
- Ability to use multiple wordlists / keywords by defining multiple -w command line flags. If no keyword is defined, the default is FUZZ to keep backwards compatibility. Example: `-w "wordlists/custom.txt:CUSTOM" -H "RandomHeader: CUSTOM"`.
152 |
153 | - Changed
154 | - New CLI flag: -i, dummy flag that does nothing. for compatibility with copy as curl.
155 | - New CLI flag: -b/--cookie, cookie data for compatibility with copy as curl.
156 | - New Output format are available: HTML and Markdown table.
157 | - New CLI flag: -l, shows target location of redirect responses
158 | - Filtering and matching by status code, response size or word count now allow using ranges in addition to single values
159 | - The internal logging information to be discarded, and can be written to a file with the new `-debug-log` flag.
160 |
161 | - v0.10
162 | - New
163 | - New CLI flag: -ac to autocalibrate response size and word filters based on few preset URLs.
164 | - New CLI flag: -timeout to specify custom timeouts for all HTTP requests.
165 | - New CLI flag: --data for compatibility with copy as curl functionality of browsers.
166 | - New CLI flag: --compressed, dummy flag that does nothing. for compatibility with copy as curl.
167 | - New CLI flags: --input-cmd, and --input-num to handle input generation using external commands. Mutators for example. Environment variable FFUF_NUM will be updated on every call of the command.
168 | - When --input-cmd is used, display position instead of the payload in results. The output file (of all formats) will include the payload in addition to the position however.
169 |
170 | - Changed
171 | - Wordlist can also be read from standard input
172 | - Defining -d or --data implies POST method if -X doesn't set it to something else than GET
173 |
174 | - v0.9
175 | - New
176 | - New output file formats: CSV and eCSV (CSV with base64 encoded input field to avoid CSV breakage with payloads containing a comma)
177 | - New CLI flag to follow redirects
178 | - Erroring connections will be retried once
179 | - Error counter in status bar
180 | - New CLI flags: -se (stop on spurious errors) and -sa (stop on all errors, implies -se and -sf)
181 | - New CLI flags: -e to provide a list of extensions to add to wordlist entries, and -D to provide DirSearch wordlist format compatibility.
182 | - Wildcard option for response status code matcher.
183 | - v0.8
184 | - New
185 | - New CLI flag to write output to a file in JSON format
186 | - New CLI flag to stop on spurious 403 responses
187 | - Changed
188 | - Regex matching / filtering now matches the headers alongside of the response body
189 |
--------------------------------------------------------------------------------
/pkg/output/stdout.go:
--------------------------------------------------------------------------------
1 | package output
2 |
3 | import (
4 | "crypto/md5"
5 | "encoding/json"
6 | "fmt"
7 | "os"
8 | "path"
9 | "sort"
10 | "strconv"
11 | "strings"
12 | "time"
13 |
14 | "github.com/ffuf/ffuf/pkg/ffuf"
15 | )
16 |
const (
	// BANNER_HEADER is the ASCII-art logo printed at the top of the
	// startup banner (see Banner).
	BANNER_HEADER = `
/'___\ /'___\ /'___\
/\ \__/ /\ \__/ __ __ /\ \__/
\ \ ,__\\ \ ,__\/\ \/\ \ \ \ ,__\
\ \ \_/ \ \ \_/\ \ \_\ \ \ \ \_/
\ \_\ \ \_\ \ \____/ \ \_\
\/_/ \/_/ \/___/ \/_/
`
	// BANNER_SEP is the horizontal rule separating banner sections.
	BANNER_SEP = "________________________________________________"
)
28 |
// Stdoutput is the terminal OutputProvider: it prints results to
// stdout/stderr and exports them to the supported file formats.
type Stdoutput struct {
	config         *ffuf.Config
	fuzzkeywords   []string      // sorted fuzz keywords, used for multiline output
	Results        []ffuf.Result // results of completed (cycled) jobs
	CurrentResults []ffuf.Result // results of the currently running job
}
35 |
36 | func NewStdoutput(conf *ffuf.Config) *Stdoutput {
37 | var outp Stdoutput
38 | outp.config = conf
39 | outp.Results = make([]ffuf.Result, 0)
40 | outp.CurrentResults = make([]ffuf.Result, 0)
41 | outp.fuzzkeywords = make([]string, 0)
42 | for _, ip := range conf.InputProviders {
43 | outp.fuzzkeywords = append(outp.fuzzkeywords, ip.Keyword)
44 | }
45 | sort.Strings(outp.fuzzkeywords)
46 | return &outp
47 | }
48 |
// Banner prints the startup banner to stderr: logo and version followed
// by the job configuration (method, URL, wordlists, headers, data,
// extensions, output settings, redirect/calibration flags, proxies,
// timeout, threads, delay, and the configured matchers and filters).
func (s *Stdoutput) Banner() {
	// Colorize the "<3" in the version string.
	version := strings.ReplaceAll(ffuf.Version(), "<3", fmt.Sprintf("%s<3%s", ANSI_RED, ANSI_CLEAR))
	fmt.Fprintf(os.Stderr, "%s\n v%s\n%s\n\n", BANNER_HEADER, version, BANNER_SEP)
	printOption([]byte("Method"), []byte(s.config.Method))
	printOption([]byte("URL"), []byte(s.config.Url))

	// Print wordlists
	for _, provider := range s.config.InputProviders {
		if provider.Name == "wordlist" {
			printOption([]byte("Wordlist"), []byte(provider.Keyword+": "+provider.Value))
		}
	}

	// Print headers
	if len(s.config.Headers) > 0 {
		for k, v := range s.config.Headers {
			printOption([]byte("Header"), []byte(fmt.Sprintf("%s: %s", k, v)))
		}
	}
	// Print POST data
	if len(s.config.Data) > 0 {
		printOption([]byte("Data"), []byte(s.config.Data))
	}

	// Print extensions
	if len(s.config.Extensions) > 0 {
		exts := ""
		for _, ext := range s.config.Extensions {
			exts = fmt.Sprintf("%s%s ", exts, ext)
		}
		printOption([]byte("Extensions"), []byte(exts))
	}

	// Output file info
	if len(s.config.OutputFile) > 0 {

		// Use filename as specified by user
		OutputFile := s.config.OutputFile

		if s.config.OutputFormat == "all" {
			// "all" writes one file per format; show the suffixes appended.
			OutputFile += ".{json,ejson,html,md,csv,ecsv}"
		}

		printOption([]byte("Output file"), []byte(OutputFile))
		printOption([]byte("File format"), []byte(s.config.OutputFormat))
	}

	// Follow redirects?
	follow := fmt.Sprintf("%t", s.config.FollowRedirects)
	printOption([]byte("Follow redirects"), []byte(follow))

	// Autocalibration
	autocalib := fmt.Sprintf("%t", s.config.AutoCalibration)
	printOption([]byte("Calibration"), []byte(autocalib))

	// Proxies
	if len(s.config.ProxyURL) > 0 {
		printOption([]byte("Proxy"), []byte(s.config.ProxyURL))
	}
	if len(s.config.ReplayProxyURL) > 0 {
		printOption([]byte("ReplayProxy"), []byte(s.config.ReplayProxyURL))
	}

	// Timeout
	timeout := fmt.Sprintf("%d", s.config.Timeout)
	printOption([]byte("Timeout"), []byte(timeout))

	// Threads
	threads := fmt.Sprintf("%d", s.config.Threads)
	printOption([]byte("Threads"), []byte(threads))

	// Delay? (fixed value or a range, in seconds)
	if s.config.Delay.HasDelay {
		delay := ""
		if s.config.Delay.IsRange {
			delay = fmt.Sprintf("%.2f - %.2f seconds", s.config.Delay.Min, s.config.Delay.Max)
		} else {
			delay = fmt.Sprintf("%.2f seconds", s.config.Delay.Min)
		}
		printOption([]byte("Delay"), []byte(delay))
	}

	// Print matchers
	for _, f := range s.config.MatcherManager.GetMatchers() {
		printOption([]byte("Matcher"), []byte(f.ReprVerbose()))
	}
	// Print filters
	for _, f := range s.config.MatcherManager.GetFilters() {
		printOption([]byte("Filter"), []byte(f.ReprVerbose()))
	}
	fmt.Fprintf(os.Stderr, "%s\n\n", BANNER_SEP)
}
142 |
// Reset clears the results collected for the currently running job.
func (s *Stdoutput) Reset() {
	s.CurrentResults = make([]ffuf.Result, 0)
}
147 |
148 | // Cycle moves the CurrentResults to Results and resets the results slice
149 | func (s *Stdoutput) Cycle() {
150 | s.Results = append(s.Results, s.CurrentResults...)
151 | s.Reset()
152 | }
153 |
// GetCurrentResults returns the results collected for the currently
// running job.
func (s *Stdoutput) GetCurrentResults() []ffuf.Result {
	return s.CurrentResults
}
158 |
// SetCurrentResults replaces the results of the currently running job.
func (s *Stdoutput) SetCurrentResults(results []ffuf.Result) {
	s.CurrentResults = results
}
163 |
164 | func (s *Stdoutput) Progress(status ffuf.Progress) {
165 | if s.config.Quiet {
166 | // No progress for quiet mode
167 | return
168 | }
169 |
170 | dur := time.Since(status.StartedAt)
171 | runningSecs := int(dur / time.Second)
172 | var reqRate int64
173 | if runningSecs > 0 {
174 | reqRate = status.ReqSec
175 | } else {
176 | reqRate = 0
177 | }
178 |
179 | hours := dur / time.Hour
180 | dur -= hours * time.Hour
181 | mins := dur / time.Minute
182 | dur -= mins * time.Minute
183 | secs := dur / time.Second
184 |
185 | fmt.Fprintf(os.Stderr, "%s:: Progress: [%d/%d] :: Job [%d/%d] :: %d req/sec :: Duration: [%d:%02d:%02d] :: Errors: %d ::", TERMINAL_CLEAR_LINE, status.ReqCount, status.ReqTotal, status.QueuePos, status.QueueTotal, reqRate, hours, mins, secs, status.ErrorCount)
186 | }
187 |
188 | func (s *Stdoutput) Info(infostring string) {
189 | if s.config.Quiet {
190 | fmt.Fprintf(os.Stderr, "%s", infostring)
191 | } else {
192 | if !s.config.Colors {
193 | fmt.Fprintf(os.Stderr, "%s[INFO] %s\n\n", TERMINAL_CLEAR_LINE, infostring)
194 | } else {
195 | fmt.Fprintf(os.Stderr, "%s[%sINFO%s] %s\n\n", TERMINAL_CLEAR_LINE, ANSI_BLUE, ANSI_CLEAR, infostring)
196 | }
197 | }
198 | }
199 |
200 | func (s *Stdoutput) Error(errstring string) {
201 | if s.config.Quiet {
202 | fmt.Fprintf(os.Stderr, "%s", errstring)
203 | } else {
204 | if !s.config.Colors {
205 | fmt.Fprintf(os.Stderr, "%s[ERR] %s\n", TERMINAL_CLEAR_LINE, errstring)
206 | } else {
207 | fmt.Fprintf(os.Stderr, "%s[%sERR%s] %s\n", TERMINAL_CLEAR_LINE, ANSI_RED, ANSI_CLEAR, errstring)
208 | }
209 | }
210 | }
211 |
212 | func (s *Stdoutput) Warning(warnstring string) {
213 | if s.config.Quiet {
214 | fmt.Fprintf(os.Stderr, "%s", warnstring)
215 | } else {
216 | if !s.config.Colors {
217 | fmt.Fprintf(os.Stderr, "%s[WARN] %s\n", TERMINAL_CLEAR_LINE, warnstring)
218 | } else {
219 | fmt.Fprintf(os.Stderr, "%s[%sWARN%s] %s\n", TERMINAL_CLEAR_LINE, ANSI_RED, ANSI_CLEAR, warnstring)
220 | }
221 | }
222 | }
223 |
// Raw writes output to stderr verbatim, prefixed only with the
// clear-line escape (TERMINAL_CLEAR_LINE).
func (s *Stdoutput) Raw(output string) {
	fmt.Fprintf(os.Stderr, "%s%s", TERMINAL_CLEAR_LINE, output)
}
227 |
228 | func (s *Stdoutput) writeToAll(filename string, config *ffuf.Config, res []ffuf.Result) error {
229 | var err error
230 | var BaseFilename string = s.config.OutputFile
231 |
232 | // Go through each type of write, adding
233 | // the suffix to each output file.
234 |
235 | s.config.OutputFile = BaseFilename + ".json"
236 | err = writeJSON(s.config.OutputFile, s.config, res)
237 | if err != nil {
238 | s.Error(err.Error())
239 | }
240 |
241 | s.config.OutputFile = BaseFilename + ".ejson"
242 | err = writeEJSON(s.config.OutputFile, s.config, res)
243 | if err != nil {
244 | s.Error(err.Error())
245 | }
246 |
247 | s.config.OutputFile = BaseFilename + ".html"
248 | err = writeHTML(s.config.OutputFile, s.config, res)
249 | if err != nil {
250 | s.Error(err.Error())
251 | }
252 |
253 | s.config.OutputFile = BaseFilename + ".md"
254 | err = writeMarkdown(s.config.OutputFile, s.config, res)
255 | if err != nil {
256 | s.Error(err.Error())
257 | }
258 |
259 | s.config.OutputFile = BaseFilename + ".csv"
260 | err = writeCSV(s.config.OutputFile, s.config, res, false)
261 | if err != nil {
262 | s.Error(err.Error())
263 | }
264 |
265 | s.config.OutputFile = BaseFilename + ".ecsv"
266 | err = writeCSV(s.config.OutputFile, s.config, res, true)
267 | if err != nil {
268 | s.Error(err.Error())
269 | }
270 |
271 | return nil
272 |
273 | }
274 |
275 | // SaveFile saves the current results to a file of a given type
276 | func (s *Stdoutput) SaveFile(filename, format string) error {
277 | var err error
278 | if s.config.OutputSkipEmptyFile && len(s.Results) == 0 {
279 | s.Info("No results and -or defined, output file not written.")
280 | return err
281 | }
282 | switch format {
283 | case "all":
284 | err = s.writeToAll(filename, s.config, append(s.Results, s.CurrentResults...))
285 | case "json":
286 | err = writeJSON(filename, s.config, append(s.Results, s.CurrentResults...))
287 | case "ejson":
288 | err = writeEJSON(filename, s.config, append(s.Results, s.CurrentResults...))
289 | case "html":
290 | err = writeHTML(filename, s.config, append(s.Results, s.CurrentResults...))
291 | case "md":
292 | err = writeMarkdown(filename, s.config, append(s.Results, s.CurrentResults...))
293 | case "csv":
294 | err = writeCSV(filename, s.config, append(s.Results, s.CurrentResults...), false)
295 | case "ecsv":
296 | err = writeCSV(filename, s.config, append(s.Results, s.CurrentResults...), true)
297 | }
298 | return err
299 | }
300 |
301 | // Finalize gets run after all the ffuf jobs are completed
302 | func (s *Stdoutput) Finalize() error {
303 | var err error
304 | if s.config.OutputFile != "" {
305 | err = s.SaveFile(s.config.OutputFile, s.config.OutputFormat)
306 | if err != nil {
307 | s.Error(err.Error())
308 | }
309 | }
310 | fmt.Fprintf(os.Stderr, "\n")
311 | return nil
312 | }
313 |
314 | func (s *Stdoutput) Result(resp ffuf.Response) {
315 | // Do we want to write request and response to a file
316 | if len(s.config.OutputDirectory) > 0 {
317 | resp.ResultFile = s.writeResultToFile(resp)
318 | }
319 |
320 | inputs := make(map[string][]byte, len(resp.Request.Input))
321 | for k, v := range resp.Request.Input {
322 | inputs[k] = v
323 | }
324 | sResult := ffuf.Result{
325 | Input: inputs,
326 | Position: resp.Request.Position,
327 | StatusCode: resp.StatusCode,
328 | ContentLength: resp.ContentLength,
329 | ContentWords: resp.ContentWords,
330 | ContentLines: resp.ContentLines,
331 | ContentType: resp.ContentType,
332 | RedirectLocation: resp.GetRedirectLocation(false),
333 | Url: resp.Request.Url,
334 | Duration: resp.Time,
335 | ResultFile: resp.ResultFile,
336 | Host: resp.Request.Host,
337 | }
338 | s.CurrentResults = append(s.CurrentResults, sResult)
339 | // Output the result
340 | s.PrintResult(sResult)
341 | }
342 |
343 | func (s *Stdoutput) writeResultToFile(resp ffuf.Response) string {
344 | var fileContent, fileName, filePath string
345 | // Create directory if needed
346 | if s.config.OutputDirectory != "" {
347 | err := os.MkdirAll(s.config.OutputDirectory, 0750)
348 | if err != nil {
349 | if !os.IsExist(err) {
350 | s.Error(err.Error())
351 | return ""
352 | }
353 | }
354 | }
355 | fileContent = fmt.Sprintf("%s\n---- ↑ Request ---- Response ↓ ----\n\n%s", resp.Request.Raw, resp.Raw)
356 |
357 | // Create file name
358 | fileName = fmt.Sprintf("%x", md5.Sum([]byte(fileContent)))
359 |
360 | filePath = path.Join(s.config.OutputDirectory, fileName)
361 | err := os.WriteFile(filePath, []byte(fileContent), 0640)
362 | if err != nil {
363 | s.Error(err.Error())
364 | }
365 | return fileName
366 | }
367 |
368 | func (s *Stdoutput) PrintResult(res ffuf.Result) {
369 | switch {
370 | case s.config.Json:
371 | s.resultJson(res)
372 | case s.config.Quiet:
373 | s.resultQuiet(res)
374 | case len(res.Input) > 1 || s.config.Verbose || len(s.config.OutputDirectory) > 0:
375 | // Print a multi-line result (when using multiple input keywords and wordlists)
376 | s.resultMultiline(res)
377 | default:
378 | s.resultNormal(res)
379 | }
380 | }
381 |
382 | func (s *Stdoutput) prepareInputsOneLine(res ffuf.Result) string {
383 | inputs := ""
384 | if len(res.Input) > 1 {
385 | for k, v := range res.Input {
386 | if inSlice(k, s.config.CommandKeywords) {
387 | // If we're using external command for input, display the position instead of input
388 | inputs = fmt.Sprintf("%s%s : %s ", inputs, k, strconv.Itoa(res.Position))
389 | } else {
390 | inputs = fmt.Sprintf("%s%s : %s ", inputs, k, v)
391 | }
392 | }
393 | } else {
394 | for k, v := range res.Input {
395 | if inSlice(k, s.config.CommandKeywords) {
396 | // If we're using external command for input, display the position instead of input
397 | inputs = strconv.Itoa(res.Position)
398 | } else {
399 | inputs = string(v)
400 | }
401 | }
402 | }
403 | return inputs
404 | }
405 |
// resultQuiet prints only the matched input value(s), nothing else.
func (s *Stdoutput) resultQuiet(res ffuf.Result) {
	fmt.Println(s.prepareInputsOneLine(res))
}
409 |
// resultMultiline prints one match across several lines: a colorized
// status/size/words/lines/duration header, optional URL/redirect and
// result-file rows, and one "* keyword: value" row per fuzz keyword.
func (s *Stdoutput) resultMultiline(res ffuf.Result) {
	var res_hdr, res_str string
	// res_str is the per-keyword row template: previous rows, clear-line
	// escape, keyword, value.
	res_str = "%s%s * %s: %s\n"
	res_hdr = fmt.Sprintf("%s%s[Status: %d, Size: %d, Words: %d, Lines: %d, Duration: %dms]%s", TERMINAL_CLEAR_LINE, s.colorize(res.StatusCode), res.StatusCode, res.ContentLength, res.ContentWords, res.ContentLines, res.Duration.Milliseconds(), ANSI_CLEAR)
	reslines := ""
	if s.config.Verbose {
		reslines = fmt.Sprintf("%s%s| URL | %s\n", reslines, TERMINAL_CLEAR_LINE, res.Url)
		redirectLocation := res.RedirectLocation
		if redirectLocation != "" {
			reslines = fmt.Sprintf("%s%s| --> | %s\n", reslines, TERMINAL_CLEAR_LINE, redirectLocation)
		}
	}
	if res.ResultFile != "" {
		reslines = fmt.Sprintf("%s%s| RES | %s\n", reslines, TERMINAL_CLEAR_LINE, res.ResultFile)
	}
	// fuzzkeywords is pre-sorted, so rows appear in stable alphabetical order.
	for _, k := range s.fuzzkeywords {
		if inSlice(k, s.config.CommandKeywords) {
			// If we're using external command for input, display the position instead of input
			reslines = fmt.Sprintf(res_str, reslines, TERMINAL_CLEAR_LINE, k, strconv.Itoa(res.Position))
		} else {
			// Wordlist input
			reslines = fmt.Sprintf(res_str, reslines, TERMINAL_CLEAR_LINE, k, res.Input[k])
		}
	}
	fmt.Printf("%s\n%s\n", res_hdr, reslines)
}
436 |
// resultNormal prints the default single-line result: the input padded
// to 23 columns, then status, size, words, lines and duration.
func (s *Stdoutput) resultNormal(res ffuf.Result) {
	resnormal := fmt.Sprintf("%s%s%-23s [Status: %d, Size: %d, Words: %d, Lines: %d, Duration: %dms]%s", TERMINAL_CLEAR_LINE, s.colorize(res.StatusCode), s.prepareInputsOneLine(res), res.StatusCode, res.ContentLength, res.ContentWords, res.ContentLines, res.Duration.Milliseconds(), ANSI_CLEAR)
	fmt.Println(resnormal)
}
441 |
// resultJson emits one result as a single JSON line on stdout (newline
// delimited JSON); the clear-line escape is written to stderr first so
// it does not end up inside the JSON stream.
func (s *Stdoutput) resultJson(res ffuf.Result) {
	resBytes, err := json.Marshal(res)
	if err != nil {
		s.Error(err.Error())
	} else {
		fmt.Fprint(os.Stderr, TERMINAL_CLEAR_LINE)
		fmt.Println(string(resBytes))
	}
}
451 |
452 | func (s *Stdoutput) colorize(status int64) string {
453 | if !s.config.Colors {
454 | return ""
455 | }
456 | colorCode := ANSI_CLEAR
457 | if status >= 200 && status < 300 {
458 | colorCode = ANSI_GREEN
459 | }
460 | if status >= 300 && status < 400 {
461 | colorCode = ANSI_BLUE
462 | }
463 | if status >= 400 && status < 500 {
464 | colorCode = ANSI_YELLOW
465 | }
466 | if status >= 500 && status < 600 {
467 | colorCode = ANSI_RED
468 | }
469 | return colorCode
470 | }
471 |
// printOption prints one " :: name : value" banner row to stderr, with
// the name left-padded to 16 columns.
func printOption(name []byte, value []byte) {
	fmt.Fprintf(os.Stderr, " :: %-16s : %s\n", name, value)
}
475 |
// inSlice reports whether key is present in slice.
func inSlice(key string, slice []string) bool {
	for idx := range slice {
		if slice[idx] == key {
			return true
		}
	}
	return false
}
484 |
--------------------------------------------------------------------------------
/pkg/ffuf/job.go:
--------------------------------------------------------------------------------
1 | package ffuf
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "math/rand"
7 | "os"
8 | "os/signal"
9 | "sync"
10 | "syscall"
11 | "time"
12 | )
13 |
// Job ties together Config, Runner, Input and Output, and tracks the
// runtime state of a fuzzing run: counters, pause/running flags and the
// queue of (recursion) jobs.
type Job struct {
	Config *Config
	// ErrorMutex guards the error/403/429 counters below.
	ErrorMutex   sync.Mutex
	Input        InputProvider
	Runner       RunnerProvider
	ReplayRunner RunnerProvider
	Output       OutputProvider
	Jobhash      string
	Counter      int
	ErrorCounter int
	// SpuriousErrorCounter is cleared via resetSpuriousErrors.
	SpuriousErrorCounter int
	Total                int
	Running              bool
	RunningJob           bool
	Paused               bool
	Count403             int
	Count429             int
	Error                string
	Rate                 *RateThrottle
	startTime            time.Time
	startTimeJob         time.Time
	// queuejobs holds the queued jobs; queuepos appears to be the 1-based
	// position of the active job once running (see QueuedJobs /
	// DeleteQueueItem) — confirm against the scheduling code.
	queuejobs    []QueueJob
	queuepos     int
	skipQueue    bool
	currentDepth int
	calibMutex   sync.Mutex
	pauseWg      sync.WaitGroup
}
43 |
// QueueJob is one entry in the job queue: the target URL, its recursion
// depth and the prepared base request for the job.
type QueueJob struct {
	Url   string
	depth int
	req   Request
}
49 |
50 | func NewJob(conf *Config) *Job {
51 | var j Job
52 | j.Config = conf
53 | j.Counter = 0
54 | j.ErrorCounter = 0
55 | j.SpuriousErrorCounter = 0
56 | j.Running = false
57 | j.RunningJob = false
58 | j.Paused = false
59 | j.queuepos = 0
60 | j.queuejobs = make([]QueueJob, 0)
61 | j.currentDepth = 0
62 | j.Rate = NewRateThrottle(conf)
63 | j.skipQueue = false
64 | return &j
65 | }
66 |
// incError increments the total error counter and the spurious error
// counter (the latter is cleared via resetSpuriousErrors) under the
// shared mutex.
func (j *Job) incError() {
	j.ErrorMutex.Lock()
	defer j.ErrorMutex.Unlock()
	j.ErrorCounter++
	j.SpuriousErrorCounter++
}
74 |
// inc403 increments the 403 response counter under the shared mutex.
func (j *Job) inc403() {
	j.ErrorMutex.Lock()
	defer j.ErrorMutex.Unlock()
	j.Count403++
}
81 |
82 | // inc429 increments the 429 response counter
83 | func (j *Job) inc429() {
84 | j.ErrorMutex.Lock()
85 | defer j.ErrorMutex.Unlock()
86 | j.Count429++
87 | }
88 |
89 | // resetSpuriousErrors resets the spurious error counter
90 | func (j *Job) resetSpuriousErrors() {
91 | j.ErrorMutex.Lock()
92 | defer j.ErrorMutex.Unlock()
93 | j.SpuriousErrorCounter = 0
94 | }
95 |
96 | // DeleteQueueItem deletes a recursion job from the queue by its index in the slice
97 | func (j *Job) DeleteQueueItem(index int) {
98 | index = j.queuepos + index - 1
99 | j.queuejobs = append(j.queuejobs[:index], j.queuejobs[index+1:]...)
100 | }
101 |
102 | // QueuedJobs returns the slice of queued recursive jobs
103 | func (j *Job) QueuedJobs() []QueueJob {
104 | return j.queuejobs[j.queuepos-1:]
105 | }
106 |
107 | // Start the execution of the Job
108 | func (j *Job) Start() {
109 | if j.startTime.IsZero() {
110 | j.startTime = time.Now()
111 | }
112 |
113 | basereq := BaseRequest(j.Config)
114 |
115 | if j.Config.InputMode == "sniper" {
116 | // process multiple payload locations and create a queue job for each location
117 | reqs := SniperRequests(&basereq, j.Config.InputProviders[0].Template)
118 | for _, r := range reqs {
119 | j.queuejobs = append(j.queuejobs, QueueJob{Url: j.Config.Url, depth: 0, req: r})
120 | }
121 | j.Total = j.Input.Total() * len(reqs)
122 | } else {
123 | // Add the default job to job queue
124 | j.queuejobs = append(j.queuejobs, QueueJob{Url: j.Config.Url, depth: 0, req: BaseRequest(j.Config)})
125 | j.Total = j.Input.Total()
126 | }
127 |
128 | rand.Seed(time.Now().UnixNano())
129 | defer j.Stop()
130 |
131 | j.Running = true
132 | j.RunningJob = true
133 | //Show banner if not running in silent mode
134 | if !j.Config.Quiet {
135 | j.Output.Banner()
136 | }
137 | // Monitor for SIGTERM and do cleanup properly (writing the output files etc)
138 | j.interruptMonitor()
139 | for j.jobsInQueue() {
140 | j.prepareQueueJob()
141 | j.Reset(true)
142 | j.RunningJob = true
143 | j.startExecution()
144 | }
145 |
146 | err := j.Output.Finalize()
147 | if err != nil {
148 | j.Output.Error(err.Error())
149 | }
150 | }
151 |
152 | // Reset resets the counters and wordlist position for a job
153 | func (j *Job) Reset(cycle bool) {
154 | j.Input.Reset()
155 | j.Counter = 0
156 | j.skipQueue = false
157 | j.startTimeJob = time.Now()
158 | if cycle {
159 | j.Output.Cycle()
160 | } else {
161 | j.Output.Reset()
162 | }
163 | }
164 |
165 | func (j *Job) jobsInQueue() bool {
166 | return j.queuepos < len(j.queuejobs)
167 | }
168 |
169 | func (j *Job) prepareQueueJob() {
170 | j.Config.Url = j.queuejobs[j.queuepos].Url
171 | j.currentDepth = j.queuejobs[j.queuepos].depth
172 |
173 | //Find all keywords present in new queued job
174 | kws := j.Input.Keywords()
175 | found_kws := make([]string, 0)
176 | for _, k := range kws {
177 | if RequestContainsKeyword(j.queuejobs[j.queuepos].req, k) {
178 | found_kws = append(found_kws, k)
179 | }
180 | }
181 | //And activate / disable inputproviders as needed
182 | j.Input.ActivateKeywords(found_kws)
183 | j.queuepos += 1
184 | j.Jobhash, _ = WriteHistoryEntry(j.Config)
185 | }
186 |
// SkipQueue allows to skip the current job and advance to the next queued
// recursion job. The request loop in startExecution and the progress loop in
// runBackgroundTasks both check this flag on every iteration.
func (j *Job) SkipQueue() {
	j.skipQueue = true
}
191 |
192 | func (j *Job) sleepIfNeeded() {
193 | var sleepDuration time.Duration
194 | if j.Config.Delay.HasDelay {
195 | if j.Config.Delay.IsRange {
196 | sTime := j.Config.Delay.Min + rand.Float64()*(j.Config.Delay.Max-j.Config.Delay.Min)
197 | sleepDuration = time.Duration(sTime * 1000)
198 | } else {
199 | sleepDuration = time.Duration(j.Config.Delay.Min * 1000)
200 | }
201 | sleepDuration = sleepDuration * time.Millisecond
202 | }
203 | // makes the sleep cancellable by context
204 | select {
205 | case <-j.Config.Context.Done(): // cancelled
206 | case <-time.After(sleepDuration): // sleep
207 | }
208 | }
209 |
210 | // Pause pauses the job process
211 | func (j *Job) Pause() {
212 | if !j.Paused {
213 | j.Paused = true
214 | j.pauseWg.Add(1)
215 | j.Output.Info("------ PAUSING ------")
216 | }
217 | }
218 |
219 | // Resume resumes the job process
220 | func (j *Job) Resume() {
221 | if j.Paused {
222 | j.Paused = false
223 | j.Output.Info("------ RESUMING -----")
224 | j.pauseWg.Done()
225 | }
226 | }
227 |
// startExecution runs a single queued job: it spawns the background progress
// updater, then feeds each value from the input provider to a worker
// goroutine, bounded by the configured thread count and the rate limiter.
func (j *Job) startExecution() {
	var wg sync.WaitGroup
	wg.Add(1)
	go j.runBackgroundTasks(&wg)

	// Print the base URL when starting a new recursion or sniper queue job
	if j.queuepos > 1 {
		if j.Config.InputMode == "sniper" {
			j.Output.Info(fmt.Sprintf("Starting queued sniper job (%d of %d) on target: %s", j.queuepos, len(j.queuejobs), j.Config.Url))
		} else {
			j.Output.Info(fmt.Sprintf("Starting queued job on target: %s", j.Config.Url))
		}
	}

	//Limiter blocks after reaching the buffer, ensuring limited concurrency
	threadlimiter := make(chan bool, j.Config.Threads)

	for j.Input.Next() && !j.skipQueue {
		// Check if we should stop the process
		j.CheckStop()

		if !j.Running {
			defer j.Output.Warning(j.Error)
			break
		}
		// Block here while the job is paused
		j.pauseWg.Wait()
		// Handle the rate & thread limiting
		threadlimiter <- true
		// Ratelimiter handles the rate ticker
		<-j.Rate.RateLimiter.C
		nextInput := j.Input.Value()
		nextPosition := j.Input.Position()
		// Add FFUFHASH and its value
		nextInput["FFUFHASH"] = j.ffufHash(nextPosition)

		wg.Add(1)
		j.Counter++

		go func() {
			// Release the concurrency slot when this worker finishes
			defer func() { <-threadlimiter }()
			defer wg.Done()
			threadStart := time.Now()
			j.runTask(nextInput, nextPosition, false)
			j.sleepIfNeeded()
			threadEnd := time.Now()
			j.Rate.Tick(threadStart, threadEnd)
		}()
		if !j.RunningJob {
			defer j.Output.Warning(j.Error)
			return
		}
	}
	// Wait for in-flight workers before reporting final progress
	wg.Wait()
	j.updateProgress()
}
283 |
// interruptMonitor listens for SIGINT/SIGTERM in a background goroutine and
// stops the job gracefully so that output files still get written on Ctrl-C.
func (j *Job) interruptMonitor() {
	sigChan := make(chan os.Signal, 2)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		for range sigChan {
			j.Error = "Caught keyboard interrupt (Ctrl-C)\n"
			// resume if paused, so workers blocked on pauseWg can exit
			if j.Paused {
				j.pauseWg.Done()
			}
			// Stop the job
			j.Stop()
		}
	}()
}
299 |
// runBackgroundTasks periodically refreshes the progress display until the
// current job has processed all inputs, was stopped, or was skipped.
// NOTE(review): j.Counter and the Running/RunningJob flags are read here
// without a lock while worker goroutines update them — presumably acceptable
// for a best-effort progress display; confirm with the race detector.
func (j *Job) runBackgroundTasks(wg *sync.WaitGroup) {
	defer wg.Done()
	totalProgress := j.Input.Total()
	for j.Counter <= totalProgress && !j.skipQueue {
		// Block here while the job is paused
		j.pauseWg.Wait()
		if !j.Running {
			break
		}
		j.updateProgress()
		if j.Counter == totalProgress {
			return
		}
		if !j.RunningJob {
			return
		}
		time.Sleep(time.Millisecond * time.Duration(j.Config.ProgressFrequency))
	}
}
318 |
319 | func (j *Job) updateProgress() {
320 | prog := Progress{
321 | StartedAt: j.startTimeJob,
322 | ReqCount: j.Counter,
323 | ReqTotal: j.Input.Total(),
324 | ReqSec: j.Rate.CurrentRate(),
325 | QueuePos: j.queuepos,
326 | QueueTotal: len(j.queuejobs),
327 | ErrorCount: j.ErrorCounter,
328 | }
329 | j.Output.Progress(prog)
330 | }
331 |
// isMatch runs the configured matchers and filters against a response and
// reports whether it should be treated as a match. Matchers run first,
// combined according to MatcherMode ("and"/"or"); filters can then veto the
// match according to FilterMode.
func (j *Job) isMatch(resp Response) bool {
	matched := false
	var matchers map[string]FilterProvider
	var filters map[string]FilterProvider
	if j.Config.AutoCalibrationPerHost {
		// Per-host autocalibration keeps a separate filter set for each host
		filters = j.Config.MatcherManager.FiltersForDomain(HostURLFromRequest(*resp.Request))
	} else {
		filters = j.Config.MatcherManager.GetFilters()
	}
	matchers = j.Config.MatcherManager.GetMatchers()
	for _, m := range matchers {
		match, err := m.Filter(&resp)
		if err != nil {
			// An erroring matcher simply doesn't contribute to the verdict
			continue
		}
		if match {
			matched = true
		} else if j.Config.MatcherMode == "and" {
			// we already know this isn't "and" match
			return false

		}
	}
	// The response was not matched, return before running filters
	if !matched {
		return false
	}
	for _, f := range filters {
		fv, err := f.Filter(&resp)
		if err != nil {
			// An erroring filter is skipped
			continue
		}
		if fv {
			// return false
			if j.Config.FilterMode == "or" {
				// return early, as filter matched
				return false
			}
		} else {
			if j.Config.FilterMode == "and" {
				// return early as not all filters matched in "and" mode
				return true
			}
		}
	}
	if len(filters) > 0 && j.Config.FilterMode == "and" {
		// we did not return early, so all filters were matched
		return false
	}
	return true
}
383 |
384 | func (j *Job) ffufHash(pos int) []byte {
385 | hashstring := ""
386 | r := []rune(j.Jobhash)
387 | if len(r) > 5 {
388 | hashstring = string(r[:5])
389 | }
390 | hashstring += fmt.Sprintf("%x", pos)
391 | return []byte(hashstring)
392 | }
393 |
394 | func (j *Job) runTask(input map[string][]byte, position int, retried bool) {
395 | basereq := j.queuejobs[j.queuepos-1].req
396 | req, err := j.Runner.Prepare(input, &basereq)
397 | req.Position = position
398 | if err != nil {
399 | j.Output.Error(fmt.Sprintf("Encountered an error while preparing request: %s\n", err))
400 | j.incError()
401 | log.Printf("%s", err)
402 | return
403 | }
404 |
405 | resp, err := j.Runner.Execute(&req)
406 | if err != nil {
407 | if retried {
408 | j.incError()
409 | log.Printf("%s", err)
410 | } else {
411 | j.runTask(input, position, true)
412 | }
413 | return
414 | }
415 | if j.SpuriousErrorCounter > 0 {
416 | j.resetSpuriousErrors()
417 | }
418 | if j.Config.StopOn403 || j.Config.StopOnAll {
419 | // Increment Forbidden counter if we encountered one
420 | if resp.StatusCode == 403 {
421 | j.inc403()
422 | }
423 | }
424 | if j.Config.StopOnAll {
425 | // increment 429 counter if the response code is 429
426 | if resp.StatusCode == 429 {
427 | j.inc429()
428 | }
429 | }
430 | j.pauseWg.Wait()
431 |
432 | // Handle autocalibration, must be done after the actual request to ensure sane value in req.Host
433 | _ = j.CalibrateIfNeeded(HostURLFromRequest(req), input)
434 |
435 | if j.isMatch(resp) {
436 | // Re-send request through replay-proxy if needed
437 | if j.ReplayRunner != nil {
438 | replayreq, err := j.ReplayRunner.Prepare(input, &basereq)
439 | replayreq.Position = position
440 | if err != nil {
441 | j.Output.Error(fmt.Sprintf("Encountered an error while preparing replayproxy request: %s\n", err))
442 | j.incError()
443 | log.Printf("%s", err)
444 | } else {
445 | _, _ = j.ReplayRunner.Execute(&replayreq)
446 | }
447 | }
448 | j.Output.Result(resp)
449 |
450 | // Refresh the progress indicator as we printed something out
451 | j.updateProgress()
452 | if j.Config.Recursion && j.Config.RecursionStrategy == "greedy" {
453 | j.handleGreedyRecursionJob(resp)
454 | }
455 | }
456 |
457 | if j.Config.Recursion && j.Config.RecursionStrategy == "default" && len(resp.GetRedirectLocation(false)) > 0 {
458 | j.handleDefaultRecursionJob(resp)
459 | }
460 | }
461 |
462 | // handleGreedyRecursionJob adds a recursion job to the queue if the maximum depth has not been reached
463 | func (j *Job) handleGreedyRecursionJob(resp Response) {
464 | // Handle greedy recursion strategy. Match has been determined before calling handleRecursionJob
465 | if j.Config.RecursionDepth == 0 || j.currentDepth < j.Config.RecursionDepth {
466 | recUrl := resp.Request.Url + "/" + "FUZZ"
467 | newJob := QueueJob{Url: recUrl, depth: j.currentDepth + 1, req: RecursionRequest(j.Config, recUrl)}
468 | j.queuejobs = append(j.queuejobs, newJob)
469 | j.Output.Info(fmt.Sprintf("Adding a new job to the queue: %s", recUrl))
470 | } else {
471 | j.Output.Warning(fmt.Sprintf("Maximum recursion depth reached. Ignoring: %s", resp.Request.Url))
472 | }
473 | }
474 |
475 | // handleDefaultRecursionJob adds a new recursion job to the job queue if a new directory is found and maximum depth has
476 | // not been reached
477 | func (j *Job) handleDefaultRecursionJob(resp Response) {
478 | recUrl := resp.Request.Url + "/" + "FUZZ"
479 | if (resp.Request.Url + "/") != resp.GetRedirectLocation(true) {
480 | // Not a directory, return early
481 | return
482 | }
483 | if j.Config.RecursionDepth == 0 || j.currentDepth < j.Config.RecursionDepth {
484 | // We have yet to reach the maximum recursion depth
485 | newJob := QueueJob{Url: recUrl, depth: j.currentDepth + 1, req: RecursionRequest(j.Config, recUrl)}
486 | j.queuejobs = append(j.queuejobs, newJob)
487 | j.Output.Info(fmt.Sprintf("Adding a new job to the queue: %s", recUrl))
488 | } else {
489 | j.Output.Warning(fmt.Sprintf("Directory found, but recursion depth exceeded. Ignoring: %s", resp.GetRedirectLocation(true)))
490 | }
491 | }
492 |
// CheckStop stops the job if stopping conditions are met: too many 403s,
// spurious errors or 429s (depending on the -sf/-se/-sa flags), or the
// configured -maxtime / -maxtime-job limits being exceeded.
func (j *Job) CheckStop() {
	// Response-ratio checks only kick in after 50 requests to avoid
	// triggering on a small, unrepresentative sample.
	if j.Counter > 50 {
		// We have enough samples
		if j.Config.StopOn403 || j.Config.StopOnAll {
			if float64(j.Count403)/float64(j.Counter) > 0.95 {
				// Over 95% of requests are 403
				j.Error = "Getting an unusual amount of 403 responses, exiting."
				j.Stop()
			}
		}
		if j.Config.StopOnErrors || j.Config.StopOnAll {
			if j.SpuriousErrorCounter > j.Config.Threads*2 {
				// Most of the requests are erroring
				j.Error = "Receiving spurious errors, exiting."
				j.Stop()
			}

		}
		if j.Config.StopOnAll && (float64(j.Count429)/float64(j.Counter) > 0.2) {
			// Over 20% of responses are 429
			j.Error = "Getting an unusual amount of 429 responses, exiting."
			j.Stop()
		}
	}

	// Check for runtime of entire process
	if j.Config.MaxTime > 0 {
		dur := time.Since(j.startTime)
		runningSecs := int(dur / time.Second)
		if runningSecs >= j.Config.MaxTime {
			j.Error = "Maximum running time for entire process reached, exiting."
			j.Stop()
		}
	}

	// Check for runtime of current job
	if j.Config.MaxTimeJob > 0 {
		dur := time.Since(j.startTimeJob)
		runningSecs := int(dur / time.Second)
		if runningSecs >= j.Config.MaxTimeJob {
			// Only this job is stopped; the process continues with the queue
			j.Error = "Maximum running time for this job reached, continuing with next job if one exists."
			j.Next()

		}
	}
}
540 |
// Stop the execution of the Job. Cancelling the context aborts in-flight
// requests and any cancellable sleeps.
func (j *Job) Stop() {
	j.Running = false
	j.Config.Cancel()
}
546 |
// Next stops the currently running job and lets the main loop continue with
// the next queued job, leaving the overall process running.
func (j *Job) Next() {
	j.RunningJob = false
}
551 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | # ffuf - Fuzz Faster U Fool
3 |
4 | A fast web fuzzer written in Go.
5 |
6 | - [Installation](https://github.com/ffuf/ffuf#installation)
7 | - [Example usage](https://github.com/ffuf/ffuf#example-usage)
8 | - [Content discovery](https://github.com/ffuf/ffuf#typical-directory-discovery)
9 | - [Vhost discovery](https://github.com/ffuf/ffuf#virtual-host-discovery-without-dns-records)
10 | - [Parameter fuzzing](https://github.com/ffuf/ffuf#get-parameter-fuzzing)
11 | - [POST data fuzzing](https://github.com/ffuf/ffuf#post-data-fuzzing)
12 | - [Using external mutator](https://github.com/ffuf/ffuf#using-external-mutator-to-produce-test-cases)
13 | - [Configuration files](https://github.com/ffuf/ffuf#configuration-files)
14 | - [Help](https://github.com/ffuf/ffuf#usage)
15 | - [Interactive mode](https://github.com/ffuf/ffuf#interactive-mode)
16 | - [Sponsorware?](https://github.com/ffuf/ffuf#sponsorware)
17 |
18 | ## Sponsors
19 | [](https://www.offensive-security.com/)
20 |
21 | ## Official Discord Channel
22 |
23 | ffuf has a channel at Porchetta Industries Discord server alongside of channels for many other tools.
24 |
25 | Come hang out and discuss ffuf, its usage and development!
26 |
27 | [](https://discord.gg/VWcdZCUsQP)
28 |
29 | ## Installation
30 |
31 | - [Download](https://github.com/ffuf/ffuf/releases/latest) a prebuilt binary from [releases page](https://github.com/ffuf/ffuf/releases/latest), unpack and run!
32 |
33 | _or_
34 | - If you are on mac with [homebrew](https://brew.sh) installed `brew install ffuf`
35 |
36 | _or_
37 | - If you have recent go compiler installed: `go install github.com/ffuf/ffuf@latest` (the same command works for updating)
38 |
39 | _or_
40 | - `git clone https://github.com/ffuf/ffuf ; cd ffuf ; go get ; go build`
41 |
42 | Ffuf depends on Go 1.16 or greater.
43 |
44 | ## Example usage
45 |
46 | The usage examples below show just the simplest tasks you can accomplish using `ffuf`.
47 |
48 | For more extensive documentation, with real life usage examples and tips, be sure to check out the awesome guide:
49 | "[Everything you need to know about FFUF](https://codingo.io/tools/ffuf/bounty/2020/09/17/everything-you-need-to-know-about-ffuf.html)" by
50 | Michael Skelton ([@codingo](https://github.com/codingo)).
51 |
52 | You can also practise your ffuf scans against a live host with different lessons and use cases either locally by using the docker container https://github.com/adamtlangley/ffufme or against the live hosted version at http://ffuf.me created by Adam Langley [@adamtlangley](https://twitter.com/adamtlangley).
53 |
54 | ### Typical directory discovery
55 |
56 | [](https://asciinema.org/a/211350)
57 |
58 | By using the FUZZ keyword at the end of URL (`-u`):
59 |
60 | ```
61 | ffuf -w /path/to/wordlist -u https://target/FUZZ
62 | ```
63 |
64 | ### Virtual host discovery (without DNS records)
65 |
66 | [](https://asciinema.org/a/211360)
67 |
68 | Assuming that the default virtual host response size is 4242 bytes, we can filter out all responses of that size (`-fs 4242`) while fuzzing the Host header:
69 |
70 | ```
71 | ffuf -w /path/to/vhost/wordlist -u https://target -H "Host: FUZZ" -fs 4242
72 | ```
73 |
74 | ### GET parameter fuzzing
75 |
76 | GET parameter name fuzzing is very similar to directory discovery, and works by defining the `FUZZ` keyword as a part of the URL. This also assumes a response size of 4242 bytes for an invalid GET parameter name.
77 |
78 | ```
79 | ffuf -w /path/to/paramnames.txt -u https://target/script.php?FUZZ=test_value -fs 4242
80 | ```
81 |
82 | If the parameter name is known, the values can be fuzzed the same way. This example assumes a wrong parameter value returning HTTP response code 401.
83 |
84 | ```
85 | ffuf -w /path/to/values.txt -u https://target/script.php?valid_name=FUZZ -fc 401
86 | ```
87 |
88 | ### POST data fuzzing
89 |
90 | This is a very straightforward operation, again by using the `FUZZ` keyword. This example is fuzzing only part of the POST request. We're again filtering out the 401 responses.
91 |
92 | ```
93 | ffuf -w /path/to/postdata.txt -X POST -d "username=admin\&password=FUZZ" -u https://target/login.php -fc 401
94 | ```
95 |
96 | ### Maximum execution time
97 |
98 | If you don't want ffuf to run indefinitely, you can use the `-maxtime` flag. This stops __the entire__ process after a given time (in seconds).
99 |
100 | ```
101 | ffuf -w /path/to/wordlist -u https://target/FUZZ -maxtime 60
102 | ```
103 |
104 | When working with recursion, you can control the maxtime __per job__ using `-maxtime-job`. This will stop the current job after a given time (in seconds) and continue with the next one. New jobs are created when the recursion functionality detects a subdirectory.
105 |
106 | ```
107 | ffuf -w /path/to/wordlist -u https://target/FUZZ -maxtime-job 60 -recursion -recursion-depth 2
108 | ```
109 |
110 | It is also possible to combine both flags limiting the per job maximum execution time as well as the overall execution time. If you do not use recursion then both flags behave equally.
111 |
112 | ### Using external mutator to produce test cases
113 |
114 | For this example, we'll fuzz JSON data that's sent over POST. [Radamsa](https://gitlab.com/akihe/radamsa) is used as the mutator.
115 |
116 | When `--input-cmd` is used, ffuf will display matches as their position. This same position value will be available for the callee as an environment variable `$FFUF_NUM`. We'll use this position value as the seed for the mutator. Files example1.txt and example2.txt contain valid JSON payloads. We are matching all the responses, but filtering out response code `400 - Bad request`:
117 |
118 | ```
119 | ffuf --input-cmd 'radamsa --seed $FFUF_NUM example1.txt example2.txt' -H "Content-Type: application/json" -X POST -u https://ffuf.io.fi/FUZZ -mc all -fc 400
120 | ```
121 |
122 | It of course isn't very efficient to call the mutator for each payload, so we can also pre-generate the payloads, still using [Radamsa](https://gitlab.com/akihe/radamsa) as an example:
123 |
124 | ```
125 | # Generate 1000 example payloads
126 | radamsa -n 1000 -o %n.txt example1.txt example2.txt
127 |
128 | # This results into files 1.txt ... 1000.txt
129 | # Now we can just read the payload data in a loop from file for ffuf
130 |
131 | ffuf --input-cmd 'cat $FFUF_NUM.txt' -H "Content-Type: application/json" -X POST -u https://ffuf.io.fi/ -mc all -fc 400
132 | ```
133 |
134 | ### Configuration files
135 |
136 | When running ffuf, it first checks if a default configuration file exists. The file path for it is `~/.ffufrc` / `$HOME/.ffufrc`
137 | for most *nixes (for example `/home/joohoi/.ffufrc`) and `%USERPROFILE%\.ffufrc` for Windows. You can configure one or
138 | multiple options in this file, and they will be applied on every subsequent ffuf job. An example of .ffufrc file can be
139 | found [here](https://github.com/ffuf/ffuf/blob/master/ffufrc.example).
140 |
141 | The configuration options provided on the command line override the ones loaded from `~/.ffufrc`.
142 | Note: this does not apply to CLI flags that can be provided more than once. One such example is the `-H` (header) flag.
143 | In this case, the `-H` values provided on the command line will be _appended_ to the ones from the config file instead.
144 |
145 | Additionally, in case you wish to use a bunch of configuration files for different use cases, you can do so by defining
146 | the configuration file path using `-config` command line flag that takes the file path to the configuration file as its
147 | parameter.
148 |
149 |
150 |
151 |
152 |
153 | ## Usage
154 |
155 | To define the test case for ffuf, use the keyword `FUZZ` anywhere in the URL (`-u`), headers (`-H`), or POST data (`-d`).
156 |
157 | ```
158 | Fuzz Faster U Fool - v1.3.0-dev
159 |
160 | HTTP OPTIONS:
161 | -H Header `"Name: Value"`, separated by colon. Multiple -H flags are accepted.
162 | -X HTTP method to use
163 | -b Cookie data `"NAME1=VALUE1; NAME2=VALUE2"` for copy as curl functionality.
164 | -d POST data
165 | -ignore-body Do not fetch the response content. (default: false)
166 | -r Follow redirects (default: false)
167 | -recursion Scan recursively. Only FUZZ keyword is supported, and URL (-u) has to end in it. (default: false)
168 | -recursion-depth Maximum recursion depth. (default: 0)
169 | -recursion-strategy Recursion strategy: "default" for a redirect based, and "greedy" to recurse on all matches (default: default)
170 | -replay-proxy Replay matched requests using this proxy.
171 | -sni Target TLS SNI, does not support FUZZ keyword
172 | -timeout HTTP request timeout in seconds. (default: 10)
173 | -u Target URL
174 | -x Proxy URL (SOCKS5 or HTTP). For example: http://127.0.0.1:8080 or socks5://127.0.0.1:8080
175 |
176 | GENERAL OPTIONS:
177 | -V Show version information. (default: false)
178 | -ac Automatically calibrate filtering options (default: false)
179 | -acc Custom auto-calibration string. Can be used multiple times. Implies -ac
180 | -c Colorize output. (default: false)
181 | -config Load configuration from a file
182 | -maxtime Maximum running time in seconds for entire process. (default: 0)
183 | -maxtime-job Maximum running time in seconds per job. (default: 0)
184 | -noninteractive Disable the interactive console functionality (default: false)
185 | -p Seconds of `delay` between requests, or a range of random delay. For example "0.1" or "0.1-2.0"
186 | -rate Rate of requests per second (default: 0)
187 | -s Do not print additional information (silent mode) (default: false)
188 | -sa Stop on all error cases. Implies -sf and -se. (default: false)
189 | -se Stop on spurious errors (default: false)
190 | -sf Stop when > 95% of responses return 403 Forbidden (default: false)
191 | -t Number of concurrent threads. (default: 40)
192 | -v Verbose output, printing full URL and redirect location (if any) with the results. (default: false)
193 |
194 | MATCHER OPTIONS:
195 | -mc Match HTTP status codes, or "all" for everything. (default: 200,204,301,302,307,401,403,405,500)
196 | -ml Match amount of lines in response
197 | -mr Match regexp
198 | -ms Match HTTP response size
199 | -mt Match how many milliseconds to the first response byte, either greater or less than. EG: ">100" or "<100"
200 | -mw Match amount of words in response
201 |
202 | FILTER OPTIONS:
203 | -fc Filter HTTP status codes from response. Comma separated list of codes and ranges
204 | -fl Filter by amount of lines in response. Comma separated list of line counts and ranges
205 | -fr Filter regexp
206 | -fs Filter HTTP response size. Comma separated list of sizes and ranges
207 | -ft Filter by number of milliseconds to the first response byte, either greater or less than. EG: ">100" or "<100"
208 | -fw Filter by amount of words in response. Comma separated list of word counts and ranges
209 |
210 | INPUT OPTIONS:
211 | -D DirSearch wordlist compatibility mode. Used in conjunction with -e flag. (default: false)
212 | -e Comma separated list of extensions. Extends FUZZ keyword.
213 | -ic Ignore wordlist comments (default: false)
214 | -input-cmd Command producing the input. --input-num is required when using this input method. Overrides -w.
215 | -input-num Number of inputs to test. Used in conjunction with --input-cmd. (default: 100)
216 | -input-shell Shell to be used for running command
217 | -mode Multi-wordlist operation mode. Available modes: clusterbomb, pitchfork, sniper (default: clusterbomb)
218 | -request File containing the raw http request
219 | -request-proto Protocol to use along with raw request (default: https)
220 | -w Wordlist file path and (optional) keyword separated by colon. eg. '/path/to/wordlist:KEYWORD'
221 |
222 | OUTPUT OPTIONS:
223 | -debug-log Write all of the internal logging to the specified file.
224 | -o Write output to file
225 | -od Directory path to store matched results to.
226 | -of Output file format. Available formats: json, ejson, html, md, csv, ecsv (or, 'all' for all formats) (default: json)
227 | -or Don't create the output file if we don't have results (default: false)
228 |
229 | EXAMPLE USAGE:
230 | Fuzz file paths from wordlist.txt, match all responses but filter out those with content-size 42.
231 | Colored, verbose output.
232 | ffuf -w wordlist.txt -u https://example.org/FUZZ -mc all -fs 42 -c -v
233 |
234 | Fuzz Host-header, match HTTP 200 responses.
235 | ffuf -w hosts.txt -u https://example.org/ -H "Host: FUZZ" -mc 200
236 |
237 | Fuzz POST JSON data. Match all responses not containing text "error".
238 | ffuf -w entries.txt -u https://example.org/ -X POST -H "Content-Type: application/json" \
239 | -d '{"name": "FUZZ", "anotherkey": "anothervalue"}' -fr "error"
240 |
241 | Fuzz multiple locations. Match only responses reflecting the value of "VAL" keyword. Colored.
242 | ffuf -w params.txt:PARAM -w values.txt:VAL -u https://example.org/?PARAM=VAL -mr "VAL" -c
243 |
244 | More information and examples: https://github.com/ffuf/ffuf
245 |
246 | ```
247 |
248 | ### Interactive mode
249 |
250 | By pressing `ENTER` during ffuf execution, the process is paused and user is dropped to a shell-like interactive mode:
251 | ```
252 | entering interactive mode
253 | type "help" for a list of commands, or ENTER to resume.
254 | > help
255 |
256 | available commands:
257 | fc [value] - (re)configure status code filter
258 | fl [value] - (re)configure line count filter
259 | fw [value] - (re)configure word count filter
260 | fs [value] - (re)configure size filter
261 | queueshow - show recursive job queue
262 | queuedel [number] - delete a recursion job in the queue
263 | queueskip - advance to the next queued recursion job
264 | restart - restart and resume the current ffuf job
265 | resume - resume current ffuf job (or: ENTER)
266 | show - show results for the current job
267 | savejson [filename] - save current matches to a file
268 | help - you are looking at it
269 | >
270 | ```
271 |
272 | in this mode, filters can be reconfigured, queue managed and the current state saved to disk.
273 |
274 | When (re)configuring the filters, they are applied retroactively, and all the false positive matches already in memory that
275 | would have been filtered out by the newly added filters get deleted.
276 |
277 | The new state of matches can be printed out with a command `show` that will print out all the matches as like they
278 | would have been found by `ffuf`.
279 |
280 | As "negative" matches are not stored in memory, relaxing the filters unfortunately cannot bring back the lost matches.
281 | For this kind of scenario, the user is able to use the command `restart`, which resets the state and starts the current
282 | job from the beginning.
283 |
284 |
285 |
286 |
287 |
288 |
289 | ## Sponsorware
290 |
291 | `ffuf` employs a sponsorware model. This means that all new features developed by its author are initially exclusively
292 | available for their sponsors. 30 days after the exclusive release, all the new features will be released at the freely
293 | available open source repository at https://github.com/ffuf/ffuf .
294 |
295 | This model enables me to provide concrete benefits for the generous individuals and companies that enable me to work on
296 | `ffuf`. The different sponsorship tiers can be seen [here](https://github.com/sponsors/joohoi).
297 |
298 | All the community contributions are and will be available directly in the freely available open source repository. The
299 | exclusive version benefits only include new features created by [@joohoi](https://github.com/joohoi)
300 |
301 | ### Access the sponsorware through code contributions
302 |
303 | People that create significant contributions to the `ffuf` project itself should and will have access to the sponsorware
304 | as well. If you are planning to create such a contribution, please contact [@joohoi](https://github.com/joohoi)
305 | first to ensure that there aren't other people working on the same feature.
306 |
307 | ## Helper scripts and advanced payloads
308 |
309 | See [ffuf-scripts](https://github.com/ffuf/ffuf-scripts) repository for helper scripts and payload generators
310 | for different workflows and usage scenarios.
311 |
312 | ## License
313 |
314 | ffuf is released under MIT license. See [LICENSE](https://github.com/ffuf/ffuf/blob/master/LICENSE).
315 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "fmt"
7 | "github.com/ffuf/ffuf/pkg/ffuf"
8 | "github.com/ffuf/ffuf/pkg/filter"
9 | "github.com/ffuf/ffuf/pkg/input"
10 | "github.com/ffuf/ffuf/pkg/interactive"
11 | "github.com/ffuf/ffuf/pkg/output"
12 | "github.com/ffuf/ffuf/pkg/runner"
13 | "io"
14 | "log"
15 | "os"
16 | "strings"
17 | "time"
18 | )
19 |
// multiStringFlag collects every value of a repeatable string flag as-is.
type multiStringFlag []string

// wordlistFlag collects wordlist arguments, expanding comma separated values.
type wordlistFlag []string

// String implements flag.Value; the default value is always rendered empty.
func (m *multiStringFlag) String() string {
	return ""
}

// String implements flag.Value; the default value is always rendered empty.
func (m *wordlistFlag) String() string {
	return ""
}

// Set implements flag.Value, appending the raw value unchanged.
func (m *multiStringFlag) Set(value string) error {
	*m = append(*m, value)
	return nil
}

// Set implements flag.Value, appending each comma separated element. A value
// with no commas splits into a single element, so it is appended unchanged.
func (m *wordlistFlag) Set(value string) error {
	*m = append(*m, strings.Split(value, ",")...)
	return nil
}
47 |
48 | // ParseFlags parses the command line flags and (re)populates the ConfigOptions struct
49 | func ParseFlags(opts *ffuf.ConfigOptions) *ffuf.ConfigOptions {
50 | var ignored bool
51 | var cookies, autocalibrationstrings, headers, inputcommands multiStringFlag
52 | var wordlists wordlistFlag
53 |
54 | cookies = opts.HTTP.Cookies
55 | autocalibrationstrings = opts.General.AutoCalibrationStrings
56 | headers = opts.HTTP.Headers
57 | inputcommands = opts.Input.Inputcommands
58 | wordlists = opts.Input.Wordlists
59 |
60 | flag.BoolVar(&ignored, "compressed", true, "Dummy flag for copy as curl functionality (ignored)")
61 | flag.BoolVar(&ignored, "i", true, "Dummy flag for copy as curl functionality (ignored)")
62 | flag.BoolVar(&ignored, "k", false, "Dummy flag for backwards compatibility")
63 | flag.BoolVar(&opts.Output.OutputSkipEmptyFile, "or", opts.Output.OutputSkipEmptyFile, "Don't create the output file if we don't have results")
64 | flag.BoolVar(&opts.General.AutoCalibration, "ac", opts.General.AutoCalibration, "Automatically calibrate filtering options")
65 | flag.BoolVar(&opts.General.AutoCalibrationPerHost, "ach", opts.General.AutoCalibration, "Per host autocalibration")
66 | flag.BoolVar(&opts.General.Colors, "c", opts.General.Colors, "Colorize output.")
67 | flag.BoolVar(&opts.General.Json, "json", opts.General.Json, "JSON output, printing newline-delimited JSON records")
68 | flag.BoolVar(&opts.General.Noninteractive, "noninteractive", opts.General.Noninteractive, "Disable the interactive console functionality")
69 | flag.BoolVar(&opts.General.Quiet, "s", opts.General.Quiet, "Do not print additional information (silent mode)")
70 | flag.BoolVar(&opts.General.ShowVersion, "V", opts.General.ShowVersion, "Show version information.")
71 | flag.BoolVar(&opts.General.StopOn403, "sf", opts.General.StopOn403, "Stop when > 95% of responses return 403 Forbidden")
72 | flag.BoolVar(&opts.General.StopOnAll, "sa", opts.General.StopOnAll, "Stop on all error cases. Implies -sf and -se.")
73 | flag.BoolVar(&opts.General.StopOnErrors, "se", opts.General.StopOnErrors, "Stop on spurious errors")
74 | flag.BoolVar(&opts.General.Verbose, "v", opts.General.Verbose, "Verbose output, printing full URL and redirect location (if any) with the results.")
75 | flag.BoolVar(&opts.HTTP.FollowRedirects, "r", opts.HTTP.FollowRedirects, "Follow redirects")
76 | flag.BoolVar(&opts.HTTP.IgnoreBody, "ignore-body", opts.HTTP.IgnoreBody, "Do not fetch the response content.")
77 | flag.BoolVar(&opts.HTTP.Recursion, "recursion", opts.HTTP.Recursion, "Scan recursively. Only FUZZ keyword is supported, and URL (-u) has to end in it.")
78 | flag.BoolVar(&opts.HTTP.Http2, "http2", opts.HTTP.Http2, "Use HTTP2 protocol")
79 | flag.BoolVar(&opts.Input.DirSearchCompat, "D", opts.Input.DirSearchCompat, "DirSearch wordlist compatibility mode. Used in conjunction with -e flag.")
80 | flag.BoolVar(&opts.Input.IgnoreWordlistComments, "ic", opts.Input.IgnoreWordlistComments, "Ignore wordlist comments")
81 | flag.IntVar(&opts.General.MaxTime, "maxtime", opts.General.MaxTime, "Maximum running time in seconds for entire process.")
82 | flag.IntVar(&opts.General.MaxTimeJob, "maxtime-job", opts.General.MaxTimeJob, "Maximum running time in seconds per job.")
83 | flag.IntVar(&opts.General.Rate, "rate", opts.General.Rate, "Rate of requests per second")
84 | flag.IntVar(&opts.General.Threads, "t", opts.General.Threads, "Number of concurrent threads.")
85 | flag.IntVar(&opts.HTTP.RecursionDepth, "recursion-depth", opts.HTTP.RecursionDepth, "Maximum recursion depth.")
86 | flag.IntVar(&opts.HTTP.Timeout, "timeout", opts.HTTP.Timeout, "HTTP request timeout in seconds.")
87 | flag.IntVar(&opts.Input.InputNum, "input-num", opts.Input.InputNum, "Number of inputs to test. Used in conjunction with --input-cmd.")
88 | flag.StringVar(&opts.General.AutoCalibrationKeyword, "ack", opts.General.AutoCalibrationKeyword, "Autocalibration keyword")
89 | flag.StringVar(&opts.General.AutoCalibrationStrategy, "acs", opts.General.AutoCalibrationStrategy, "Autocalibration strategy: \"basic\" or \"advanced\"")
90 | flag.StringVar(&opts.General.ConfigFile, "config", "", "Load configuration from a file")
91 | flag.StringVar(&opts.Filter.Mode, "fmode", opts.Filter.Mode, "Filter set operator. Either of: and, or")
92 | flag.StringVar(&opts.Filter.Lines, "fl", opts.Filter.Lines, "Filter by amount of lines in response. Comma separated list of line counts and ranges")
93 | flag.StringVar(&opts.Filter.Regexp, "fr", opts.Filter.Regexp, "Filter regexp")
94 | flag.StringVar(&opts.Filter.Size, "fs", opts.Filter.Size, "Filter HTTP response size. Comma separated list of sizes and ranges")
95 | flag.StringVar(&opts.Filter.Status, "fc", opts.Filter.Status, "Filter HTTP status codes from response. Comma separated list of codes and ranges")
96 | flag.StringVar(&opts.Filter.Time, "ft", opts.Filter.Time, "Filter by number of milliseconds to the first response byte, either greater or less than. EG: >100 or <100")
97 | flag.StringVar(&opts.Filter.Words, "fw", opts.Filter.Words, "Filter by amount of words in response. Comma separated list of word counts and ranges")
98 | flag.StringVar(&opts.General.Delay, "p", opts.General.Delay, "Seconds of `delay` between requests, or a range of random delay. For example \"0.1\" or \"0.1-2.0\"")
99 | flag.StringVar(&opts.General.Searchhash, "search", opts.General.Searchhash, "Search for a FFUFHASH payload from ffuf history")
100 | flag.StringVar(&opts.HTTP.Data, "d", opts.HTTP.Data, "POST data")
101 | flag.StringVar(&opts.HTTP.Data, "data", opts.HTTP.Data, "POST data (alias of -d)")
102 | flag.StringVar(&opts.HTTP.Data, "data-ascii", opts.HTTP.Data, "POST data (alias of -d)")
103 | flag.StringVar(&opts.HTTP.Data, "data-binary", opts.HTTP.Data, "POST data (alias of -d)")
104 | flag.StringVar(&opts.HTTP.Method, "X", opts.HTTP.Method, "HTTP method to use")
105 | flag.StringVar(&opts.HTTP.ProxyURL, "x", opts.HTTP.ProxyURL, "Proxy URL (SOCKS5 or HTTP). For example: http://127.0.0.1:8080 or socks5://127.0.0.1:8080")
106 | flag.StringVar(&opts.HTTP.ReplayProxyURL, "replay-proxy", opts.HTTP.ReplayProxyURL, "Replay matched requests using this proxy.")
107 | flag.StringVar(&opts.HTTP.RecursionStrategy, "recursion-strategy", opts.HTTP.RecursionStrategy, "Recursion strategy: \"default\" for a redirect based, and \"greedy\" to recurse on all matches")
108 | flag.StringVar(&opts.HTTP.URL, "u", opts.HTTP.URL, "Target URL")
109 | flag.StringVar(&opts.HTTP.SNI, "sni", opts.HTTP.SNI, "Target TLS SNI, does not support FUZZ keyword")
110 | flag.StringVar(&opts.Input.Extensions, "e", opts.Input.Extensions, "Comma separated list of extensions. Extends FUZZ keyword.")
111 | flag.StringVar(&opts.Input.InputMode, "mode", opts.Input.InputMode, "Multi-wordlist operation mode. Available modes: clusterbomb, pitchfork, sniper")
112 | flag.StringVar(&opts.Input.InputShell, "input-shell", opts.Input.InputShell, "Shell to be used for running command")
113 | flag.StringVar(&opts.Input.Request, "request", opts.Input.Request, "File containing the raw http request")
114 | flag.StringVar(&opts.Input.RequestProto, "request-proto", opts.Input.RequestProto, "Protocol to use along with raw request")
115 | flag.StringVar(&opts.Matcher.Mode, "mmode", opts.Matcher.Mode, "Matcher set operator. Either of: and, or")
116 | flag.StringVar(&opts.Matcher.Lines, "ml", opts.Matcher.Lines, "Match amount of lines in response")
117 | flag.StringVar(&opts.Matcher.Regexp, "mr", opts.Matcher.Regexp, "Match regexp")
118 | flag.StringVar(&opts.Matcher.Size, "ms", opts.Matcher.Size, "Match HTTP response size")
119 | flag.StringVar(&opts.Matcher.Status, "mc", opts.Matcher.Status, "Match HTTP status codes, or \"all\" for everything.")
120 | flag.StringVar(&opts.Matcher.Time, "mt", opts.Matcher.Time, "Match how many milliseconds to the first response byte, either greater or less than. EG: >100 or <100")
121 | flag.StringVar(&opts.Matcher.Words, "mw", opts.Matcher.Words, "Match amount of words in response")
122 | flag.StringVar(&opts.Output.DebugLog, "debug-log", opts.Output.DebugLog, "Write all of the internal logging to the specified file.")
123 | flag.StringVar(&opts.Output.OutputDirectory, "od", opts.Output.OutputDirectory, "Directory path to store matched results to.")
124 | flag.StringVar(&opts.Output.OutputFile, "o", opts.Output.OutputFile, "Write output to file")
125 | flag.StringVar(&opts.Output.OutputFormat, "of", opts.Output.OutputFormat, "Output file format. Available formats: json, ejson, html, md, csv, ecsv (or, 'all' for all formats)")
126 | flag.Var(&autocalibrationstrings, "acc", "Custom auto-calibration string. Can be used multiple times. Implies -ac")
127 | flag.Var(&cookies, "b", "Cookie data `\"NAME1=VALUE1; NAME2=VALUE2\"` for copy as curl functionality.")
128 | flag.Var(&cookies, "cookie", "Cookie data (alias of -b)")
129 | flag.Var(&headers, "H", "Header `\"Name: Value\"`, separated by colon. Multiple -H flags are accepted.")
130 | flag.Var(&inputcommands, "input-cmd", "Command producing the input. --input-num is required when using this input method. Overrides -w.")
131 | flag.Var(&wordlists, "w", "Wordlist file path and (optional) keyword separated by colon. eg. '/path/to/wordlist:KEYWORD'")
132 | flag.Usage = Usage
133 | flag.Parse()
134 |
135 | opts.General.AutoCalibrationStrings = autocalibrationstrings
136 | opts.HTTP.Cookies = cookies
137 | opts.HTTP.Headers = headers
138 | opts.Input.Inputcommands = inputcommands
139 | opts.Input.Wordlists = wordlists
140 | return opts
141 | }
142 |
143 | func main() {
144 |
145 | var err, optserr error
146 | ctx, cancel := context.WithCancel(context.Background())
147 | defer cancel()
148 | // prepare the default config options from default config file
149 | var opts *ffuf.ConfigOptions
150 | opts, optserr = ffuf.ReadDefaultConfig()
151 |
152 | opts = ParseFlags(opts)
153 |
154 | // Handle searchhash functionality and exit
155 | if opts.General.Searchhash != "" {
156 | coptions, pos, err := ffuf.SearchHash(opts.General.Searchhash)
157 | if err != nil {
158 | fmt.Printf("[ERR] %s\n", err)
159 | os.Exit(1)
160 | }
161 | if len(coptions) > 0 {
162 | fmt.Printf("Request candidate(s) for hash %s\n", opts.General.Searchhash)
163 | }
164 | for _, copt := range coptions {
165 | conf, err := ffuf.ConfigFromOptions(&copt.ConfigOptions, ctx, cancel)
166 | if err != nil {
167 | continue
168 | }
169 | printSearchResults(conf, pos, copt.Time, opts.General.Searchhash)
170 | }
171 | if err != nil {
172 | fmt.Printf("[ERR] %s\n", err)
173 | }
174 | os.Exit(0)
175 | }
176 |
177 | if opts.General.ShowVersion {
178 | fmt.Printf("ffuf version: %s\n", ffuf.Version())
179 | os.Exit(0)
180 | }
181 | if len(opts.Output.DebugLog) != 0 {
182 | f, err := os.OpenFile(opts.Output.DebugLog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
183 | if err != nil {
184 | fmt.Fprintf(os.Stderr, "Disabling logging, encountered error(s): %s\n", err)
185 | log.SetOutput(io.Discard)
186 | } else {
187 | log.SetOutput(f)
188 | defer f.Close()
189 | }
190 | } else {
191 | log.SetOutput(io.Discard)
192 | }
193 | if optserr != nil {
194 | log.Printf("Error while opening default config file: %s", optserr)
195 | }
196 |
197 | if opts.General.ConfigFile != "" {
198 | opts, err = ffuf.ReadConfig(opts.General.ConfigFile)
199 | if err != nil {
200 | fmt.Fprintf(os.Stderr, "Encoutered error(s): %s\n", err)
201 | Usage()
202 | fmt.Fprintf(os.Stderr, "Encoutered error(s): %s\n", err)
203 | os.Exit(1)
204 | }
205 | // Reset the flag package state
206 | flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
207 | // Re-parse the cli options
208 | opts = ParseFlags(opts)
209 | }
210 |
211 | // Set up Config struct
212 | conf, err := ffuf.ConfigFromOptions(opts, ctx, cancel)
213 | if err != nil {
214 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
215 | Usage()
216 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
217 | os.Exit(1)
218 | }
219 |
220 | job, err := prepareJob(conf)
221 | if err != nil {
222 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
223 | Usage()
224 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
225 | os.Exit(1)
226 | }
227 | if err := SetupFilters(opts, conf); err != nil {
228 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
229 | Usage()
230 | fmt.Fprintf(os.Stderr, "Encountered error(s): %s\n", err)
231 | os.Exit(1)
232 | }
233 |
234 | if !conf.Noninteractive {
235 | go func() {
236 | err := interactive.Handle(job)
237 | if err != nil {
238 | log.Printf("Error while trying to initialize interactive session: %s", err)
239 | }
240 | }()
241 | }
242 |
243 | // Job handles waiting for goroutines to complete itself
244 | job.Start()
245 | }
246 |
247 | func prepareJob(conf *ffuf.Config) (*ffuf.Job, error) {
248 | job := ffuf.NewJob(conf)
249 | var errs ffuf.Multierror
250 | job.Input, errs = input.NewInputProvider(conf)
251 | // TODO: implement error handling for runnerprovider and outputprovider
252 | // We only have http runner right now
253 | job.Runner = runner.NewRunnerByName("http", conf, false)
254 | if len(conf.ReplayProxyURL) > 0 {
255 | job.ReplayRunner = runner.NewRunnerByName("http", conf, true)
256 | }
257 | // We only have stdout outputprovider right now
258 | job.Output = output.NewOutputProviderByName("stdout", conf)
259 | return job, errs.ErrorOrNil()
260 | }
261 |
262 | func SetupFilters(parseOpts *ffuf.ConfigOptions, conf *ffuf.Config) error {
263 | errs := ffuf.NewMultierror()
264 | conf.MatcherManager = filter.NewMatcherManager()
265 | // If any other matcher is set, ignore -mc default value
266 | matcherSet := false
267 | statusSet := false
268 | warningIgnoreBody := false
269 | flag.Visit(func(f *flag.Flag) {
270 | if f.Name == "mc" {
271 | statusSet = true
272 | }
273 | if f.Name == "ms" {
274 | matcherSet = true
275 | warningIgnoreBody = true
276 | }
277 | if f.Name == "ml" {
278 | matcherSet = true
279 | warningIgnoreBody = true
280 | }
281 | if f.Name == "mr" {
282 | matcherSet = true
283 | }
284 | if f.Name == "mt" {
285 | matcherSet = true
286 | }
287 | if f.Name == "mw" {
288 | matcherSet = true
289 | warningIgnoreBody = true
290 | }
291 | })
292 | // Only set default matchers if no
293 | if statusSet || !matcherSet {
294 | if err := conf.MatcherManager.AddMatcher("status", parseOpts.Matcher.Status); err != nil {
295 | errs.Add(err)
296 | }
297 | }
298 |
299 | if parseOpts.Filter.Status != "" {
300 | if err := conf.MatcherManager.AddFilter("status", parseOpts.Filter.Status, false); err != nil {
301 | errs.Add(err)
302 | }
303 | }
304 | if parseOpts.Filter.Size != "" {
305 | warningIgnoreBody = true
306 | if err := conf.MatcherManager.AddFilter("size", parseOpts.Filter.Size, false); err != nil {
307 | errs.Add(err)
308 | }
309 | }
310 | if parseOpts.Filter.Regexp != "" {
311 | if err := conf.MatcherManager.AddFilter("regexp", parseOpts.Filter.Regexp, false); err != nil {
312 | errs.Add(err)
313 | }
314 | }
315 | if parseOpts.Filter.Words != "" {
316 | warningIgnoreBody = true
317 | if err := conf.MatcherManager.AddFilter("word", parseOpts.Filter.Words, false); err != nil {
318 | errs.Add(err)
319 | }
320 | }
321 | if parseOpts.Filter.Lines != "" {
322 | warningIgnoreBody = true
323 | if err := conf.MatcherManager.AddFilter("line", parseOpts.Filter.Lines, false); err != nil {
324 | errs.Add(err)
325 | }
326 | }
327 | if parseOpts.Filter.Time != "" {
328 | if err := conf.MatcherManager.AddFilter("time", parseOpts.Filter.Time, false); err != nil {
329 | errs.Add(err)
330 | }
331 | }
332 | if parseOpts.Matcher.Size != "" {
333 | if err := conf.MatcherManager.AddMatcher("size", parseOpts.Matcher.Size); err != nil {
334 | errs.Add(err)
335 | }
336 | }
337 | if parseOpts.Matcher.Regexp != "" {
338 | if err := conf.MatcherManager.AddMatcher("regexp", parseOpts.Matcher.Regexp); err != nil {
339 | errs.Add(err)
340 | }
341 | }
342 | if parseOpts.Matcher.Words != "" {
343 | if err := conf.MatcherManager.AddMatcher("word", parseOpts.Matcher.Words); err != nil {
344 | errs.Add(err)
345 | }
346 | }
347 | if parseOpts.Matcher.Lines != "" {
348 | if err := conf.MatcherManager.AddMatcher("line", parseOpts.Matcher.Lines); err != nil {
349 | errs.Add(err)
350 | }
351 | }
352 | if parseOpts.Matcher.Time != "" {
353 | if err := conf.MatcherManager.AddMatcher("time", parseOpts.Matcher.Time); err != nil {
354 | errs.Add(err)
355 | }
356 | }
357 | if conf.IgnoreBody && warningIgnoreBody {
358 | fmt.Printf("*** Warning: possible undesired combination of -ignore-body and the response options: fl,fs,fw,ml,ms and mw.\n")
359 | }
360 | return errs.ErrorOrNil()
361 | }
362 |
363 | func printSearchResults(conf *ffuf.Config, pos int, exectime time.Time, hash string) {
364 | inp, err := input.NewInputProvider(conf)
365 | if err.ErrorOrNil() != nil {
366 | fmt.Printf("-------------------------------------------\n")
367 | fmt.Println("Encountered error that prevents reproduction of the request:")
368 | fmt.Println(err.ErrorOrNil())
369 | return
370 | }
371 | inp.SetPosition(pos)
372 | inputdata := inp.Value()
373 | inputdata["FFUFHASH"] = []byte(hash)
374 | basereq := ffuf.BaseRequest(conf)
375 | dummyrunner := runner.NewRunnerByName("simple", conf, false)
376 | ffufreq, _ := dummyrunner.Prepare(inputdata, &basereq)
377 | rawreq, _ := dummyrunner.Dump(&ffufreq)
378 | fmt.Printf("-------------------------------------------\n")
379 | fmt.Printf("ffuf job started at: %s\n\n", exectime.Format(time.RFC3339))
380 | fmt.Printf("%s\n", string(rawreq))
381 | }
382 |
--------------------------------------------------------------------------------