Version 2.06b: Major update, see full commit msg

  - Crawler update that gives more control over injection test
    scheduling. This comes with the --checks and --checks-toggle
    flags to display and enable/disable checks (a rough toggling
    sketch follows this list).
  - Pages whose responses vary are no longer completely discarded.
    Instead, we now only disable the tests that require stability,
    which increases scan coverage.
  - Split the traversal and disclosure tests to increase coverage:
    traversal checks require stable pages, while disclosure checks
    can be performed on all pages.
  - Updated dictionaries and converted them to use the dictionary
    optimisations introduced in 2.03b.
  - Fixed offline report viewing (thanks to Sebastian Roschke)
  - Added NULL byte file disclosure tests
  - Added JSP inclusion error check to analysis.c
  - Added XSS injection tests for cookies
  - Directory listings are now reported as individual (info-type) issues
  - Added warning in case the negotiated SSL cipher turns out to be a
    weak one (leaving the cipher enumeration to network scanners)
  - Added experimental -v flag, which can be used to enable (limited)
    runtime reporting. This output is written to stderr and should be
    redirected to a file, unless you use the -u flag.
  - The man page has been rewritten and now includes detailed
    descriptions and examples.
  - A whole bunch of small bug fixes
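
The check scheduling mentioned above is built around the cb_handle table
declared in checks.h later in this commit; the real implementation is in
checks.c, whose diff is suppressed below. As a rough sketch only, this is
one way the --checks / --checks-toggle plumbing (display_injection_checks()
and toggle_injection_checks(), both declared in checks.h) could walk such a
table. The table name, its contents and the substring matching are
assumptions made purely for illustration.

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;
typedef unsigned int  u32;

/* Trimmed copy of the relevant cb_handle fields from checks.h. */
struct cb_handle {
  u8* name;                          /* name or title of the check */
  u32 skip;                          /* bool to disable the check  */
};

/* Hypothetical table contents; the real table lives in checks.c. */
static struct cb_handle checks[] = {
  { (u8*)"dir traversal",   0 },
  { (u8*)"file disclosure", 0 },
  { (u8*)"xss injection",   0 },
};

#define CHECK_CNT (sizeof(checks) / sizeof(checks[0]))

/* --checks: list every known check and whether it is enabled. */
void display_injection_checks(void) {
  u32 i;
  for (i = 0; i < CHECK_CNT; i++)
    printf("%2u  %-18s %s\n", i, (char*)checks[i].name,
           checks[i].skip ? "disabled" : "enabled");
}

/* --checks-toggle: flip every check whose name matches str. */
void toggle_injection_checks(u8* str, u32 enable) {
  u32 i;
  for (i = 0; i < CHECK_CNT; i++)
    if (strstr((char*)checks[i].name, (char*)str))
      checks[i].skip = enable ? 0 : 1;
}
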
Steve Pinkham 2012-09-12 16:48:57 -04:00
parent 771e70eba4
commit d1f54c9fe2
28 changed files with 9549 additions and 10588 deletions


@ -1,3 +1,44 @@
Version 2.06b:
--------------
- Crawler update which gives more control over the injection test
scheduling. This comes with the --checks and --checks-toggle
flags to display and enable/disable checks.
- Pages where the response varies are no longer completely
discarded. Instead now we only disable tests that require stability
which increases scan coverage.
- Split the traversal and disclosure test to increase coverage:
traversal checks require stable pages, the disclosure checks can be
performed on all.
- Updated dictionaries and converted them to use the dictionary
optimisations we introduced in 2.03b
- Fixed offline report viewing (thanks to Sebastian Roschke)
- Added NULL byte file disclosure tests
- Added JSP inclusion error check to analyse.c
- Added XSS injection tests for cookies
- Directory listings are now reported as individual (info-type) issues
- Added warning in case the negotiated SSL cipher turns out to be a
weak one (leaving the cipher enumeration to network scanners)
- Added experimental -v flag which can be used to enable (limited)
runtime reporting. This output is written to stderr and should be
redirected to a file, unless you use the -u flag.
- The man page has been rewritten and now includes detailed descriptions
and examples.
- A whole bunch of small bug fixes
Version 2.05b:
--------------


@ -20,11 +20,13 @@
#
PROGNAME = skipfish
VERSION = 2.05b
VERSION = 2.06b
OBJFILES = http_client.c database.c crawler.c analysis.c report.c
OBJFILES = http_client.c database.c crawler.c analysis.c report.c \
checks.c
INCFILES = alloc-inl.h string-inl.h debug.h types.h http_client.h \
database.h crawler.h analysis.h config.h report.h
database.h crawler.h analysis.h config.h report.h \
checks.h
CFLAGS_GEN = -Wall -funsigned-char -g -ggdb -I/usr/local/include/ \
-I/opt/local/include/ $(CFLAGS) -DVERSION=\"$(VERSION)\"

README (2 changed lines)

@ -454,7 +454,7 @@ $ ./skipfish -MEU -S dictionaries/minimal.wl -W new_dict.wl \
-C "AuthCookie=value" -X /logout.aspx -o output_dir \
http://www.example.com/
Five-connection crawl, but no brute-force; pretending to be MSIE and and
Five-connection crawl, but no brute-force; pretending to be MSIE and
trusting example.com content:
$ ./skipfish -m 5 -L -W- -o output_dir -b ie -B example.com \


@ -7,7 +7,7 @@
Author: Michal Zalewski <lcamtuf@google.com>
Copyright 2009, 2010, 2011 by Google Inc. All Rights Reserved.
Copyright 2009 - 2012 by Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -35,18 +35,48 @@
#define ALLOC_CHECK_SIZE(_s) do { \
if ((_s) > MAX_ALLOC) \
FATAL("bad alloc request: %u bytes", (_s)); \
ABORT("Bad alloc request: %u bytes", (_s)); \
} while (0)
#define ALLOC_CHECK_RESULT(_r,_s) do { \
if (!(_r)) \
FATAL("out of memory: can't allocate %u bytes", (_s)); \
ABORT("Out of memory: can't allocate %u bytes", (_s)); \
} while (0)
#define ALLOC_MAGIC 0xFF00
#define ALLOC_MAGIC_F 0xFE00
#define ALLOC_C(_ptr) (((u16*)(_ptr))[-3])
#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1])
#define CHECK_PTR(_p) do { \
if ((_p) && ALLOC_C(_p) != ALLOC_MAGIC) {\
if (ALLOC_C(_p) == ALLOC_MAGIC_F) \
ABORT("Use after free."); \
else \
ABORT("Bad alloc canary."); \
} \
} while (0)
#define ALLOC_MAGIC 0xFF00
#define ALLOC_C(_ptr) (((u16*)(_ptr))[-3])
#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1])
#define CHECK_PTR_EXPR(_p) ({ \
typeof (_p) _tmp = (_p); \
CHECK_PTR(_tmp); \
_tmp; \
})
#ifdef CHECK_UAF
# define CP(_p) CHECK_PTR_EXPR(_p)
#else
# define CP(_p) (_p)
#endif /* ^CHECK_UAF */
#ifdef ALIGN_ACCESS
# define ALLOC_OFF 8
#else
# define ALLOC_OFF 6
#endif /* ^ALIGN_ACCESS */
static inline void* __DFL_ck_alloc(u32 size) {
void* ret;
@ -54,10 +84,10 @@ static inline void* __DFL_ck_alloc(u32 size) {
if (!size) return NULL;
ALLOC_CHECK_SIZE(size);
ret = malloc(size + 6);
ret = malloc(size + ALLOC_OFF);
ALLOC_CHECK_RESULT(ret, size);
ret += 6;
ret += ALLOC_OFF;
ALLOC_C(ret) = ALLOC_MAGIC;
ALLOC_S(ret) = size;
@ -71,21 +101,64 @@ static inline void* __DFL_ck_realloc(void* orig, u32 size) {
u32 old_size = 0;
if (!size) {
if (orig) free(orig - 6);
if (orig) {
CHECK_PTR(orig);
/* Catch pointer issues sooner. */
#ifdef DEBUG_ALLOCATOR
memset(orig - ALLOC_OFF, 0xFF, ALLOC_S(orig) + ALLOC_OFF);
ALLOC_C(orig) = ALLOC_MAGIC_F;
#endif /* DEBUG_ALLOCATOR */
free(orig - ALLOC_OFF);
}
return NULL;
}
if (orig) {
if (ALLOC_C(orig) != ALLOC_MAGIC) ABORT("Bad alloc canary");
CHECK_PTR(orig);
#ifndef DEBUG_ALLOCATOR
ALLOC_C(orig) = ALLOC_MAGIC_F;
#endif /* !DEBUG_ALLOCATOR */
old_size = ALLOC_S(orig);
orig -= 6;
orig -= ALLOC_OFF;
ALLOC_CHECK_SIZE(old_size);
}
ALLOC_CHECK_SIZE(size);
ret = realloc(orig, size + 6);
#ifndef DEBUG_ALLOCATOR
ret = realloc(orig, size + ALLOC_OFF);
ALLOC_CHECK_RESULT(ret, size);
#else
/* Catch pointer issues sooner: force relocation and make sure that the
original buffer is wiped. */
ret = malloc(size + ALLOC_OFF);
ALLOC_CHECK_RESULT(ret, size);
ret += 6;
if (orig) {
memcpy(ret + ALLOC_OFF, orig + ALLOC_OFF, MIN(size, old_size));
memset(orig, 0xFF, old_size + ALLOC_OFF);
ALLOC_C(orig + ALLOC_OFF) = ALLOC_MAGIC_F;
free(orig);
}
#endif /* ^!DEBUG_ALLOCATOR */
ret += ALLOC_OFF;
ALLOC_C(ret) = ALLOC_MAGIC;
ALLOC_S(ret) = size;
@ -97,7 +170,26 @@ static inline void* __DFL_ck_realloc(void* orig, u32 size) {
}
static inline void* __DFL_ck_strdup(u8* str) {
static inline void* __DFL_ck_realloc_kb(void* orig, u32 size) {
#ifndef DEBUG_ALLOCATOR
if (orig) {
CHECK_PTR(orig);
if (ALLOC_S(orig) >= size) return orig;
size = ((size >> 10) + 1) << 10;
}
#endif /* !DEBUG_ALLOCATOR */
return __DFL_ck_realloc(orig, size);
}
static inline u8* __DFL_ck_strdup(u8* str) {
void* ret;
u32 size;
@ -106,10 +198,10 @@ static inline void* __DFL_ck_strdup(u8* str) {
size = strlen((char*)str) + 1;
ALLOC_CHECK_SIZE(size);
ret = malloc(size + 6);
ret = malloc(size + ALLOC_OFF);
ALLOC_CHECK_RESULT(ret, size);
ret += 6;
ret += ALLOC_OFF;
ALLOC_C(ret) = ALLOC_MAGIC;
ALLOC_S(ret) = size;
@ -118,16 +210,16 @@ static inline void* __DFL_ck_strdup(u8* str) {
}
static inline void* __DFL_ck_memdup(u8* mem, u32 size) {
static inline void* __DFL_ck_memdup(void* mem, u32 size) {
void* ret;
if (!mem || !size) return NULL;
ALLOC_CHECK_SIZE(size);
ret = malloc(size + 6);
ret = malloc(size + ALLOC_OFF);
ALLOC_CHECK_RESULT(ret, size);
ret += 6;
ret += ALLOC_OFF;
ALLOC_C(ret) = ALLOC_MAGIC;
ALLOC_S(ret) = size;
@ -136,88 +228,146 @@ static inline void* __DFL_ck_memdup(u8* mem, u32 size) {
}
static inline void __DFL_ck_free(void* mem) {
if (mem) {
if (ALLOC_C(mem) != ALLOC_MAGIC) ABORT("Bad alloc canary");
free(mem - 6);
}
static inline u8* __DFL_ck_memdup_str(u8* mem, u32 size) {
u8* ret;
if (!mem || !size) return NULL;
ALLOC_CHECK_SIZE(size);
ret = malloc(size + ALLOC_OFF + 1);
ALLOC_CHECK_RESULT(ret, size);
ret += ALLOC_OFF;
ALLOC_C(ret) = ALLOC_MAGIC;
ALLOC_S(ret) = size;
memcpy(ret, mem, size);
ret[size] = 0;
return ret;
}
static inline void __DFL_ck_free(void* mem) {
if (mem) {
CHECK_PTR(mem);
#ifdef DEBUG_ALLOCATOR
/* Catch pointer issues sooner. */
memset(mem - ALLOC_OFF, 0xFF, ALLOC_S(mem) + ALLOC_OFF);
#endif /* DEBUG_ALLOCATOR */
ALLOC_C(mem) = ALLOC_MAGIC_F;
free(mem - ALLOC_OFF);
}
}
#ifndef DEBUG_ALLOCATOR
/* Non-debugging mode - straightforward aliasing. */
#define ck_alloc __DFL_ck_alloc
#define ck_realloc __DFL_ck_realloc
#define ck_realloc_kb __DFL_ck_realloc_kb
#define ck_strdup __DFL_ck_strdup
#define ck_memdup __DFL_ck_memdup
#define ck_memdup_str __DFL_ck_memdup_str
#define ck_free __DFL_ck_free
#else
/* Debugging mode - include additional structures and support code. */
#define ALLOC_BUCKETS 1024
#define ALLOC_BUCKETS 4096
#define ALLOC_TRK_CHUNK 256
struct __AD_trk_obj {
struct TRK_obj {
void *ptr;
char *file, *func;
u32 line;
u32 line;
};
extern struct __AD_trk_obj* __AD_trk[ALLOC_BUCKETS];
extern u32 __AD_trk_cnt[ALLOC_BUCKETS];
extern struct TRK_obj* TRK[ALLOC_BUCKETS];
extern u32 TRK_cnt[ALLOC_BUCKETS];
#define __AD_H(_ptr) (((((u32)(long)(_ptr)) >> 16) ^ ((u32)(long)(_ptr))) % \
ALLOC_BUCKETS)
#ifndef __LP64__
#define TRKH(_ptr) (((((u32)_ptr) >> 16) ^ ((u32)_ptr)) % ALLOC_BUCKETS)
#else
#define TRKH(_ptr) (((((u64)_ptr) >> 16) ^ ((u64)_ptr)) % ALLOC_BUCKETS)
#endif
/* Adds a new entry to the list of allocated objects. */
static inline void __AD_alloc_buf(void* ptr, const char* file, const char* func,
u32 line) {
u32 i, b;
static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
u32 line) {
u32 i, bucket;
if (!ptr) return;
b = __AD_H(ptr);
bucket = TRKH(ptr);
for (i=0;i<__AD_trk_cnt[b];i++)
if (!__AD_trk[b][i].ptr) {
__AD_trk[b][i].ptr = ptr;
__AD_trk[b][i].file = (char*)file;
__AD_trk[b][i].func = (char*)func;
__AD_trk[b][i].line = line;
for (i = 0; i < TRK_cnt[bucket]; i++)
if (!TRK[bucket][i].ptr) {
TRK[bucket][i].ptr = ptr;
TRK[bucket][i].file = (char*)file;
TRK[bucket][i].func = (char*)func;
TRK[bucket][i].line = line;
return;
}
__AD_trk[b] = __DFL_ck_realloc(__AD_trk[b],
(__AD_trk_cnt[b] + 1) * sizeof(struct __AD_trk_obj));
/* No space available. */
//TRK[bucket] = __DFL_ck_realloc(TRK[bucket],
// (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
__AD_trk[b][__AD_trk_cnt[b]].ptr = ptr;
__AD_trk[b][__AD_trk_cnt[b]].file = (char*)file;
__AD_trk[b][__AD_trk_cnt[b]].func = (char*)func;
__AD_trk[b][__AD_trk_cnt[b]].line = line;
__AD_trk_cnt[b]++;
if (!(i % ALLOC_TRK_CHUNK)) {
TRK[bucket] = __DFL_ck_realloc(TRK[bucket],
TRK_cnt[bucket] + ALLOC_TRK_CHUNK * sizeof(struct TRK_obj));
}
TRK[bucket][i].ptr = ptr;
TRK[bucket][i].file = (char*)file;
TRK[bucket][i].func = (char*)func;
TRK[bucket][i].line = line;
TRK_cnt[bucket]++;
}
/* Removes entry from the list of allocated objects. */
static inline void __AD_free_buf(void* ptr, const char* file, const char* func,
u32 line) {
u32 i, b;
static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
u32 line) {
u32 i, bucket;
if (!ptr) return;
b = __AD_H(ptr);
bucket = TRKH(ptr);
for (i=0;i<__AD_trk_cnt[b];i++)
if (__AD_trk[b][i].ptr == ptr) {
__AD_trk[b][i].ptr = 0;
for (i = 0; i < TRK_cnt[bucket]; i++)
if (TRK[bucket][i].ptr == ptr) {
TRK[bucket][i].ptr = 0;
return;
}
WARN("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)",
@ -228,75 +378,125 @@ static inline void __AD_free_buf(void* ptr, const char* file, const char* func,
/* Does a final report on all non-deallocated objects. */
static inline void __AD_report(void) {
u32 i, b;
static inline void __TRK_report(void) {
u32 i, bucket;
fflush(0);
for (b=0;b<ALLOC_BUCKETS;b++)
for (i=0;i<__AD_trk_cnt[b];i++)
if (__AD_trk[b][i].ptr)
for (bucket = 0; bucket < ALLOC_BUCKETS; bucket++)
for (i = 0; i < TRK_cnt[bucket]; i++)
if (TRK[bucket][i].ptr)
WARN("ALLOC: Memory never freed, created in %s (%s:%u)",
__AD_trk[b][i].func, __AD_trk[b][i].file, __AD_trk[b][i].line);
TRK[bucket][i].func, TRK[bucket][i].file, TRK[bucket][i].line);
}
/* Simple wrappers for non-debugging functions: */
static inline void* __AD_ck_alloc(u32 size, const char* file, const char* func,
u32 line) {
static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
u32 line) {
void* ret = __DFL_ck_alloc(size);
__AD_alloc_buf(ret, file, func, line);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* __AD_ck_realloc(void* orig, u32 size, const char* file,
const char* func, u32 line) {
void* ret = __DFL_ck_realloc(orig, size);
__AD_free_buf(orig, file, func, line);
__AD_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* __AD_ck_strdup(u8* str, const char* file, const char* func,
u32 line) {
void* ret = __DFL_ck_strdup(str);
__AD_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* __AD_ck_memdup(u8* mem, u32 size, const char* file,
static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
const char* func, u32 line) {
void* ret = __DFL_ck_memdup(mem, size);
__AD_alloc_buf(ret, file, func, line);
void* ret = __DFL_ck_realloc(orig, size);
TRK_free_buf(orig, file, func, line);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void __AD_ck_free(void* ptr, const char* file,
static inline void* TRK_ck_realloc_kb(void* orig, u32 size, const char* file,
const char* func, u32 line) {
void* ret = __DFL_ck_realloc_kb(orig, size);
TRK_free_buf(orig, file, func, line);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
u32 line) {
void* ret = __DFL_ck_strdup(str);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
const char* func, u32 line) {
void* ret = __DFL_ck_memdup(mem, size);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
const char* func, u32 line) {
void* ret = __DFL_ck_memdup_str(mem, size);
TRK_alloc_buf(ret, file, func, line);
return ret;
}
static inline void TRK_ck_free(void* ptr, const char* file,
const char* func, u32 line) {
__AD_free_buf(ptr, file, func, line);
TRK_free_buf(ptr, file, func, line);
__DFL_ck_free(ptr);
}
/* Populates file / function / line number data to *_d wrapper calls: */
/* Alias user-facing names to tracking functions: */
#define ck_alloc(_p1) \
__AD_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
#define ck_realloc(_p1, _p2) \
__AD_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_realloc_kb(_p1, _p2) \
TRK_ck_realloc_kb(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_strdup(_p1) \
__AD_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
#define ck_memdup(_p1, _p2) \
__AD_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_memdup_str(_p1, _p2) \
TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_free(_p1) \
__AD_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
#endif /* ^!DEBUG_ALLOCATOR */
#define alloc_printf(_str...) ({ \
u8* _tmp; \
s32 _len = snprintf(NULL, 0, _str); \
if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
_tmp = ck_alloc(_len + 1); \
snprintf((char*)_tmp, _len + 1, _str); \
_tmp; \
})
#endif /* ! _HAVE_ALLOC_INL_H */
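
For readers skimming the allocator changes above: every ck_alloc() block now
carries ALLOC_OFF bytes of header in front of the user pointer, holding a
16-bit canary (0xFF00 while live, 0xFE00 once freed) and the 32-bit block
size. CHECK_PTR() and the CP() wrapper inspect that canary to catch
corruption and use-after-free. Below is a self-contained sketch of the same
layout, assuming ALLOC_OFF = 6 (ALIGN_ACCESS not defined); it is illustrative
only and not a drop-in replacement for the header.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char  u8;
typedef unsigned short u16;
typedef unsigned int   u32;

#define ALLOC_OFF     6
#define ALLOC_MAGIC   0xFF00                 /* canary of a live block  */
#define ALLOC_MAGIC_F 0xFE00                 /* canary of a freed block */
#define ALLOC_C(_p)   (((u16*)(_p))[-3])     /* canary, 6 bytes back    */
#define ALLOC_S(_p)   (((u32*)(_p))[-1])     /* size, 4 bytes back      */

static void* sk_alloc(u32 size) {
  u8* ret = malloc(size + ALLOC_OFF);
  if (!ret) abort();
  ret += ALLOC_OFF;                          /* user data follows header */
  ALLOC_C(ret) = ALLOC_MAGIC;
  ALLOC_S(ret) = size;
  return memset(ret, 0, size);
}

static void sk_free(void* mem) {
  if (!mem) return;
  if (ALLOC_C(mem) == ALLOC_MAGIC_F) { puts("Use after free.");   abort(); }
  if (ALLOC_C(mem) != ALLOC_MAGIC)   { puts("Bad alloc canary."); abort(); }
  ALLOC_C(mem) = ALLOC_MAGIC_F;              /* mark freed, as __DFL_ck_free does */
  free((u8*)mem - ALLOC_OFF);
}

int main(void) {
  u8* p = sk_alloc(16);
  printf("size recorded in header: %u\n", ALLOC_S(p));   /* prints 16 */
  sk_free(p);
  /* A second sk_free(p) would now report "Use after free." -- provided the
     freed header bytes have not already been reused by the libc allocator. */
  return 0;
}
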


@ -1425,7 +1425,7 @@ next_elem:
/* Analyzes response headers and body to detect stored XSS, redirection,
401, 500 codes, exception messages, source code, caching issues, etc. */
void content_checks(struct http_request* req, struct http_response* res) {
u8 content_checks(struct http_request* req, struct http_response* res) {
u8* tmp;
u32 off, tag_id, scan_id;
u8 high_risk = 0;
@ -1563,7 +1563,7 @@ void content_checks(struct http_request* req, struct http_response* res) {
else if (res->code >= 500)
problem(PROB_SERV_ERR, req, res, NULL, req->pivot, 0);
if (!res->pay_len) return;
if (!res->pay_len) return 0;
if (!is_mostly_ascii(res)) goto binary_checks;
@ -1831,7 +1831,7 @@ binary_checks:
/* No MIME checks on Content-Disposition: attachment responses. */
if ((tmp = GET_HDR((u8*)"Content-Disposition", &res->hdr)) &&
inl_strcasestr(tmp, (u8*)"attachment")) return;
inl_strcasestr(tmp, (u8*)"attachment")) return 0;
// if (!relaxed_mime) {
//
@ -1920,6 +1920,7 @@ binary_checks:
}
return 0;
}
@ -2357,13 +2358,23 @@ static void check_for_stuff(struct http_request* req,
}
if (strstr((char*)res->payload, "<b>Warning</b>: MySQL: ") ||
strstr((char*)res->payload, "Unclosed quotation mark") ||
strstr((char*)res->payload, "Syntax error in string in query expression") ||
strstr((char*)res->payload, "java.sql.SQLException") ||
strstr((char*)res->payload, "You have an error in your SQL syntax; ")) {
strstr((char*)res->payload, "SqlClient.SqlException: Syntax error") ||
strstr((char*)res->payload, "Incorrect syntax near") ||
strstr((char*)res->payload, "PostgreSQL query failed") ||
strstr((char*)res->payload, "Dynamic SQL Error") ||
strstr((char*)res->payload, "unable to perform query") ||
strstr((char*)res->payload, "[Microsoft][ODBC SQL Server Driver]") ||
strstr((char*)res->payload, "You have an error in your SQL syntax; ") ||
strstr((char*)res->payload, "[DM_QUERY_E_SYNTAX]")) {
problem(PROB_ERROR_POI, req, res, (u8*)"SQL server error", req->pivot, 0);
return;
}
if ((tmp = (u8*)strstr((char*)res->payload, "ORA-")) &&
if (((tmp = (u8*)strstr((char*)res->payload, "ORA-")) ||
(tmp = (u8*)strstr((char*)res->payload, "FRM-"))) &&
isdigit(tmp[4]) && tmp[9] == ':') {
problem(PROB_ERROR_POI, req, res, (u8*)"Oracle server error", req->pivot, 0);
return;
@ -2478,7 +2489,7 @@ static void check_for_stuff(struct http_request* req,
strstr((char*)sniffbuf, "<a href=\"?C=N;O=D\">") ||
strstr((char*)sniffbuf, "<h1>Index of /") ||
strstr((char*)sniffbuf, ">[To Parent Directory]<")) {
problem(PROB_FILE_POI, req, res, (u8*)"Directory listing", req->pivot, 0);
problem(PROB_DIR_LIST, req, res, (u8*)"Directory listing", req->pivot, 0);
return;
}
@ -2569,6 +2580,7 @@ static void check_for_stuff(struct http_request* req,
u32 del = strcspn((char*)cur, ",|;\n");
eol = (u8*)strchr((char*)cur, '\n');
if(!eol) break;
if (!cur[del] || cur[del] == '\n' || (cur[del] == ',' &&
cur[del+1] == ' ')) {
@ -2660,4 +2672,3 @@ void maybe_delete_payload(struct pivot_desc* pv) {
}
}


@ -70,7 +70,7 @@ void scrape_response(struct http_request* req, struct http_response* res);
/* Analyzes response headers and body to detect stored XSS, redirection,
401, 500 codes, exception messages, source code, offensive comments, etc. */
void content_checks(struct http_request* req, struct http_response* res);
u8 content_checks(struct http_request* req, struct http_response* res);
/* Deletes payload of binary responses if requested. */


@ -259,6 +259,7 @@ var issue_desc= {
"10401": "Resource not directly accessible",
"10402": "HTTP authentication required",
"10403": "Server error triggered",
"10404": "Directory listing enabled",
"10501": "All external links",
"10502": "External URL redirector",
"10503": "All e-mail addresses",
@ -289,6 +290,7 @@ var issue_desc= {
"30202": "Self-signed SSL certificate",
"30203": "SSL certificate host name mismatch",
"30204": "No SSL certificate data found",
"30205": "Weak SSL cipher negotiated",
"30301": "Directory listing restrictions bypassed",
"30401": "Redirection to attacker-supplied URLs",
"30402": "Attacker-supplied URLs in embedded content (lower risk)",
@ -322,6 +324,7 @@ var issue_desc= {
"50103": "Query injection vector",
"50104": "Format string vector",
"50105": "Integer overflow vector",
"50106": "File inclusion",
"50201": "SQL query or similar syntax in parameters",
"50301": "PUT request accepted"
@ -398,20 +401,47 @@ function toggle_node(dir, tid) {
/* Displays request or response dump in a faux window. */
var g_path = '';
var g_ignore = 0;
function show_dat(path, ignore) {
g_path = path;
g_ignore = ignore;
/* workaround for cases where there is no response */
if (typeof req !== 'undefined')
if (req !== null) req = null;
if (typeof res !== 'undefined')
if (res !== null) res = null;
prepare_view();
load_script(path + '/request.js', render_dat);
load_script(path + '/response.js', render_dat_res);
}
var req_text = '';
var res_text = '';
var finalize = 0;
var pX = 0;
var pY = 0;
function prepare_view() {
var out = document.getElementById('req_txtarea'),
cov = document.getElementById('cover');
cov = document.getElementById('cover');
document.body.style.overflow = 'hidden';
out.value = '';
var x = new XMLHttpRequest();
var content;
var path = g_path;
var ignore = g_ignore;
var pX = window.scrollX ? window.scrollX : document.body.scrollLeft;
var pY = window.scrollY ? window.scrollY : document.body.scrollTop;
pX = window.scrollX ? window.scrollX : document.body.scrollLeft;
pY = window.scrollY ? window.scrollY : document.body.scrollTop;
out.parentNode.style.left = pX;
out.parentNode.style.top = pY;
@ -421,56 +451,139 @@ function show_dat(path, ignore) {
out.parentNode.style.display = 'block';
cov.style.display = 'block';
x.open('GET', path + '/request.dat', false);
x.send(null);
content = '=== REQUEST ===\n\n' + x.responseText;
x.open('GET', path + '/response.dat', false);
x.send(null);
if (x.responseText.substr(0,5) == 'HTTP/')
content += '\n=== RESPONSE ===\n\n' + x.responseText + '\n=== END OF DATA ===\n';
else content += '\n=== RESPONSE NOT AVAILABLE ===\n\n=== END OF DATA ===\n';
out.value = content;
delete x;
req_text = '';
res_text = '';
finalize = 0;
out.focus();
window.scrollTo(pX, pY);
if (ignore) ignore_click = true;
return false;
}
function render_dat() {
if (typeof req !== 'undefined')
if (req != null) req_text = req.data;
if (req_text != null && finalize) finalize_view();
finalize = 1;
}
function render_dat_res() {
if (typeof res !== 'undefined')
if (res != null) res_text = res.data;
if (res_text != null && finalize) finalize_view();
finalize = 1;
}
function finalize_view() {
var out = document.getElementById('req_txtarea');
var content = '=== REQUEST ===\n\n' + req_text;
if (res_text.substr(0,5) == 'HTTP/')
content += '\n=== RESPONSE ===\n\n' + res_text + '\n=== END OF DATA ===\n';
else content += '\n=== RESPONSE NOT AVAILABLE ===\n\n=== END OF DATA ===\n';
out.value = content;
out.focus();
window.scrollTo(pX,pY);
}
/* Displays request or response dump in a proper window. */
var wind = null;
function show_win(path, ignore) {
var out = window.open('','_blank','scroll=yes,addressbar=no');
var x = new XMLHttpRequest();
var content;
x.open('GET', path + '/request.dat', false);
x.send(null);
g_path = path;
g_ignore = g_ignore;
wind = null;
content = '=== REQUEST ===\n\n' + x.responseText;
/* workaround when there is no response */
if (typeof req !== 'undefined')
if (req !== null) req = null;
if (typeof res !== 'undefined')
if (res !== null) res = null;
x.open('GET', path + '/response.dat', false);
x.send(null);
prepare_win();
if (x.responseText.substr(0,5) == 'HTTP/')
content += '\n=== RESPONSE ===\n\n' + x.responseText + '\n=== END OF DATA ===\n';
load_script(path + '/request.js', render_win);
load_script(path + '/response.js', render_win_res);
}
function prepare_win() {
wind = window.open('','_blank','scroll=yes,addressbar=no');
var out = wind;
var content = '';
var path = g_path;
var ignore = g_ignore;
req_text = '';
res_text = '';
finalize = 0;
}
/* Callback to render request or response dump */
function render_win() {
req_text = '';
if (typeof req !== 'undefined') req_text = req.data;
if (req_text != null && finalize) finalize_win();
finalize = 1;
}
function render_win_res() {
res_text = '';
if (typeof res !== 'undefined')
if (res != null) res_text = res.data;
if (res_text != null && finalize) finalize_win();
finalize = 1;
}
function finalize_win() {
if (typeof wind == 'undefined') return;
if (wind == null) return;
var out = wind;
var content = '';
content = '=== REQUEST ===\n\n' + req_text;
if (res_text.substr(0,5) == 'HTTP/')
content += '\n=== RESPONSE ===\n\n' + res_text + '\n=== END OF DATA ===\n';
else content += '\n=== RESPONSE NOT AVAILABLE ===\n\n=== END OF DATA ===\n';
out.document.body.innerHTML = '<pre></pre>';
out.document.body.firstChild.appendChild(out.document.createTextNode(content));
delete x;
if (ignore) ignore_click = true;
return false;
}
return false;
}
/* Hides request view. */
@ -487,31 +600,61 @@ function hide_dat() {
document.getElementById('cover').style.display = 'none'
}
/* Dynamically load JavaScript files */
function load_script(sname, callback) {
/* Remove previously loaded scripts */
var old_script = document.getElementsByName('tmp_script');
for (var i = 0; i < old_script.length; i++) {
document.removeChild(old_script[i]);
}
var head = document.getElementsByTagName('head')[0];
var script = document.createElement('script');
script.type = 'text/javascript';
script.id = sname;
script.name = 'tmp_script';
script.src = sname;
script.onload = callback;
script.onerror = callback;
head.appendChild(script);
}
/* Loads issues, children for a node, renders HTML. */
var g_add_html = '';
var g_tid = 0;
var g_dir = '';
function load_node(dir, tid) {
var x = new XMLHttpRequest();
g_dir = dir;
g_tid = tid;
var t = document.getElementById('c_' + tid);
var add_html = '';
x.open('GET', dir + 'child_index.js', false);
x.send(null);
eval(x.responseText);
x.open('GET', dir + 'issue_index.js', false);
x.send(null);
eval(x.responseText);
load_script(dir + 'child_index.js', function () {});
if (diff_mode) {
x.open('GET', dir + 'diff_data.js', false);
x.send(null);
eval(x.responseText);
load_script(dir + 'issue_index.js', function () {});
load_script(dir + 'diff_data.js', load_issues);
} else {
load_script(dir + 'issue_index.js', load_issues);
}
delete x;
}
next_opacity('c_' + tid, 0);
/* Function callback to render the node data */
function load_issues() {
var t = document.getElementById('c_' + g_tid);
next_opacity('c_' + g_tid, 0);
var add_html = '';
var dir = g_dir;
if (issue.length > 0)
add_html += '<div class=issue_ctr>\n';
@ -554,7 +697,7 @@ function load_node(dir, tid) {
add_html += '<li><div class="fetch_info">' +
'Fetch result: ' + i2.error + '</div>';
}
if (i2.extra.length > 0) add_html += '<div class="comment">Memo: ' + H(i2.extra) + '</div>\n';
}

checks.c (new file, 1677 lines)

File diff suppressed because it is too large.

checks.h (new file, 59 lines)

@ -0,0 +1,59 @@
#ifndef _HAVE_CHECKS_H
#include "types.h"
#include "http_client.h"
#include "database.h"
/* The init crawler structure which loads the test/check combos */
void init_injection_checks(void);
/* The crawler structure helper functions */
void display_injection_checks(void);
void release_injection_checks(void);
void toggle_injection_checks(u8* str, u32 enable);
/* The inject state manager callback function is used in crawler.c to
direct the flow to the state manager where all the injection tests are
performed. */
u8 inject_state_manager(struct http_request* req, struct http_response* res);
#ifdef _VIA_CHECKS_C
/* The test/check struct with pointers to callback functions */
struct cb_handle {
u32 res_num; /* Amount of expected responses */
u32 res_keep; /* Bool for keeping req/res */
u8 allow_varies; /* Bool to accept pivots with res_varies */
u8 scrape; /* Scrape links, or not.. */
u32 pv_flag; /* Flag to match pivot type */
u8* name; /* Name or title of the check */
u8 (*tests)(struct pivot_desc* pivot);
u8 (*checks)(struct http_request*, struct http_response*);
u32 skip; /* Bool to disable the check */
};
/* Strings for traversal and file disclosure tests. The order should
not be changed */
static const char* disclosure_tests[] = {
"../../../../../../../../etc/hosts",
"..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2fetc%2fhosts%00.js",
"../../../../../../../../etc/passwd",
"..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2fetc%2fpasswd%00.js",
"..\\..\\..\\..\\..\\..\\..\\..\\boot.ini",
"..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5cboot.ini%00.js",
"../../../../../../../../WEB-INF/web.xml",
"..%2f..%2f..%2f..%2f..%2f..%2f..%2f..%2fWEB-INF%2fweb.xml%3f.js",
"file:///etc/hosts",
"file:///etc/passwd",
"file:///boot.ini",
0
};
#endif /* _VIA_CHECKS_C */
#endif /* _HAVE_CHECKS_H */
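
The struct above is only visible when _VIA_CHECKS_C is defined, i.e. inside
checks.c, whose diff is suppressed in this commit view. As a hedged sketch of
how a single test/check combo might be registered against that struct, the
example_checks table and the two dir_traversal_* callbacks below are invented
names, not the project's actual ones.

#define _VIA_CHECKS_C        /* expose struct cb_handle from checks.h */
#include "checks.h"          /* pulls in types.h, http_client.h, database.h */

/* Stub callbacks: 'tests' would queue the injection requests for a pivot,
   'checks' would score the responses once they have all arrived. */
static u8 dir_traversal_tests(struct pivot_desc* pivot) {
  (void)pivot; return 0;
}

static u8 dir_traversal_checks(struct http_request* req,
                               struct http_response* res) {
  (void)req; (void)res; return 0;
}

static struct cb_handle example_checks[] = {
  { /* res_num      */ 1,                    /* expected responses           */
    /* res_keep     */ 0,                    /* do not keep req/res pairs    */
    /* allow_varies */ 0,                    /* traversal needs stable pages */
    /* scrape       */ 0,                    /* do not scrape links          */
    /* pv_flag      */ PIVOT_FILE,           /* presumed: file pivots only   */
    /* name         */ (u8*)"dir traversal",
    /* tests        */ dir_traversal_tests,
    /* checks       */ dir_traversal_checks,
    /* skip         */ 0 },                  /* enabled by default           */
};
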


@ -73,6 +73,12 @@
#define MAX_ALLOC 0x50000000 /* Refuse larger allocations. */
/* Detect use-after-free, at the expense of some performance cost: */
#ifdef DEBUG_ALLOCATOR
#define CHECK_UAF 1
#endif /* DEBUG_ALLOCATOR */
/* Configurable settings for crawl database (cmdline override): */
#define MAX_DEPTH 16 /* Maximum crawl tree depth */

crawler.c (1589 changed lines)

File diff suppressed because it is too large.


@ -26,23 +26,43 @@
#include "http_client.h"
#include "database.h"
#ifdef _VIA_CRAWLER_C
/* Function called during startup to build the test/check structure */
/* Strings for traversal and file disclosure tests. The order should
not be changed */
void replace_slash(struct http_request* req, u8* new_val);
void handle_error(struct http_request* req, struct http_response* res, u8* desc, u8 stop);
void inject_done(struct pivot_desc*);
void destroy_misc_data(struct pivot_desc* pv, struct http_request* self);
struct pivot_desc* dir_parent(struct pivot_desc* pv);
static const char* disclosure_tests[] = {
"../../../../../../../../etc/hosts",
"../../../../../../../../etc/passwd",
"..\\..\\..\\..\\..\\..\\..\\..\\boot.ini",
"../../../../../../../../WEB-INF/web.xml",
"file:///etc/hosts",
"file:///etc/passwd",
"file:///boot.ini",
0
};
/* Internal helper macros: */
#endif
#define TPAR(_req) ((_req)->par.v[(_req)->pivot->fuzz_par])
#define SET_VECTOR(_state, _req, _str) do { \
if (_state == PSTATE_CHILD_INJECT) { \
replace_slash((_req), (u8*)_str); \
} else { \
ck_free(TPAR(_req)); \
TPAR(_req) = ck_strdup((u8*)_str); \
} \
} while (0)
#define APPEND_VECTOR(_state, _req, _str) do { \
if (_state == PSTATE_CHILD_INJECT) { \
replace_slash((_req), (u8*)_str); \
} else { \
u8* _n = ck_alloc(strlen((char*)TPAR(_req)) + strlen((char*)_str) + 1); \
sprintf((char*)_n, "%s%s", TPAR(_req), _str); \
ck_free(TPAR(_req)); \
TPAR(_req) = _n; \
} \
} while (0)
/* Classifies a response, with a special handling of "unavailable" and
"gateway timeout" codes. */
#define FETCH_FAIL(_res) ((_res)->state != STATE_OK || (_res)->code == 503 || \
(_res)->code == 504)
extern u32 crawl_prob; /* Crawl probability (1-100%) */
@ -107,6 +127,14 @@ void add_form_hint(u8* name, u8* value);
ck_free(_url); \
} while (0)
#define DEBUG_STATE_CALLBACK(_req, _state, _type) do { \
u8* _url = serialize_path(_req, 1, 1); \
DEBUG("* %s::%s: URL %s (running: %s)\n", __FUNCTION__, _state, _url, \
_type ? "checks" : "tests"); \
ck_free(_url); \
} while (0)
#define DEBUG_HELPER(_pv) do { \
u8* _url = serialize_path((_pv)->req, 1, 1); \
DEBUG("* %s: URL %s (%u, len %u)\n", __FUNCTION__, _url, (_pv)->res ? \
@ -117,6 +145,7 @@ void add_form_hint(u8* name, u8* value);
#else
#define DEBUG_CALLBACK(_req, _res)
#define DEBUG_STATE_CALLBACK(_req, _res, _cb)
#define DEBUG_HELPER(_pv)
#define DEBUG_PIVOT(_text, _pv)


@ -560,6 +560,16 @@ u8 is_c_sens(struct pivot_desc* pv) {
return pv->csens;
}
/* Lookup an issue title */
u8* lookup_issue_title(u32 id) {
u32 i = 0;
while(pstructs[i].id && pstructs[i].id != id)
i++;
return pstructs[i].title;
}
/* Registers a problem, if not duplicate (res, extra may be NULL): */
@ -571,7 +581,9 @@ void problem(u32 type, struct http_request* req, struct http_response* res,
if (pv->type == PIVOT_NONE) FATAL("Uninitialized pivot point");
if (type == PROB_NONE || !req) FATAL("Invalid issue data");
#ifdef LOG_STDERR
DEBUG("--- NEW PROBLEM - type: %u, extra: '%s' ---\n", type, extra);
#endif /* LOG_STDERR */
/* Check for duplicates */
@ -588,6 +600,16 @@ void problem(u32 type, struct http_request* req, struct http_response* res,
pv->issue[pv->issue_cnt].req = req_copy(req, pv, 1);
pv->issue[pv->issue_cnt].res = res_copy(res);
#ifndef LOG_STDERR
u8* url = serialize_path(req, 1, 1);
u8* title = lookup_issue_title(type);
DEBUGC(L1, "\n--- NEW PROBLEM\n");
DEBUGC(L1, " - type: %u, %s\n", type, title);
DEBUGC(L1, " - url: %s\n", url);
DEBUGC(L2, " - extra: %s\n", extra);
ck_free(url);
#endif /* LOG_STDERR */
/* Mark copies of half-baked requests as done. */
if (res && res->state < STATE_OK) {
@ -1117,7 +1139,7 @@ wordlist_retry:
}
if (fields == 1 && !strcmp((char*)type, "#r")) {
printf("Found %s (readonly:%d)\n", type, read_only);
DEBUG("Found %s (readonly:%d)\n", type, read_only);
if (!read_only)
FATAL("Attempt to load read-only wordlist '%s' via -W (use -S instead).\n", fname);
@ -1513,4 +1535,3 @@ void destroy_database() {
ck_free(xss_req);
}


@ -35,15 +35,15 @@
#define PIVOT_NONE 0 /* Invalid */
#define PIVOT_ROOT 1 /* Root pivot */
#define PIVOT_SERV 10 /* Top-level host pivot */
#define PIVOT_DIR 11 /* Directory pivot */
#define PIVOT_FILE 12 /* File pivot */
#define PIVOT_PATHINFO 13 /* PATH_INFO script */
#define PIVOT_SERV 2 /* Top-level host pivot */
#define PIVOT_DIR 4 /* Directory pivot */
#define PIVOT_FILE 8 /* File pivot */
#define PIVOT_PATHINFO 16 /* PATH_INFO script */
#define PIVOT_UNKNOWN 18 /* (Currently) unknown type */
#define PIVOT_UNKNOWN 32 /* (Currently) unknown type */
#define PIVOT_PARAM 100 /* Parameter fuzzing pivot */
#define PIVOT_VALUE 101 /* Parameter value pivot */
#define PIVOT_PARAM 64 /* Parameter fuzzing pivot */
#define PIVOT_VALUE 128 /* Parameter value pivot */
/* - Pivot states (initialized to PENDING or FETCH by database.c, then
advanced by crawler.c): */
@ -127,6 +127,9 @@ struct pivot_desc {
u32 r404_pending; /* ...for 404 probes */
u32 ck_pending; /* ...for behavior checks */
s32 check_idx; /* Current injection test */
u32 check_state; /* Current injection test */
struct http_sig r404[MAX_404]; /* 404 response signatures */
u32 r404_cnt; /* Number of sigs collected */
struct http_sig unk_sig; /* Original "unknown" sig. */
@ -139,7 +142,8 @@ struct pivot_desc {
struct http_response* misc_res[MISC_ENTRIES]; /* Saved responses */
u8 misc_cnt; /* Request / response count */
u8 i_skip[15]; /* Injection step skip flags */
#define MAX_CHECKS 32
u8 i_skip[MAX_CHECKS]; /* Injection step skip flags */
u8 i_skip_add;
u8 r404_skip;
@ -157,6 +161,7 @@ struct pivot_desc {
};
extern struct pivot_desc root_pivot;
extern u32 verbosity;
/* Checks child / descendant limits. */
@ -191,7 +196,11 @@ struct pivot_desc* host_pivot(struct pivot_desc* pv);
u8 is_c_sens(struct pivot_desc* pv);
/* Recorded security issues: */
/* Lookup an issue title */
u8* lookup_issue_title(u32 id);
/* Recorded security issues */
/* - Informational data (non-specific security-relevant notes): */
@ -208,6 +217,7 @@ u8 is_c_sens(struct pivot_desc* pv);
#define PROB_NO_ACCESS 10401 /* Resource not accessible */
#define PROB_AUTH_REQ 10402 /* Authentication requires */
#define PROB_SERV_ERR 10403 /* Server error */
#define PROB_DIR_LIST 10404 /* Directory listing */
#define PROB_EXT_LINK 10501 /* External link */
#define PROB_EXT_REDIR 10502 /* External redirector */
@ -250,8 +260,9 @@ u8 is_c_sens(struct pivot_desc* pv);
#define PROB_SSL_SELF_CERT 30202 /* Self-signed SSL cert */
#define PROB_SSL_BAD_HOST 30203 /* Certificate host mismatch */
#define PROB_SSL_NO_CERT 30204 /* No certificate data? */
#define PROB_SSL_WEAK_CIPHER 30205 /* Weak cipher negotiated */
#define PROB_DIR_LIST 30301 /* Dir listing bypass */
#define PROB_DIR_LIST_BYPASS 30301 /* Dir listing bypass */
#define PROB_URL_REDIR 30401 /* URL redirection */
#define PROB_USER_URL 30402 /* URL content inclusion */
@ -307,6 +318,106 @@ u8 is_c_sens(struct pivot_desc* pv);
#define PROB_PUT_DIR 50301 /* HTTP PUT accepted */
#ifdef _VIA_DATABASE_C
/* The definitions below are used to make problems, which are displayed
during runtime, more informational */
struct pstruct {
u32 id;
u8* title;
};
struct pstruct pstructs[] = {
/* - Informational data (non-specific security-relevant notes): */
{ PROB_SSL_CERT, (u8*)"SSL certificate issuer information" },
{ PROB_NEW_COOKIE, (u8*)"New HTTP cookie added" },
{ PROB_SERVER_CHANGE, (u8*)"New 'Server' header value seen" },
{ PROB_VIA_CHANGE, (u8*)"New 'Via' header value seen" },
{ PROB_X_CHANGE, (u8*)"New 'X-*' header value seen" },
{ PROB_NEW_404, (u8*)"New 404 signature seen" },
{ PROB_NO_ACCESS, (u8*)"Resource not directly accessible" },
{ PROB_AUTH_REQ, (u8*)"HTTP authentication required" },
{ PROB_SERV_ERR, (u8*)"Server error triggered" },
{ PROB_DIR_LIST, (u8*)"Directory listing found" },
{ PROB_EXT_LINK, (u8*)"All external links" },
{ PROB_EXT_REDIR, (u8*)"External URL redirector" },
{ PROB_MAIL_ADDR, (u8*)"All e-mail addresses" },
{ PROB_UNKNOWN_PROTO, (u8*)"Links to unknown protocols" },
{ PROB_UNKNOWN_FIELD, (u8*)"Unknown form field (can't autocomplete)" },
{ PROB_FORM, (u8*)"HTML form (not classified otherwise)" },
{ PROB_PASS_FORM, (u8*)"Password entry form - consider brute-force" },
{ PROB_FILE_FORM, (u8*)"File upload form" },
{ PROB_USER_LINK, (u8*)"User-supplied link rendered on a page" },
{ PROB_BAD_MIME_STAT, (u8*)"Incorrect or missing MIME type (low risk)" },
{ PROB_GEN_MIME_STAT, (u8*)"Generic MIME used (low risk)" },
{ PROB_BAD_CSET_STAT, (u8*)"Incorrect or missing charset (low risk)" },
{ PROB_CFL_HDRS_STAT, (u8*)"Conflicting MIME / charset info (low risk)" },
{ PROB_FUZZ_DIGIT, (u8*)"Numerical filename - consider enumerating" },
{ PROB_OGNL, (u8*)"OGNL-like parameter behavior" },
/* - Internal warnings (scan failures, etc): */
{ PROB_FETCH_FAIL, (u8*)"Resource fetch failed" },
{ PROB_LIMITS, (u8*)"Limits exceeded, fetch suppressed" },
{ PROB_404_FAIL, (u8*)"Directory behavior checks failed (no brute force)" },
{ PROB_PARENT_FAIL, (u8*)"Parent behavior checks failed (no brute force)" },
{ PROB_IPS_FILTER, (u8*)"IPS filtering enabled" },
{ PROB_IPS_FILTER_OFF, (u8*)"IPS filtering disabled again" },
{ PROB_VARIES, (u8*)"Response varies randomly, skipping checks" },
{ PROB_NOT_DIR, (u8*)"Node should be a directory, detection error?" },
/* - Low severity issues (limited impact or check specificity): */
{ PROB_URL_AUTH, (u8*)"HTTP credentials seen in URLs" },
{ PROB_SSL_CERT_DATE, (u8*)"SSL certificate expired or not yet valid" },
{ PROB_SSL_SELF_CERT, (u8*)"Self-signed SSL certificate" },
{ PROB_SSL_BAD_HOST, (u8*)"SSL certificate host name mismatch" },
{ PROB_SSL_NO_CERT, (u8*)"No SSL certificate data found" },
{ PROB_SSL_WEAK_CIPHER, (u8*)"Weak SSL cipher negotiated" },
{ PROB_DIR_LIST, (u8*)"Directory listing restrictions bypassed" },
{ PROB_URL_REDIR, (u8*)"Redirection to attacker-supplied URLs" },
{ PROB_USER_URL, (u8*)"Attacker-supplied URLs in embedded content (lower risk)" },
{ PROB_EXT_OBJ, (u8*)"External content embedded on a page (lower risk)" },
{ PROB_MIXED_OBJ, (u8*)"Mixed content embedded on a page (lower risk)" },
{ PROB_MIXED_FORM, (u8*)"HTTPS form submitting to a HTTP URL" },
{ PROB_VULN_FORM, (u8*)"HTML form with no apparent XSRF protection" },
{ PROB_JS_XSSI, (u8*)"JSON response with no apparent XSSI protection" },
{ PROB_CACHE_LOW, (u8*)"Incorrect caching directives (lower risk)" },
{ PROB_PROLOGUE, (u8*)"User-controlled response prefix (BOM / plugin attacks)" },
{ PROB_HEADER_INJECT, (u8*)"HTTP header injection vector" },
/* - Moderate severity issues (data compromise): */
{ PROB_BODY_XSS, (u8*)"XSS vector in document body" },
{ PROB_URL_XSS, (u8*)"XSS vector via arbitrary URLs" },
{ PROB_HTTP_INJECT, (u8*)"HTTP response header splitting" },
{ PROB_USER_URL_ACT, (u8*)"Attacker-supplied URLs in embedded content (higher risk)" },
{ PROB_EXT_SUB, (u8*)"External content embedded on a page (higher risk)" },
{ PROB_MIXED_SUB, (u8*)"Mixed content embedded on a page (higher risk)" },
{ PROB_BAD_MIME_DYN, (u8*)"Incorrect or missing MIME type (higher risk)" },
{ PROB_GEN_MIME_DYN, (u8*)"Generic MIME type (higher risk)" },
{ PROB_BAD_CSET_DYN, (u8*)"Incorrect or missing charset (higher risk)" },
{ PROB_CFL_HDRS_DYN, (u8*)"Conflicting MIME / charset info (higher risk)" },
{ PROB_FILE_POI, (u8*)"Interesting file" },
{ PROB_ERROR_POI, (u8*)"Interesting server message" },
{ PROB_DIR_TRAVERSAL, (u8*)"Directory traversal / file inclusion possible" },
{ PROB_CACHE_HI, (u8*)"Incorrect caching directives (higher risk)" },
{ PROB_PASS_NOSSL, (u8*)"Password form submits from or to non-HTTPS page" },
/* - High severity issues (system compromise): */
{ PROB_XML_INJECT, (u8*)"Server-side XML injection vector" },
{ PROB_SH_INJECT, (u8*)"Shell injection vector" },
{ PROB_SQL_INJECT, (u8*)"Query injection vector" },
{ PROB_FMT_STRING, (u8*)"Format string vector" },
{ PROB_INT_OVER, (u8*)"Integer overflow vector" },
{ PROB_FI_LOCAL, (u8*)"File inclusion" },
{ PROB_SQL_PARAM, (u8*)"SQL query or similar syntax in parameters" },
{ PROB_PUT_DIR, (u8*)"PUT request accepted" },
{ PROB_NONE, (u8*)"Invalid" }
};
#endif /* _VIA_DATABASE_C */
/* - Severity macros: */
#define PSEV(_x) ((_x) / 10000)
@ -438,4 +549,3 @@ void dump_signature(struct http_sig* sig);
void debug_same_page(struct http_sig* sig1, struct http_sig* sig2);
#endif /* _HAVE_DATABASE_H */
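
Note that the PIVOT_* constants above moved from small sequential values to
powers of two. That makes them usable as a bitmask, which is presumably how
the pv_flag field of cb_handle (checks.h) selects the pivot types a check
applies to. The helper below is a guess at that matching logic, written only
to illustrate the point; the real dispatcher sits in the suppressed checks.c.

#include "types.h"
#include "database.h"               /* PIVOT_* values, struct pivot_desc */

/* Hypothetical helper: does this check's pv_flag cover the pivot's type? */
static u8 check_applies(u32 pv_flag, struct pivot_desc* pv) {
  /* e.g. pv_flag = PIVOT_DIR | PIVOT_FILE matches both pivot kinds,
     something the old sequential values (11, 12, ...) could not express. */
  return (pv_flag & pv->type) ? 1 : 0;
}
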

debug.h (15 changed lines)

@ -74,6 +74,21 @@
#define F_DEBUG(x...) fprintf(stderr,x)
#define SAY(x...) printf(x)
#define L1 1 /* Informative, one line messages */
#define L2 2 /* Expand the above, dump reqs, resps */
#define L3 3 /* todo(heinenn) do we need this.. */
#ifdef LOG_STDERR
#define DEBUGC(_l, x...) DEBUG(x)
#else
#define DEBUGC(_l, x...) do { \
if(_l <= verbosity) { \
fprintf(stderr, x); \
} \
} while (0)
#endif /* LOG_STDERR */
#define WARN(x...) do { \
F_DEBUG(cYEL "[!] WARNING: " cBRI x); \
F_DEBUG(cRST "\n"); \
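
The DEBUGC() levels above are what the new -v flag feeds (compare the
problem() hunk in database.c earlier in this commit). A small usage sketch
follows; how the command line maps onto the verbosity counter is an
assumption, since skipfish.c is not part of this excerpt, and the
report_new_issue() wrapper is purely illustrative.

#include <stdio.h>
#include "types.h"
#include "debug.h"

extern u32 verbosity;               /* declared in database.h, driven by -v */

static void report_new_issue(u32 type, u8* url, u8* extra) {
  DEBUGC(L1, "\n--- NEW PROBLEM\n");
  DEBUGC(L1, " - type: %u\n", type);        /* printed at verbosity >= 1 */
  DEBUGC(L1, " - url:  %s\n", url);
  DEBUGC(L2, " - extra: %s\n", extra);      /* printed at verbosity >= 2 */
}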


@ -87,10 +87,11 @@ associated request cost):
scanner will not discover non-linked resources such as /admin,
/index.php.old, etc:
$ ./skipfish -W- -LV [...other options...]
$ ./skipfish -W- -L [...other options...]
This mode is very fast, but *NOT* recommended for general use because of
limited coverage. Use only where absolutely necessary.
This mode is very fast, but *NOT* recommended for general use because
the lack of dictionary bruteforcing will limit the coverage. Use
only where absolutely necessary.
2) Orderly scan with minimal extension brute-force. In this mode, the scanner
will not discover resources such as /admin, but will discover cases such as
@ -135,9 +136,6 @@ associated request cost):
complete - all-inclusive dictionary, over 210,000 requests.
complete-fast - An optimized version of the 'complete' dictionary
with 20-30% less requests.
Normal fuzzing mode is recommended when doing thorough assessments of
reasonably responsive servers; but it may be prohibitively expensive
when dealing with very large or very slow sites.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -86,6 +86,7 @@ e 1 1 1 sql
e 1 1 1 stackdump
e 1 1 1 svn-base
e 1 1 1 swf
e 1 1 1 swp
e 1 1 1 tar
e 1 1 1 tar.bz2
e 1 1 1 tar.gz
@ -106,3 +107,4 @@ e 1 1 1 xsl
e 1 1 1 xslt
e 1 1 1 yml
e 1 1 1 zip
e 1 1 1 ~

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -1,5 +0,0 @@
es 1 2 2 php
ws 1 2 2 cgi-bin
eg 1 2 2 old
wg 1 2 2 admin
w? 1 0 0 localhost


@ -127,6 +127,7 @@ u8* get_value(u8 type, u8* name, u32 offset,
if (type != par->t[i]) continue;
if (name && (!par->n[i] || strcasecmp((char*)par->n[i], (char*)name)))
continue;
if (offset != coff) { coff++; continue; }
return par->v[i];
}
@ -453,7 +454,7 @@ u8* url_decode_token(u8* str, u32 len, u8 plus) {
tokens. We otherwise let pretty much everything else go through, as it
may help with the exploitation of certain vulnerabilities. */
u8* url_encode_token(u8* str, u32 len, u8 also_slash) {
u8* url_encode_token(u8* str, u32 len, u8* enc_set) {
u8 *ret = ck_alloc(len * 3 + 1);
u8 *src = str, *dst = ret;
@ -461,8 +462,7 @@ u8* url_encode_token(u8* str, u32 len, u8 also_slash) {
while (len--) {
u8 c = *(src++);
if (c <= 0x20 || c >= 0x80 || strchr("#%&=+;,!$?", c) ||
(also_slash && c == '/')) {
if (c <= 0x20 || c >= 0x80 || strchr((char*)enc_set, c)) {
if (c == 0xFF) c = 0;
sprintf((char*)dst, "%%%02X", c);
dst += 3;
@ -681,7 +681,11 @@ u8* serialize_path(struct http_request* req, u8 with_host, u8 with_post) {
/* First print path... */
for (i=0;i<req->par.c;i++)
for (i=0;i<req->par.c;i++) {
u8 *enc = (u8*)ENC_PATH;
if(req->pivot && req->fuzz_par_enc && i == req->pivot->fuzz_par)
enc = req->fuzz_par_enc;
if (PATH_SUBTYPE(req->par.t[i])) {
switch (req->par.t[i]) {
@ -696,22 +700,27 @@ u8* serialize_path(struct http_request* req, u8 with_host, u8 with_post) {
if (req->par.n[i]) {
u32 len = strlen((char*)req->par.n[i]);
u8* str = url_encode_token(req->par.n[i], len, 1);
u8* str = url_encode_token(req->par.n[i], len, enc);
ASD(str); ASD("=");
ck_free(str);
}
if (req->par.v[i]) {
u32 len = strlen((char*)req->par.v[i]);
u8* str = url_encode_token(req->par.v[i], len, 1);
u8* str = url_encode_token(req->par.v[i], len, enc);
ASD(str);
ck_free(str);
}
}
}
/* Then actual parameters. */
for (i=0;i<req->par.c;i++)
for (i=0;i<req->par.c;i++) {
u8 *enc = (u8*)ENC_DEFAULT;
if(req->pivot && req->fuzz_par_enc && i == req->pivot->fuzz_par)
enc = req->fuzz_par_enc;
if (QUERY_SUBTYPE(req->par.t[i])) {
if (!got_search) {
@ -729,23 +738,29 @@ u8* serialize_path(struct http_request* req, u8 with_host, u8 with_post) {
if (req->par.n[i]) {
u32 len = strlen((char*)req->par.n[i]);
u8* str = url_encode_token(req->par.n[i], len, 0);
u8* str = url_encode_token(req->par.n[i], len, enc);
ASD(str); ASD("=");
ck_free(str);
}
if (req->par.v[i]) {
u32 len = strlen((char*)req->par.v[i]);
u8* str = url_encode_token(req->par.v[i], len, 0);
u8* str = url_encode_token(req->par.v[i], len, enc);
ASD(str);
ck_free(str);
}
}
}
got_search = 0;
if (with_post)
for (i=0;i<req->par.c;i++)
for (i=0;i<req->par.c;i++) {
u8 *enc = (u8*)ENC_DEFAULT;
if(req->pivot && req->fuzz_par_enc && i == req->pivot->fuzz_par)
enc = req->fuzz_par_enc;
if (POST_SUBTYPE(req->par.t[i])) {
if (!got_search) {
@ -755,18 +770,19 @@ u8* serialize_path(struct http_request* req, u8 with_host, u8 with_post) {
if (req->par.n[i]) {
u32 len = strlen((char*)req->par.n[i]);
u8* str = url_encode_token(req->par.n[i], len, 0);
u8* str = url_encode_token(req->par.n[i], len, enc);
ASD(str); ASD("=");
ck_free(str);
}
if (req->par.v[i]) {
u32 len = strlen((char*)req->par.v[i]);
u8* str = url_encode_token(req->par.v[i], len, 0);
u8* str = url_encode_token(req->par.v[i], len, enc);
ASD(str);
ck_free(str);
}
}
}
#undef ASD
@ -1076,7 +1092,7 @@ u8* build_request_data(struct http_request* req) {
ASD("\r\n");
} else if (global_http_par.t[i] == PARAM_COOKIE &&
!GET_CK(global_http_par.n[i], &req->par)) {
if (ck_pos) ADD_STR_DATA(ck_buf, ck_pos, ";");
if (ck_pos) ADD_STR_DATA(ck_buf, ck_pos, "; ");
ADD_STR_DATA(ck_buf, ck_pos, global_http_par.n[i]);
ADD_STR_DATA(ck_buf, ck_pos, "=");
ADD_STR_DATA(ck_buf, ck_pos, global_http_par.v[i]);
@ -1111,24 +1127,30 @@ u8* build_request_data(struct http_request* req) {
/* The default case: application/x-www-form-urlencoded. */
for (i=0;i<req->par.c;i++)
for (i=0;i<req->par.c;i++) {
u8 *enc = (u8*)ENC_DEFAULT;
if(req->pivot && req->fuzz_par_enc && i == req->pivot->fuzz_par)
enc = req->fuzz_par_enc;
if (req->par.t[i] == PARAM_POST) {
if (pay_pos) ADD_STR_DATA(pay_buf, pay_pos, "&");
if (req->par.n[i]) {
u32 len = strlen((char*)req->par.n[i]);
u8* str = url_encode_token(req->par.n[i], len, 0);
u8* str = url_encode_token(req->par.n[i], len, enc);
ADD_STR_DATA(pay_buf, pay_pos, str);
ADD_STR_DATA(pay_buf, pay_pos, "=");
ck_free(str);
}
if (req->par.v[i]) {
u32 len = strlen((char*)req->par.v[i]);
u8* str = url_encode_token(req->par.v[i], len, 0);
u8* str = url_encode_token(req->par.v[i], len, enc);
ADD_STR_DATA(pay_buf, pay_pos, str);
ck_free(str);
}
}
}
ASD("Content-Type: application/x-www-form-urlencoded\r\n");
} else if (req_type == PARAM_POST_O) {
@ -1756,6 +1778,7 @@ static void destroy_unlink_conn(struct conn_entry* c, u8 keep) {
/* Performs struct conn_entry for reuse following a clean shutdown. */
static void reuse_conn(struct conn_entry* c, u8 keep) {
if (c->q) destroy_unlink_queue(c->q, keep);
c->q = 0;
ck_free(c->read_buf);
@ -1868,6 +1891,14 @@ void async_request(struct http_request* req) {
static void check_ssl(struct conn_entry* c) {
X509 *p;
SSL_CIPHER *cp;
/* Test if a weak cipher has been negotiated */
cp = SSL_get_current_cipher(c->srv_ssl);
if(!(cp->algo_strength & SSL_MEDIUM) && !(cp->algo_strength & SSL_HIGH))
problem(PROB_SSL_WEAK_CIPHER, c->q->req, 0,
(u8*)SSL_CIPHER_get_name(cp),host_pivot(c->q->req->pivot), 0);
p = SSL_get_peer_certificate(c->srv_ssl);


@ -76,6 +76,18 @@ struct param_array {
#define HEADER_SUBTYPE(_x) ((_x) >= PARAM_HEADER)
/* Different character sets to feed the encoding function */
#define ENC_DEFAULT "#&=+;,!$?%" /* Default encoding */
#define ENC_PATH "#&=+;,!$?%/" /* Path encoding with slash */
#define ENC_NULL "#&=+;,!$?" /* Encoding without % */
/* SSL Cipher strengths */
#define SSL_MEDIUM 0x00000040L
#define SSL_HIGH 0x00000080L
struct http_response;
struct queue_entry;
@ -113,6 +125,9 @@ struct http_request {
u8* trying_key; /* Current keyword ptr */
u8 trying_spec; /* Keyword specificity info */
u8* fuzz_par_enc; /* Fuzz target encoding */
};
/* Flags for http_response completion state: */
@ -292,7 +307,7 @@ u8* url_decode_token(u8* str, u32 len, u8 plus);
otherwise let pretty much everything else go through, as it may help with
the exploitation of certain vulnerabilities. */
u8* url_encode_token(u8* str, u32 len, u8 also_slash);
u8* url_encode_token(u8* str, u32 len, u8* enc_set);
/* Reconstructs URI from http_request data. Includes protocol and host
if with_host is non-zero. */
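
With the ENC_* sets above in place, callers of url_encode_token() now choose
how aggressively a token is escaped, as the serialize_path() hunk earlier in
this commit does per fuzzed parameter. A small sketch; the demo_encoding()
wrapper is just for illustration:

#include <string.h>
#include "types.h"
#include "alloc-inl.h"
#include "http_client.h"

static void demo_encoding(void) {
  u8* in = (u8*)"a/b?c=1";

  /* ENC_PATH contains '/', so it gets escaped: "a%2Fb%3Fc%3D1" */
  u8* path_tok  = url_encode_token(in, strlen((char*)in), (u8*)ENC_PATH);

  /* ENC_DEFAULT leaves '/' intact: "a/b%3Fc%3D1" */
  u8* query_tok = url_encode_token(in, strlen((char*)in), (u8*)ENC_DEFAULT);

  ck_free(path_tok);
  ck_free(query_tok);
}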


@ -48,6 +48,7 @@ struct p_sig_desc {
static struct p_sig_desc* p_sig;
static u32 p_sig_cnt;
u8 suppress_dupes;
u32 verbosity = 0;
/* Response, issue sample data. */
@ -416,8 +417,23 @@ static void save_req_res(struct http_request* req, struct http_response* res, u8
u8* rd = build_request_data(req);
f = fopen("request.dat", "w");
if (!f) PFATAL("Cannot create 'request.dat'");
fwrite(rd, strlen((char*)rd), 1, f);
if (fwrite(rd, strlen((char*)rd), 1, f)) {};
fclose(f);
/* Write .js file with base64 encoded json data. */
u32 size = 0;
u8* rd_js;
NEW_STR(rd_js, size);
ADD_STR_DATA(rd_js, size, "var req = {'data':'");
ADD_STR_DATA(rd_js, size, js_escape(rd, 0));
ADD_STR_DATA(rd_js, size, "'}");
f = fopen("request.js", "w");
if (!f) PFATAL("Cannot create 'request.js'");
if (fwrite(rd_js, strlen((char*)rd_js), 1, f)) {};
fclose(f);
ck_free(rd_js);
ck_free(rd);
}
@ -425,16 +441,45 @@ static void save_req_res(struct http_request* req, struct http_response* res, u8
u32 i;
f = fopen("response.dat", "w");
if (!f) PFATAL("Cannot create 'response.dat'");
fprintf(f, "HTTP/1.1 %u %s\n", res->code, res->msg);
u64 msg_size = strlen((char*)res->msg);
u64 rs_size = msg_size + strlen("HTTP/1.1 1000 \n") + 1;
u8* rs = ck_alloc(rs_size);
snprintf((char*)rs, rs_size -1, "HTTP/1.1 %u %s\n", res->code, res->msg);
u32 s = strlen((char*)rs);
for (i=0;i<res->hdr.c;i++)
if (res->hdr.t[i] == PARAM_HEADER)
fprintf(f, "%s: %s\n", res->hdr.n[i], res->hdr.v[i]);
if (res->hdr.t[i] == PARAM_HEADER) {
ADD_STR_DATA(rs, s, res->hdr.n[i]);
ADD_STR_DATA(rs, s, ": ");
ADD_STR_DATA(rs, s, res->hdr.v[i]);
ADD_STR_DATA(rs, s, "\n");
}
fprintf(f, "\n");
fwrite(res->payload, res->pay_len, 1, f);
if(res->payload) {
ADD_STR_DATA(rs, s, "\n");
ADD_STR_DATA(rs, s, res->payload);
}
if (fwrite(rs, strlen((char*)rs), 1, f)) {};
fclose(f);
/* Write .js file with base64 encoded json data. */
u8* rs_js;
NEW_STR(rs_js, s);
ADD_STR_DATA(rs_js, s, "var res = {'data':'");
ADD_STR_DATA(rs_js, s, js_escape(rs, 0));
ADD_STR_DATA(rs_js, s, "'}");
f = fopen("response.js", "w");
if (!f) PFATAL("Cannot create 'response.js'");
if (fwrite(rs_js, strlen((char*)rs_js), 1, f)) {};
fclose(f);
ck_free(rs_js);
ck_free(rs);
/* Also collect MIME samples at this point. */
if (!req->pivot->dupe && res->sniffed_mime && sample) {
@ -785,10 +830,11 @@ static void save_pivots(FILE* f, struct pivot_desc* cur) {
}
if (cur->res)
fprintf(f, "dup=%u %s%scode=%u len=%u notes=%u\n", cur->dupe,
fprintf(f, "dup=%u %s%scode=%u len=%u notes=%u sig=0x%x\n", cur->dupe,
cur->bogus_par ? "bogus " : "",
cur->missing ? "returns_404 " : "",
cur->res->code, cur->res->pay_len, cur->issue_cnt);
cur->res->code, cur->res->pay_len,
cur->issue_cnt, cur->pv_sig);
else
fprintf(f, "not_fetched\n");


@ -1,164 +1,267 @@
.\" vi:set wm=5
.TH SKIPFISH 1 "March 23, 2010"
.TH SKIPFISH 1 "May 6, 2012"
.SH NAME
skipfish \- active web application security reconnaissance tool
skipfish \- web application security scanner
.SH SYNOPSIS
.B skipfish
.RI [ options ] " -W wordlist -o output-directory start-url [start-url2 ...]"
.RI [ options ] " -o output-directory [ start-url | @url-file [ start-url2 ... ]]"
.br
.SH DESCRIPTION
.PP
\fBskipfish\fP is an active web application security reconnaissance tool.
It prepares an interactive sitemap for the targeted site by carrying out a recursive crawl and dictionary-based probes.
The resulting map is then annotated with the output from a number of active (but hopefully non-disruptive) security checks.
The final report generated by the tool is meant to serve as a foundation for professional web application security assessments.
.SH OPTIONS
It prepares an interactive sitemap for the targeted site by carrying out a recursive crawl and dictionary-based probes. The resulting map is then annotated with the output from a number of active (but hopefully non-disruptive) security checks. The final report generated by the tool is meant to serve as a foundation for professional web application security assessments.
.SH OPTIONS SUMMARY
.PP
.sp
.if n \{\
.RS 4
.\}
.fam C
.ps -1
.nf
.BB lightgray
Authentication and access options:
\-A user:pass \- use specified HTTP authentication credentials
\-F host=IP \- pretend that \'host\' resolves to \'IP\'
\-C name=val \- append a custom cookie to all requests
\-H name=val \- append a custom HTTP header to all requests
\-b (i|f|p) \- use headers consistent with MSIE / Firefox / iPhone
\-N \- do not accept any new cookies
.SS Authentication and access options:
.TP
.B \-A user:pass
use specified HTTP authentication credentials
.TP
.B \-F host=IP
pretend that 'host' resolves to 'IP'
.TP
.B \-C name=val
append a custom cookie to all requests
.TP
.B \-H name=val
append a custom HTTP header to all requests
.TP
.B \-b (i|f|p)
use headers consistent with MSIE / Firefox / iPhone
.TP
.B \-N
do not accept any new cookies
Crawl scope options:
\-d max_depth \- maximum crawl tree depth (16)
\-c max_child \- maximum children to index per node (512)
\-x max_desc \- maximum descendants to index per branch (8192)
\-r r_limit \- max total number of requests to send (100000000)
\-p crawl% \- node and link crawl probability (100%)
\-q hex \- repeat probabilistic scan with given seed
\-I string \- only follow URLs matching \'string\'
\-X string \- exclude URLs matching \'string\'
\-K string \- do not fuzz parameters named \'string\'
\-D domain \- crawl cross\-site links to another domain
\-B domain \- trust, but do not crawl, another domain
\-Z \- do not descend into 5xx locations
\-O \- do not submit any forms
\-P \- do not parse HTML, etc, to find new links
.SS Crawl scope options:
.TP
.B \-d max_depth
maximum crawl tree depth (default: 16)
.TP
.B \-c max_child
maximum children to index per node (default: 512)
.TP
.B \-x max_desc
maximum descendants to index per crawl tree branch (default: 8192)
.TP
.B \-r r_limit
max total number of requests to send (default: 100000000)
.TP
.B \-p crawl%
node and link crawl probability (default: 100%)
.TP
.B \-q hex
repeat a scan with a particular random seed
.TP
.B \-I string
only follow URLs matching 'string'
.TP
.B \-X string
exclude URLs matching 'string'
.TP
.B \-K string
do not fuzz query parameters or form fields named 'string'
.TP
.B \-Z
do not descend into directories that return HTTP 500 code
.TP
.B \-D domain
also crawl cross-site links to a specified domain
.TP
.B \-B domain
trust, but do not crawl, content included from a third-party domain
.TP
.B \-O
do not submit any forms
.TP
.B \-P
do not parse HTML and other documents to find new links
Reporting options:
\-o dir \- write output to specified directory (required)
\-M \- log warnings about mixed content / non\-SSL passwords
\-E \- log all caching intent mismatches
\-U \- log all external URLs and e\-mails seen
\-Q \- completely suppress duplicate nodes in reports
\-u \- be quiet, disable realtime progress stats
.SS Reporting options:
.TP
.B \-o dir
write output to specified directory (required)
.TP
.B \-M
log warnings about mixed content or non-SSL password forms
.TP
.B \-E
log all HTTP/1.0 / HTTP/1.1 caching intent mismatches
.TP
.B \-U
log all external URLs and e-mails seen
.TP
.B \-Q
completely suppress duplicate nodes in reports
.TP
.B \-u
be quiet, do not display realtime scan statistics
Dictionary management options:
\-W wordlist \- use a specified read\-write wordlist (required)
\-S wordlist \- load a supplemental read\-only wordlist
\-L \- do not auto\-learn new keywords for the site
\-Y \- do not fuzz extensions in directory brute\-force
\-R age \- purge words hit more than \'age\' scans ago
\-T name=val \- add new form auto\-fill rule
\-G max_guess \- maximum number of keyword guesses to keep (256)
.SS Dictionary management options:
.TP
.B \-S wordlist
load a specified read-only wordlist for brute-force tests
.TP
.B \-W wordlist
load a specified read-write wordlist for any site-specific learned words. This option is required, but the specified file can be empty; it will be used to store newly learned words. Alternatively, you can use -W- to discard new words.
.TP
.B \-L
do not auto-learn new keywords for the site
.TP
.B \-Y
do not fuzz extensions during most directory brute-force steps
.TP
.B \-R age
purge words that resulted in a hit more than 'age' scans ago
.TP
.B \-T name=val
add new form auto-fill rule
.TP
.B \-G max_guess
maximum number of keyword guesses to keep in the jar (default: 256)
Performance settings:
\-l max_req \- max requests per second (0.000000)
\-g max_conn \- max simultaneous TCP connections, global (40)
\-m host_conn \- max simultaneous connections, per target IP (10)
\-f max_fail \- max number of consecutive HTTP errors (100)
\-t req_tmout \- total request response timeout (20 s)
\-w rw_tmout \- individual network I/O timeout (10 s)
\-i idle_tmout \- timeout on idle HTTP connections (10 s)
\-s s_limit \- response size limit (200000 B)
\-e \- do not keep binary responses for reporting
.SS Performance settings:
.TP
.B \-l max_req
max requests per second (0 = unlimited)
.TP
.B \-g max_conn
maximum simultaneous TCP connections, global (default: 40)
.TP
.B \-m host_conn
maximum simultaneous connections, per target IP (default: 10)
.TP
.B \-f max_fail
maximum number of consecutive HTTP errors to accept (default: 100)
.TP
.B \-t req_tmout
total request response timeout (default: 20 s)
.TP
.B \-w rw_tmout
individual network I/O timeout (default: 10 s)
.TP
.B \-i idle_tmout
timeout on idle HTTP connections (default: 10 s)
.TP
.B \-s s_limit
response size limit (default: 200000 B)
.TP
.B \-e
do not keep binary responses for reporting
Safety settings:
\-k duration \- stop scanning after the given duration h:m:s
.SS Safety settings:
.TP
.B \-k duration
stop scanning after the given duration (format: h:m:s)
.SH AUTHENTICATION AND ACCESS
.PP
Some sites require authentication, and skipfish supports this in different ways. First, there is basic HTTP authentication, for which you can use the \-A flag. Second, and more common, are sites that require authentication at the web application level. For these sites, the best approach is to capture authenticated session cookies and provide them to skipfish using the \-C flag (multiple times if needed). Finally, you will need to put some effort into protecting the session from being destroyed, by excluding logout links with \-X and/or by rejecting new cookies with \-N.
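.PP
For example, a cookie\-authenticated scan that protects the session could look roughly like this (the cookie name and value are purely illustrative):
.br
skipfish \-C session=1234abcd \-X /logout \-N \-o output/dir/ http://example.com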
.IP "-A/--auth <username:password>"
For sites requiring basic HTTP authentication, you can use this flag to specify your credentials.
.IP "-F/--host <ip:hostname>"
Using this flag, you can set the \'\fIHost:\fP\' header value to define a custom mapping between a host and an IP (bypassing the resolver). This feature is particularly useful for not-yet-launched or legacy services that don't have the necessary DNS entries.
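For example, to test a service that has no public DNS entry yet, one might run something like the following (hostname and IP address are purely illustrative):
.br
skipfish \-F beta.example.com=10.0.0.5 \-o output/dir/ http://beta.example.com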
.IP "-H/--header <header:value>"
When it comes to customizing your HTTP requests, you can use the -H option to insert any additional, non-standard headers. This flag also allows the default headers to be overwritten.
.IP "-C/--cookie <cookie:value>"
This flag can be used to add a cookie to the skipfish HTTP requests; this is particularly useful for performing authenticated scans by providing session cookies. When doing so, keep in mind that certain URLs (e.g. /logout) may destroy your session; you can combat this in two ways: by using the -N option, which causes the scanner to reject attempts to set or delete cookies, or by using the -X option to exclude logout URLs.
.IP "-b/--user-agent <i|f|p>"
This flag allows the user-agent to be specified, where \'\fIi\fP\' stands for Internet Explorer, \'\fIf\fP\' for Firefox and \'\fIp\fP\' for iPhone. Using this flag is recommended in case the target site shows different behavior based on the user-agent (e.g. some sites use different templates for mobile and desktop clients).
.IP "-N/--reject-cookies"
This flag causes skipfish to ignore cookies that are being set by the site. This helps to enforce stateless tests and also prevents cookies set with \'-C\' from being overwritten.
.SH CRAWLING SCOPE
.PP
Some sites may be too big to scan in a reasonable timeframe. If the site features well-defined tarpits - for example, 100,000 nearly identical user profiles as part of a social network - these specific locations can be excluded with -X. In other cases, you may need to resort to other settings: -d limits crawl depth to a specified number of subdirectories; -c limits the number of children per directory; -x limits the total number of descendants per crawl tree branch; and -r limits the total number of requests to send in a scan.
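.PP
As an illustration, the following invocation keeps the crawl inside /shop, skips the logout link and caps the crawl dimensions (all values are arbitrary examples):
.br
skipfish \-I /shop \-X /logout \-d 10 \-c 256 \-r 200000 \-o output/dir/ http://example.com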
.IP "-d/--max-depth <depth>"
Limit the depth of subdirectories being crawled (see above).
.IP "-c/--max-child <childs>"
Limit the amount of subdirectories per directory we crawl into (see above).
.IP "-x/--max-descendants <descendants>"
Limit the total number of descendants per crawl tree branch (see above).
.IP "-r/--max-requests <request>"
The maximum number of requests can be limited with this flag.
.IP "-p/--probability <0-100>"
By specifying a percentage between 1 and 100%, it is possible to tell the crawler to follow fewer than 100% of all links, and try fewer than 100% of all dictionary entries. This \- naturally \- limits the completeness of a scan, but unlike most other settings, it does so in a balanced, non-deterministic manner. It is extremely useful when you are setting up time-bound, but periodic assessments of your infrastructure.
.IP "-q/--seed <seed>"
This flag sets the initial random seed for the crawler to a specified value. This can be used to exactly reproduce a previous scan to compare results. Randomness is relied upon most heavily in the -p mode, but also influences a couple of other scan management decisions.
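For instance, a periodic, time-bound assessment might sample a quarter of the links and pin the seed so that the run can later be reproduced (the values shown are placeholders; reuse the seed reported by the earlier scan):
.br
skipfish \-p 25 \-q 0x1a2b3c4d \-o output/dir/ http://example.com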
.IP "-I/--include <domain/path>"
With this flag, you can tell skipfish to only crawl and test URLs that match a certain string. This can help to narrow down the scope of a scan by only whitelisting certain sections of a web site (e.g. \-I /shop).
.IP "-X/--exclude <domain/path>"
The \-X option can be used to exclude files / directories from the scan. This is useful to avoid session termination (i.e. by excluding /logout) or just for speeding up your scans by excluding static content directories like /icons/, /doc/, /manuals/, and other standard, mundane locations along these lines.
.IP "-K/--skip-param <parameter name>"
This flag allows you to specify parameter names not to fuzz (useful for applications that put session IDs in the URL, to minimize noise).
.IP "-D/--include-domain <domain>"
Allows you to specify additional hosts or domains to be in-scope for the test. By default, all hosts appearing in the command-line URLs are added to the list - but you can use -D to broaden these rules. As a result, the crawler will follow and test links that point to these additional hosts.
.IP "-B/--trust-domain <domain>"
In some cases, you do not want to actually crawl a third-party domain, but you trust the owner of that domain enough not to worry about cross-domain content inclusion from that location. To suppress warnings, you can use the \-B option.
.IP "-Z/--skip-error-pages"
Do not crawl into pages / directories that return a 5XX error code.
.IP "-O/--skip-forms"
Using this flag will cause forms to be ignored during the scan.
.IP "-P/--ignore-links"
This flag disables link extraction and thus effectively disables crawling. Using \-P is useful when you want to test one specific URL or when you want to feed skipfish a list of URLs that were collected with an external crawler.
.IP "--checks"
EXPERIMENTAL: Displays the crawler injection tests. The output shows the index number (useful for \-\-checks\-toggle), the check name and whether the check is enabled.
.IP "--checks-toggle <check1,check2,..>"
EXPERIMENTAL: Every injection test can be enabled or disabled using this flag. As a value, provide the check numbers, which can be obtained with the \-\-checks flag. Multiple checks can be toggled via a comma-separated value (e.g. \-\-checks\-toggle 1,2).
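A possible workflow (the check numbers below are illustrative): first run skipfish \-\-checks to display the numbered list of tests, then toggle the selected checks when starting the actual scan, for example:
.br
skipfish \-\-checks\-toggle 3,5 [...other options..] \-o output/dir/ http://example.com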
.SH REPORTING OPTIONS
.PP
.IP "-o/--output <dir>"
The report will be written to this location. The output directory is one of the two mandatory options and must not exist before the scan is started.
.IP "-M/--log-mixed-content"
Enable the logging of mixed content. This is highly recommended when scanning SSL-only sites to detect insecure content inclusion via non-SSL protected links.
.IP "-E/--log-cache-mismatches"
This will cause additional content caching errors to be reported.
.IP "-U/--log-external-urls"
Log all external URLs and email addresses that were seen during the scan.
.IP "-Q/--log-unique-nodes"
Enable this to completely suppress duplicate nodes in reports.
.IP "-u/--quiet"
This will cause skipfish to be quiet and not display the real-time scan statistics during the scan.
.IP "-v/--verbose"
EXPERIMENTAL: Use this flag to enable runtime reporting of, for example, problems that are detected. Can be used multiple times to increase verbosity and should be used in combination with \-u unless you run skipfish with stderr redirected to a file.
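For example, verbose reporting can be captured to a file while the console statistics stay disabled (the log file name is illustrative):
.br
skipfish \-v \-u \-o output/dir/ http://example.com 2> scan.log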
.SH DICTIONARY MANAGEMENT
.PP
Make sure you've read the instructions provided in dictionaries/README-FIRST to select the right dictionary file and configure it correctly. This step has a profound impact on the quality of scan results later on.
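.PP
A typical combination is to load one of the packaged read\-only dictionaries with \-S and keep the site\-specific learned keywords in a separate read\-write file (the file names are illustrative):
.br
skipfish \-S dictionaries/complete.wl \-W site_words.wl \-o output/dir/ http://example.com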
.IP "-S/--wordlist <file>"
Load the specified (read-only) wordlist for use during the scan. This flag is optional but use of a dictionary is highly recommended when performing a blackbox scan as it will highlight hidden files and directories.
.IP "-W/--rw-wordlist <file>"
Specify an initially empty file for any newly learned site-specific keywords (which will come in handy in future assessments). You can use \-W\- or \-W /dev/null if you don't want to store auto-learned keywords anywhere. Typically you will want to use one of the packaged dictionaries (e.g. complete.wl) and possibly add a custom dictionary.
.IP "-L/--no-keyword-learning"
During the scan, skipfish will try to learn and use new keywords. This flag disables that behavior and should be used when any form of brute-forcing is not desired.
.IP "-Y/--no-ext-fuzzing"
This flag will disable extension guessing during directory bruteforcing.
.IP "-R <age>"
Use of this flag allows old words to be purged from wordlists. It is intended to help keep dictionaries clean when they are used in recurring scans.
.IP "-T/--form-value <name=value>"
Skipfish also features a form auto-completion mechanism in order to maximize scan coverage. The values should be non-malicious, as they are not meant to implement security checks \- but rather, to get past input validation logic. You can define additional rules, or override existing ones, with the \-T option (\-T form_field_name=field_value, e.g. \-T login=test123 \-T password=test321 - although note that \-C and \-A are a much better method of logging in).
.IP "-G <max guesses>"
During the scan, a temporary buffer of newly detected keywords is maintained. The size of this buffer can be changed with this flag; doing so influences the amount of brute-forcing performed.
.SH PERFORMANCE OPTIONS
The default performance settings should be fine for most servers, but when the report indicates there were connection problems, you might want to tweak some of the values here. For unstable servers, the scan coverage is likely to improve when you use low values for the rate and connection flags (see the flaky server example in the EXAMPLES section).
.IP "-l/--max-rate <rate>"
This flag can be used to limit the number of requests per second. This is very useful when the target server can't keep up with the volume of requests generated by skipfish. Keeping the number of requests per second low can also help prevent rate-based DoS protection mechanisms from kicking in and ruining the scan.
.IP "-g/--max-connections <number>"
The max simultaneous TCP connections (global) can be set with this flag.
.IP "-m/--max-host-connections <number>"
The max simultaneous TCP connections, per target IP, can be set with this flag.
.IP "-f/--max-fail <number>"
Controls the maximum number of consecutive HTTP errors you are willing to see before aborting the scan. For large scans, you probably want to set a higher value here.
.IP "-t/--request-timeout <timeout>"
Set the total request timeout, to account for really slow or really fast sites.
.IP "-w/--network-timeout <timeout>"
Set the network I/O timeout.
.IP "-i/--idle-timeout <timeout>"
Specify the timeout for idle HTTP connections.
.IP "-s/--response-size <size>"
Sets the maximum length of a response to fetch and parse (longer responses will be truncated).
.IP "-e/--discard-binary"
This prevents binary documents from being kept in memory for reporting purposes, and frees up a lot of RAM.
.SH EXAMPLES
\fBScan type: quick\fP
.br
skipfish \-o output/dir/ http://example.com
.br
.br
\fBScan type: extensive bruteforce\fP
.br
skipfish [...other options..] \fI\-S dictionaries/complete.wl\fP http://example.com
.br
.br
\fBScan type: without bruteforcing\fP
.br
skipfish [...other options..] -LY http://example.com
.br
\fBScan type: authenticated (basic)\fP
.br
skipfish [...other options..] \fI-A username:password\fP http://example.com
.br
\fBScan type: authenticated (cookie)\fP
.br
skipfish [...other options..] \-C jsession=myauthcookiehere \-X /logout http://example.com
.br
\fBScan type: flaky server\fP
.br
skipfish [...other options..] -l 5 -g 2 -t 30 -i 15 http://example.com
.br
.SH NOTES
The default values for all flags can be viewed by running \'./skipfish -h\'.
.SH AUTHOR
skipfish was written by Michal Zalewski <lcamtuf@google.com>,
with contributions from Niels Heinen <heinenn@google.com>,
Sebastian Roschke <s.roschke@googlemail.com>, and other parties.
.PP
This manual page was written by Thorsten Schifferdecker <tsd@debian.systs.org>,
for the Debian project (and may be used by others).
This manual page was written with the help of Thorsten Schifferdecker <tsd@debian.systs.org>.

View File

@ -39,14 +39,15 @@
#include "string-inl.h"
#include "crawler.h"
#include "checks.h"
#include "analysis.h"
#include "database.h"
#include "http_client.h"
#include "report.h"
#ifdef DEBUG_ALLOCATOR
struct __AD_trk_obj* __AD_trk[ALLOC_BUCKETS];
u32 __AD_trk_cnt[ALLOC_BUCKETS];
struct TRK_obj* TRK[ALLOC_BUCKETS];
u32 TRK_cnt[ALLOC_BUCKETS];
#endif /* DEBUG_ALLOCATOR */
/* Ctrl-C handler... */
@ -242,7 +243,8 @@ static void read_urls(u8* fn) {
int main(int argc, char** argv) {
s32 opt;
u32 loop_cnt = 0, purge_age = 0, seed;
u8 show_once = 0, be_quiet = 0, display_mode = 0, has_fake = 0;
u8 show_once = 0, no_statistics = 0, display_mode = 0, has_fake = 0;
s32 oindex = 0;
u8 *wordlist = NULL, *output_dir = NULL;
u8* gtimeout_str = NULL;
u32 gtimeout = 0;
@ -256,6 +258,60 @@ int main(int argc, char** argv) {
signal(SIGPIPE, SIG_IGN);
SSL_library_init();
/* Options, options, and options */
static struct option long_options[] = {
{"auth", required_argument, 0, 'A' },
{"host", required_argument, 0, 'F' },
{"cookie", required_argument, 0, 'C' },
{"reject-cookies", required_argument, 0, 'N' },
{"header", required_argument, 0, 'H' },
{"user-agent", required_argument, 0, 'b' },
#ifdef PROXY_SUPPORT
{"proxy", required_argument, 0, 'J' },
#endif /* PROXY_SUPPORT */
{"max-depth", required_argument, 0, 'd' },
{"max-child", required_argument, 0, 'c' },
{"max-descendants", required_argument, 0, 'x' },
{"max-requests", required_argument, 0, 'r' },
{"max-rate", required_argument, 0, 'l'},
{"probability", required_argument, 0, 'p' },
{"seed", required_argument, 0, 'q' },
{"include", required_argument, 0, 'I' },
{"exclude", required_argument, 0, 'X' },
{"skip-param", required_argument, 0, 'K' },
{"skip-forms", no_argument, 0, 'O' },
{"include-domain", required_argument, 0, 'D' },
{"ignore-links", no_argument, 0, 'P' },
{"no-ext-fuzzing", no_argument, 0, 'Y' },
{"log-mixed-content", no_argument, 0, 'M' },
{"skip-error-pages", no_argument, 0, 'Z' },
{"log-external-urls", no_argument, 0, 'U' },
{"log-cache-mismatches", no_argument, 0, 'E' },
{"form-value", no_argument, 0, 'T' },
{"rw-wordlist", required_argument, 0, 'W' },
{"no-keyword-learning", no_argument, 0, 'L' },
{"mode", required_argument, 0, 'J' },
{"wordlist", required_argument, 0, 'S'},
{"trust-domain", required_argument, 0, 'B' },
{"max-connections", required_argument, 0, 'g' },
{"max-host-connections", required_argument, 0, 'm' },
{"max-fail", required_argument, 0, 'f' },
{"request-timeout", required_argument, 0, 't' },
{"network-timeout", required_argument, 0, 'w' },
{"idle-timeout", required_argument, 0, 'i' },
{"response-size", required_argument, 0, 's' },
{"discard-binary", required_argument, 0, 'e' },
{"output", required_argument, 0, 'o' },
{"help", no_argument, 0, 'h' },
{"quiet", no_argument, 0, 'u' },
{"verbose", no_argument, 0, 'v' },
{"scan-timeout", required_argument, 0, 'k'},
{"checks", no_argument, 0, 0},
{"checks-toggle", required_argument, 0, 0},
{0, 0, 0, 0 }
};
/* Come up with a quasi-decent random seed. */
gettimeofday(&tv, NULL);
@ -263,9 +319,10 @@ int main(int argc, char** argv) {
SAY("skipfish version " VERSION " by <lcamtuf@google.com>\n");
while ((opt = getopt(argc, argv,
while ((opt = getopt_long(argc, argv,
"+A:B:C:D:EF:G:H:I:J:K:LMNOPQR:S:T:UW:X:YZ"
"b:c:d:ef:g:hi:k:l:m:o:p:q:r:s:t:uw:x:")) > 0)
"b:c:d:ef:g:hi:k:l:m:o:p:q:r:s:t:uvw:x:",
long_options, &oindex)) >= 0)
switch (opt) {
@ -505,9 +562,14 @@ int main(int argc, char** argv) {
break;
case 'u':
be_quiet = 1;
no_statistics = 1;
break;
case 'v':
verbosity++;
break;
case 'e':
delete_bin = 1;
break;
@ -521,6 +583,18 @@ int main(int argc, char** argv) {
no_500_dir = 1;
break;
case '?':
PFATAL("Unrecognized option.");
break;
case 0:
if(!strcmp( "checks", long_options[oindex].name ))
display_injection_checks();
if(!strcmp( "checks-toggle", long_options[oindex].name ))
toggle_injection_checks((u8*)optarg, 1);
break;
default:
usage(argv[0]);
@ -542,6 +616,11 @@ int main(int argc, char** argv) {
if (!output_dir)
FATAL("Output directory not specified (try -h for help).");
if(verbosity && !no_statistics && isatty(2))
FATAL("Please use -v in combination with the -u flag or, "
"run skipfish while redirecting stderr to a file. ");
if (resp_tmout < rw_tmout)
resp_tmout = rw_tmout;
@ -567,8 +646,10 @@ int main(int argc, char** argv) {
}
if (!wordlist)
FATAL("Wordlist not specified (try -h for help; see dictionaries/README-FIRST).");
if (!wordlist) {
wordlist = (u8*)"/dev/null";
DEBUG("* No wordlist specified with -W defaulting to /dev/null..\n");
}
load_keywords(wordlist, 0, purge_age);
@ -614,12 +695,13 @@ int main(int argc, char** argv) {
st_time = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
#ifdef SHOW_SPLASH
if (!be_quiet) splash_screen();
if (!no_statistics) splash_screen();
#endif /* SHOW_SPLASH */
if (!be_quiet) SAY("\x1b[H\x1b[J");
if (!no_statistics) SAY("\x1b[H\x1b[J");
else SAY(cLGN "[*] " cBRI "Scan in progress, please stay tuned...\n");
/* Enter the crawler loop */
while ((next_from_queue() && !stop_soon) || (!show_once++)) {
u8 keybuf[8];
@ -639,7 +721,7 @@ int main(int argc, char** argv) {
req_sec = (req_count - queue_cur / 1.15) * 1000 / (run_time + 1);
if (be_quiet || ((loop_cnt++ % 100) && !show_once && idle == 0))
if (no_statistics || ((loop_cnt++ % 100) && !show_once && idle == 0))
continue;
if (clear_screen) {
@ -699,7 +781,7 @@ int main(int argc, char** argv) {
destroy_database();
destroy_http();
destroy_signatures();
__AD_report();
__TRK_report();
}
#endif /* DEBUG_ALLOCATOR */

View File

@ -39,4 +39,10 @@ typedef int64_t s64;
#define R(_ceil) ((u32)(random() % (_ceil)))
#ifndef MIN
# define MIN(_a,_b) ((_a) > (_b) ? (_b) : (_a))
# define MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b))
#endif /* !MIN */
#endif /* ! _HAVE_TYPES_H */