import os
import sys
import urllib.request
from common import *

def find_web_servers(target, infile, outfile):
    ''' Probe domains for http/https servers '''
    print_bold_green("Probing for HTTP/HTTPS servers")
    if not os.path.exists(target + "/" + outfile):
        # Strip the scheme from httprobe output and de-duplicate the hosts
        cmdstring = ("cat " + target + "/" + infile +
                     " | httprobe -c 100 | sed 's/https\\?:\\/\\///' | sort | uniq > " +
                     target + "/" + outfile)
        os.system(cmdstring)
        cmdstring = "echo " + target + " >> " + target + "/" + outfile
        os.system(cmdstring)
    else:
        print_yellow("Previous httprobe results exist. Skipping.")

def run_hakcrawler(target, infile, outfile):
    ''' Use hakrawler to extract a list of endpoints from a file of domain names '''
    print_message("green", "Crawling HTTP/HTTPS servers for urls")
    if not os.path.exists(target + "/" + outfile):
        with open(target + "/" + infile) as f:
            http_servers = [line.rstrip() for line in f]
        for server in http_servers:
            print_yellow(server)
            cmdstring = "hakrawler -url " + server + " -plain >> " + target + "/" + outfile
            os.system(cmdstring)
    else:
        print_yellow("Previous hakrawler results exist. Skipping.")

def run_getallurls(target, outfile):
    ''' Gather URLs from a variety of sources '''
    print_bold_green("Getting known links from alienvault, wayback and common crawl")
    if not os.path.exists(target + "/" + outfile):
        cmdstring = "echo https://" + target + " | gau -subs > " + target + "/" + outfile
        os.system(cmdstring)
    else:
        print_yellow("Previous getallurls results exist. Skipping.")

def find_injection_points(target, infile, outfile):
    ''' Extract endpoints more likely to yield reflected XSS from a file '''
    print_bold_green("Extracting endpoints to test for XSS")
    if not os.path.exists(target + "/" + outfile):
        # Keep parameterised URLs, drop static assets, and normalise query values with qsreplace
        cmdstring = "cat " + target + "/" + infile + " | grep \"=\" | " + \
            "egrep -iv \"\\.(jpg|jpeg|gif|css|tif|tiff|png|ttf|woff|woff2|ico|pdf|svg|txt|js)\" | " + \
            "qsreplace -a > " + target + "/" + outfile
        os.system(cmdstring)
    else:
        print_yellow("Previous xss injection point results exist. Skipping.")

# TODO: the infile/outfile existence checks should be extracted into their own function. DRY.
def validate_links(target, responsecode, infile, outfile):
    ''' Check the response code of links from a file '''
    print_message("green", "Checking which links return a " + str(responsecode) + " response code")
    if not os.path.exists(target + "/" + outfile):
        try:
            with open(target + "/" + infile, 'r') as rawlinksfile:
                # Read the links once so they can be both counted and iterated
                links = [line.rstrip("\n") for line in rawlinksfile]
            lines = len(links)
            count = 0
            print_message("grey", "Links to check: " + str(lines))
            for link in links:
                count += 1
                try:
                    if urllib.request.urlopen(link).getcode() == responsecode:
                        with open(target + "/" + outfile, 'a') as validatedlinksfile:
                            validatedlinksfile.write(link + "\n")
                        print_message("green", "Response " + str(responsecode) + ": " + link)
                except Exception:
                    print_message("red", "Link " + str(count) + "/" + str(lines) + " doesn't respond as " + str(responsecode))
        except IOError:
            print_message("red", "Input file " + infile + " does not appear to exist.")
    else:
        print_message("yellow", "Previous link checking results exist. Skipping.")