mirror of
https://github.com/ZeroTier/ZeroTierOne
synced 2025-07-05 20:41:44 -07:00
Put old test code in attic.
This commit is contained in:
parent
9aee72099e
commit
0a3ef38cad
13 changed files with 0 additions and 0 deletions
BIN
attic/big-http-test/2015-11-10_01_50000.out.xz
Normal file
BIN
attic/big-http-test/2015-11-10_01_50000.out.xz
Normal file
Binary file not shown.
BIN
attic/big-http-test/2015-11-10_02_50000.out.xz
Normal file
BIN
attic/big-http-test/2015-11-10_02_50000.out.xz
Normal file
Binary file not shown.
BIN
attic/big-http-test/2015-11-10_03_12500_ec2-east-only.out.xz
Normal file
BIN
attic/big-http-test/2015-11-10_03_12500_ec2-east-only.out.xz
Normal file
Binary file not shown.
24
attic/big-http-test/Dockerfile
Normal file
24
attic/big-http-test/Dockerfile
Normal file
|
@ -0,0 +1,24 @@
|
|||
# Builds the ZeroTier HTTP test agent image.
# Run with: docker run --device=/dev/net/tun --privileged (see README.md)
FROM centos:latest

MAINTAINER https://www.zerotier.com/

EXPOSE 9993/udp

# Install Node.js from the NodeSource repository (repo file ships with this dir)
ADD nodesource-el.repo /etc/yum.repos.d/nodesource-el.repo
RUN yum -y update && yum install -y nodejs && yum clean all

# Pre-create the ZeroTier home; touching <networkId>.conf makes the service
# join that network on startup. Per README.md, edit this ID to the network
# you want to run tests over.
RUN mkdir -p /var/lib/zerotier-one
RUN mkdir -p /var/lib/zerotier-one/networks.d
RUN touch /var/lib/zerotier-one/networks.d/ffffffffffffffff.conf

# Install the agent's npm dependencies
ADD package.json /
RUN npm install

# The zerotier-one binary must be present in the build context
ADD zerotier-one /
RUN chmod a+x /zerotier-one

# Test agent and container entrypoint
ADD agent.js /
ADD docker-main.sh /
RUN chmod a+x /docker-main.sh

CMD ["./docker-main.sh"]
|
12
attic/big-http-test/README.md
Normal file
12
attic/big-http-test/README.md
Normal file
|
@ -0,0 +1,12 @@
|
|||
HTTP one-to-all test
|
||||
======
|
||||
|
||||
*This is really internal use code. You're free to test it out but expect to do some editing/tweaking to make it work. We used this to run some massive scale tests of our new geo-cluster-based root server infrastructure prior to taking it live.*
|
||||
|
||||
Before using this code you will want to edit agent.js to change SERVER_HOST to the IP address of where you will run server.js. This should typically be an open Internet IP, since this makes reporting not dependent upon the thing being tested. Also note that this thing does no security of any kind. It's designed for one-off tests run over a short period of time, not to be anything that runs permanently. You will also want to edit the Dockerfile if you want to build containers and change the network ID to the network you want to run tests over.
|
||||
|
||||
This code can be deployed across a large number of VMs or containers to test and benchmark HTTP traffic within a virtual network at scale. The agent acts as a server and can query other agents, while the server collects agent data and tells agents about each other. It's designed to use RFC4193-based ZeroTier IPv6 addresses within the cluster, which allows the easy provisioning of a large cluster without IP conflicts.
|
||||
|
||||
The Dockerfile builds an image that launches the agent. The image must be "docker run" with "--device=/dev/net/tun --privileged" to permit it to open a tun/tap device within the container. (Unfortunately CAP_NET_ADMIN may not work due to a bug in Docker and/or Linux.) You can run a bunch with a command like:
|
||||
|
||||
for ((n=0;n<10;n++)); do docker run --device=/dev/net/tun --privileged -d zerotier/http-test; done
|
196
attic/big-http-test/agent.js
Normal file
196
attic/big-http-test/agent.js
Normal file
|
@ -0,0 +1,196 @@
|
|||
// ZeroTier distributed HTTP test agent

// ---------------------------------------------------------------------------
// Customizable parameters:

// Time between startup and first test attempt (ms)
var TEST_STARTUP_LAG = 10000;

// Maximum interval between test attempts (actual timing is random % this) (ms)
var TEST_INTERVAL_MAX = (60000 * 10);

// Test timeout in ms
var TEST_TIMEOUT = 30000;

// Where should I get other agents' IDs and POST results?
// Per README.md: set this to the public IP where server.js runs.
var SERVER_HOST = '52.26.196.147';
var SERVER_PORT = 18080;

// Which port do agents use to serve up test data to each other?
var AGENT_PORT = 18888;

// Payload size in bytes
var PAYLOAD_SIZE = 5000;

// ---------------------------------------------------------------------------

var ipaddr = require('ipaddr.js');
var os = require('os');
var http = require('http');
var async = require('async'); // NOTE(review): appears unused in this file -- confirm before removing

var express = require('express');
var app = express();
|
||||
|
||||
// Find our ZeroTier-assigned RFC4193 IPv6 address; its 32-hex-digit form
// doubles as this agent's unique ID. Exits the process if none is found.
var thisAgentId = null;
var interfaces = os.networkInterfaces();
if (!interfaces) {
	console.error('FATAL: os.networkInterfaces() failed.');
	process.exit(1);
}
for(var ifname in interfaces) {
	var ifaddrs = interfaces[ifname];
	if (Array.isArray(ifaddrs)) {
		for(var i=0;i<ifaddrs.length;++i) {
			if (ifaddrs[i].family == 'IPv6') {
				try {
					var ipbytes = ipaddr.parse(ifaddrs[i].address).toByteArray();
					// ZeroTier RFC4193 addresses start with 0xfd and carry
					// 0x99,0x93 ("9993") at bytes 9-10
					if ((ipbytes.length === 16)&&(ipbytes[0] == 0xfd)&&(ipbytes[9] == 0x99)&&(ipbytes[10] == 0x93)) {
						// Render all 16 bytes as zero-padded lowercase hex
						thisAgentId = '';
						for(var j=0;j<16;++j) {
							var tmp = ipbytes[j].toString(16);
							if (tmp.length === 1)
								thisAgentId += '0';
							thisAgentId += tmp;
						}
					}
				} catch (e) {
					// Unparseable address on this interface; skip it
					console.error(e);
				}
			}
		}
	}
}
if (thisAgentId === null) {
	console.error('FATAL: no ZeroTier-assigned RFC4193 IPv6 addresses found on any local interface!');
	process.exit(1);
}

//console.log(thisAgentId);
|
||||
|
||||
// Create a random (and therefore not very compressable) payload.
// Buffer.alloc() replaces the deprecated/unsafe `new Buffer(size)`, which
// returned uninitialized memory; every byte is overwritten below anyway.
var payload = Buffer.alloc(PAYLOAD_SIZE);
for(var xx=0;xx<PAYLOAD_SIZE;++xx) {
	// Math.round slightly under-weights 0 and 255 vs. the other values,
	// which is irrelevant for an incompressibility payload.
	payload.writeUInt8(Math.round(Math.random() * 255.0),xx);
}
|
||||
|
||||
// Convert a 32-hex-digit agent ID back into the colon-separated IPv6
// address string it was derived from (eight groups of four hex digits).
function agentIdToIp(agentId)
{
	var groups = [];
	for(var pos=0;pos<32;pos+=4)
		groups.push(agentId.substr(pos,4));
	return groups.join(':');
};
|
||||
|
||||
var lastTestResult = null;
|
||||
var allOtherAgents = {};
|
||||
|
||||
// Run one test cycle: POST our previous test result (if any) to the
// coordination server, receive a list of peer agent IDs, pick a random
// target, and time an HTTP GET of its payload. Reschedules itself via
// setTimeout in every completion path.
function doTest()
{
	var submit = http.request({
		host: SERVER_HOST,
		port: SERVER_PORT,
		path: '/'+thisAgentId,
		method: 'POST'
	},function(res) {
		var body = '';
		res.on('data',function(chunk) { body += chunk.toString(); });
		res.on('end',function() {

			// Merge whatever agent IDs the server returned into our known set;
			// malformed JSON is silently ignored (best-effort).
			if (body) {
				try {
					var peers = JSON.parse(body);
					if (Array.isArray(peers)) {
						for(var xx=0;xx<peers.length;++xx)
							allOtherAgents[peers[xx]] = true;
					}
				} catch (e) {}
			}

			var agents = Object.keys(allOtherAgents);
			if (agents.length > 1) {

				// Pick a random target other than ourselves
				var target = agents[Math.floor(Math.random() * agents.length)];
				while (target === thisAgentId)
					target = agents[Math.floor(Math.random() * agents.length)];

				// Arm a timeout that aborts the in-flight request;
				// timeoutId === null afterward serves as the "timed out" flag.
				var testRequest = null;
				var timeoutId = null;
				timeoutId = setTimeout(function() {
					if (testRequest !== null)
						testRequest.abort();
					timeoutId = null;
				},TEST_TIMEOUT);
				var startTime = Date.now();

				// Fetch the target agent's payload and measure elapsed time
				testRequest = http.get({
					host: agentIdToIp(target),
					port: AGENT_PORT,
					path: '/'
				},function(res) {
					var bytes = 0;
					res.on('data',function(chunk) { bytes += chunk.length; });
					res.on('end',function() {
						// Record the result; it is reported on the NEXT doTest() POST
						lastTestResult = {
							source: thisAgentId,
							target: target,
							time: (Date.now() - startTime),
							bytes: bytes,
							timedOut: (timeoutId === null),
							error: null
						};
						if (timeoutId !== null)
							clearTimeout(timeoutId);
						// Schedule the next test at a random interval
						return setTimeout(doTest,Math.round(Math.random() * TEST_INTERVAL_MAX) + 1);
					});
				}).on('error',function(e) {
					// GET failed (connection refused, aborted by timeout, etc.)
					lastTestResult = {
						source: thisAgentId,
						target: target,
						time: (Date.now() - startTime),
						bytes: 0,
						timedOut: (timeoutId === null),
						error: e.toString()
					};
					if (timeoutId !== null)
						clearTimeout(timeoutId);
					return setTimeout(doTest,Math.round(Math.random() * TEST_INTERVAL_MAX) + 1);
				});

			} else {
				// Not enough peers known yet; retry shortly
				return setTimeout(doTest,1000);
			}

		});
	}).on('error',function(e) {
		// Coordination server unreachable; retry shortly
		console.log('POST failed: '+e.toString());
		return setTimeout(doTest,1000);
	});
	// Attach the previous cycle's result (if any) as the POST body, then send
	if (lastTestResult !== null) {
		submit.write(JSON.stringify(lastTestResult));
		lastTestResult = null;
	}
	submit.end();
};
|
||||
|
||||
// Agents just serve up a test payload
app.get('/',function(req,res) { return res.status(200).send(payload); });

var expressServer = app.listen(AGENT_PORT,function () {
	// Start timeout-based loop.
	// Fix: pass the function reference, not its return value. The original
	// `setTimeout(doTest(),TEST_STARTUP_LAG)` invoked doTest() immediately
	// (defeating the startup lag) and handed setTimeout undefined.
	setTimeout(doTest,TEST_STARTUP_LAG);
});
|
9
attic/big-http-test/big-test-kill.sh
Executable file
9
attic/big-http-test/big-test-kill.sh
Executable file
|
@ -0,0 +1,9 @@
|
|||
#!/bin/bash

# Kills all running Docker containers on all big-test-hosts

export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin

# pssh: -h reads the host list file, -x '-t -t' forces a tty for sudo,
# -i prints output inline, -t 0 disables the per-host timeout, -p 256 runs
# up to 256 hosts in parallel; the -O options skip host-key checking.
# xargs -r: do nothing when `docker ps -aq` lists no containers.
pssh -h big-test-hosts -x '-t -t' -i -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -t 0 -p 256 "sudo docker ps -aq | xargs -r sudo docker rm -f"

exit 0
|
13
attic/big-http-test/big-test-start.sh
Executable file
13
attic/big-http-test/big-test-start.sh
Executable file
|
@ -0,0 +1,13 @@
|
|||
#!/bin/bash

# Launches the test containers across every host listed in big-test-hosts.

# More than 500 containers per host seems to result in a lot of sporadic failures, probably due to Linux kernel scaling issues with virtual network ports
# 250 with a 16GB RAM VM like Amazon m4.xlarge seems good
NUM_CONTAINERS=250
CONTAINER_IMAGE=zerotier/http-test
SCALE_UP_DELAY=10 # seconds between container launches on each host

export PATH=/bin:/usr/bin:/usr/local/bin:/usr/sbin:/sbin

# Raise the conntrack table limit, then start NUM_CONTAINERS on each host.
# Note: $NUM_CONTAINERS etc. are expanded locally (double quotes) before
# pssh ships the command to the remote hosts.
pssh -h big-test-hosts -x '-t -t' -i -OUserKnownHostsFile=/dev/null -OStrictHostKeyChecking=no -t 0 -p 256 "sudo sysctl -w net.netfilter.nf_conntrack_max=262144 ; for ((n=0;n<$NUM_CONTAINERS;n++)); do sudo docker run --device=/dev/net/tun --privileged -d $CONTAINER_IMAGE; sleep $SCALE_UP_DELAY; done"

exit 0
|
65
attic/big-http-test/crunch-results.js
Normal file
65
attic/big-http-test/crunch-results.js
Normal file
|
@ -0,0 +1,65 @@
|
|||
//
// Pipe the output of server.js into this to convert raw test results into bracketed statistics
// suitable for graphing.
//

// Time duration per statistical bracket (ms)
var BRACKET_SIZE = 10000;

// Number of bytes expected from each test
var EXPECTED_BYTES = 5000;

// Read the server's CSV log line-by-line from stdin
var readline = require('readline');
var rl = readline.createInterface({
	input: process.stdin,
	output: process.stdout,
	terminal: false
});

// Per-bracket counters (reset each time a bracket is emitted)
var count = 0.0;
var totalFailures = 0.0;
var totalMs = 0;
var totalData = 0;
// Running totals across the whole input (never reset)
var overallCount = 0.0;
var totalOverallFailures = 0.0;
// Every device ID ever seen as source or target
var devices = {};
// Timestamp that opened the current bracket (0 = not started yet)
var lastBracketTs = 0;
|
||||
|
||||
// Parse one CSV record: ts,fromId,toId,ms,bytes,timedOut,errMsg.
// Accumulates per-bracket and overall statistics and emits one CSV output
// line per completed BRACKET_SIZE window.
rl.on('line',function(line) {
	line = line.trim();
	var ls = line.split(',');
	if (ls.length === 7) {
		var ts = parseInt(ls[0],10); // always pass a radix to parseInt
		var fromId = ls[1];
		var toId = ls[2];
		var ms = parseFloat(ls[3]);
		var bytes = parseInt(ls[4],10);
		var timedOut = (ls[5] === 'true');
		var errMsg = ls[6]; // unused here, but documents the column layout

		count += 1.0;
		overallCount += 1.0;
		// A test "failed" if it timed out or delivered the wrong byte count
		if ((bytes !== EXPECTED_BYTES)||(timedOut)) {
			totalFailures += 1.0;
			totalOverallFailures += 1.0;
		}
		totalMs += ms;
		totalData += bytes;

		devices[fromId] = true;
		devices[toId] = true;

		// First record opens the first bracket
		if (lastBracketTs === 0)
			lastBracketTs = ts;

		if (((ts - lastBracketTs) >= BRACKET_SIZE)&&(count > 0.0)) {
			// Emit: count, overallCount, mean ms, bracket failure rate,
			// overall failure rate, bracket bytes, distinct devices seen
			console.log(count.toString()+','+overallCount.toString()+','+(totalMs / count)+','+(totalFailures / count)+','+(totalOverallFailures / overallCount)+','+totalData+','+Object.keys(devices).length);

			// Reset per-bracket counters; overall counters keep accumulating
			count = 0.0;
			totalFailures = 0.0;
			totalMs = 0;
			totalData = 0;
			lastBracketTs = ts;
		}
	} // else ignore junk
});
|
16
attic/big-http-test/docker-main.sh
Executable file
16
attic/big-http-test/docker-main.sh
Executable file
|
@ -0,0 +1,16 @@
|
|||
#!/bin/bash

# Container entrypoint: start ZeroTier, wait for its network interface to
# come up, then run the test agent.

export PATH=/bin:/usr/bin:/usr/local/bin:/sbin:/usr/sbin

# Start the ZeroTier service daemonized, logging to zerotier-one.out
/zerotier-one -d >>zerotier-one.out 2>&1

# Wait for ZeroTier to start and join the network
# (the zt0 entry appears once the tap interface exists)
while [ ! -d "/proc/sys/net/ipv6/conf/zt0" ]; do
	sleep 0.25
done

# Wait just a bit longer for stuff to settle
sleep 5

# Replace this shell with the agent; --harmony enables ES6 features on the
# older Node.js this image installs
exec node --harmony /agent.js >>agent.out 2>&1
# Debug variant (logs to stdout instead of agent.out):
#exec node --harmony /agent.js
|
6
attic/big-http-test/nodesource-el.repo
Normal file
6
attic/big-http-test/nodesource-el.repo
Normal file
|
@ -0,0 +1,6 @@
|
|||
# NodeSource yum repository definition: Node.js 4.x packages for EL7,
# referenced by the Dockerfile before `yum install nodejs`.
[nodesource]
name=Node.js Packages for Enterprise Linux 7 - $basearch
baseurl=https://rpm.nodesource.com/pub_4.x/el/7/$basearch
failovermethod=priority
enabled=1
# NOTE(review): gpgcheck=0 disables package signature verification --
# acceptable for throwaway test containers, but confirm before wider reuse.
gpgcheck=0
|
16
attic/big-http-test/package.json
Normal file
16
attic/big-http-test/package.json
Normal file
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"name": "zerotier-test-http",
|
||||
"version": "1.0.0",
|
||||
"description": "ZeroTier in-network HTTP test",
|
||||
"main": "agent.js",
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"author": "ZeroTier, Inc.",
|
||||
"license": "GPL-3.0",
|
||||
"dependencies": {
|
||||
"async": "^1.5.0",
|
||||
"express": "^4.13.3",
|
||||
"ipaddr.js": "^1.0.3"
|
||||
}
|
||||
}
|
53
attic/big-http-test/server.js
Normal file
53
attic/big-http-test/server.js
Normal file
|
@ -0,0 +1,53 @@
|
|||
// ZeroTier distributed HTTP test coordinator and result-reporting server

// ---------------------------------------------------------------------------
// Customizable parameters:

var SERVER_PORT = 18080;

// ---------------------------------------------------------------------------

var fs = require('fs'); // NOTE(review): appears unused in this file -- confirm before removing

var express = require('express');
var app = express();

// Middleware: accumulate the raw request body as a string on req.rawBody
// before any route handler runs (agents POST their results as raw JSON text).
app.use(function(req,res,next) {
	req.rawBody = '';
	req.on('data', function(chunk) { req.rawBody += chunk.toString(); });
	req.on('end', function() { return next(); });
});
|
||||
|
||||
// Set of every agent ID that has ever reported in
var knownAgents = {};

// Accept a result report from an agent (identified by its 32-hex-digit ID in
// the URL), log it as CSV, and reply with a list of known peer IDs.
app.post('/:agentId',function(req,res) {
	var agentId = req.params.agentId;
	if ((!agentId)||(agentId.length !== 32))
		return res.status(404).send('');

	// If the agent attached its previous test result, emit one CSV log line;
	// malformed bodies are ignored (best-effort).
	if (req.rawBody) {
		var receiveTime = Date.now();
		var resultData = null;
		try {
			resultData = JSON.parse(req.rawBody);
			console.log(Date.now().toString()+','+resultData.source+','+resultData.target+','+resultData.time+','+resultData.bytes+','+resultData.timedOut+',"'+((resultData.error) ? resultData.error : '')+'"');
		} catch (e) {}
	}

	knownAgents[agentId] = true;

	// Build the peer list for the reply: everyone if the population is small,
	// otherwise a random sample of 100 (duplicates possible).
	var agents = Object.keys(knownAgents);
	var thisUpdate;
	if (agents.length >= 100) {
		thisUpdate = [];
		for(var i=0;i<100;++i) {
			var pick = Math.floor(Math.random() * agents.length);
			thisUpdate.push(agents[pick]);
		}
	} else {
		thisUpdate = agents;
	}

	return res.status(200).send(JSON.stringify(thisUpdate));
});
|
||||
|
||||
// Start listening and log readiness so operators can confirm the port
var expressServer = app.listen(SERVER_PORT,function () {
	console.log('LISTENING ON '+SERVER_PORT);
	console.log('');
});
|
Loading…
Add table
Add a link
Reference in a new issue