New upstream version 3.1.1
This commit is contained in:
parent
4e9934e5ec
commit
e7b41df57b
229 changed files with 57000 additions and 12055 deletions
9
scripts/README.md
Normal file
9
scripts/README.md
Normal file
|
@ -0,0 +1,9 @@
|
|||
This directory contains executables that are not compiled. Some of these may
|
||||
end up installed for use by end users, but many of them are for use during
|
||||
development, builds and tests.
|
||||
|
||||
Nothing in this directory should need compiling to use and they should be
|
||||
written such that they do not need configuring (e.g: they might probe several
|
||||
directories for their requirements)
|
||||
|
||||
See the [Scripts Documentation](../docs/Scripts.md) for further details
|
38
scripts/cmake_all.sh
Executable file
38
scripts/cmake_all.sh
Executable file
|
@ -0,0 +1,38 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Well, cmake might be approximately the same as ./configure && make, but it
|
||||
# never rolls off the fingers as easily
|
||||
#
|
||||
|
||||
if [ ! -f CMakeLists.txt ]; then
|
||||
echo ERROR: run this script from the TOPDIR
|
||||
exit 1
|
||||
fi
|
||||
|
||||
OPTS=""
|
||||
#OPTS+=" -DN2N_OPTION_USE_PTHREAD=ON"
|
||||
#OPTS+=" -DN2N_OPTION_USE_OPENSSL=ON"
|
||||
#OPTS+=" -DN2N_OPTION_USE_CAPLIB=ON"
|
||||
#OPTS+=" -DN2N_OPTION_USE_PCAPLIB=ON"
|
||||
#OPTS+=" -DN2N_OPTION_USE_ZSTD=ON"
|
||||
#OPTS+=" -DN2N_OPTION_USE_PORTMAPPING=ON"
|
||||
|
||||
#OPTS+=" -DOPENSSL_USE_STATIC_LIBS=true"
|
||||
|
||||
set -e
|
||||
|
||||
rm -rf build
|
||||
|
||||
cmake -E make_directory build
|
||||
cd build
|
||||
|
||||
# Shell check wants me to use an array in this scenario. Bourne shell
|
||||
# arrays are my line in the sand showing that a script should not be
|
||||
# written in such a horrible language. Since it would be silly to rewrite
|
||||
# a one-page wrapper script in python, we submit that this check is wrong.
|
||||
# shellcheck disable=SC2086
|
||||
cmake .. $OPTS
|
||||
|
||||
cmake --build . --config Release
|
||||
|
||||
ctest
|
21
scripts/hack_fakeautoconf.sh
Executable file
21
scripts/hack_fakeautoconf.sh
Executable file
|
@ -0,0 +1,21 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Specifically for windows, where installing autoconf looks suspiciously
|
||||
# like boiling the ocean.
|
||||
|
||||
sed \
|
||||
-e "s%@CC@%gcc%g" \
|
||||
-e "s%@AR@%ar%g" \
|
||||
-e "s%@CFLAGS@%$CFLAGS%g" \
|
||||
-e "s%@LDFLAGS@%$LDFLAGS%g" \
|
||||
-e "s%@N2N_LIBS@%$LDLIBS%g" \
|
||||
< Makefile.in > Makefile
|
||||
|
||||
sed \
|
||||
-e "s%@ADDITIONAL_TOOLS@%%g" \
|
||||
< tools/Makefile.in > tools/Makefile
|
||||
|
||||
cat <<EOF >include/config.h
|
||||
#define PACKAGE_VERSION "FIXME"
|
||||
#define PACKAGE_OSNAME "FIXME"
|
||||
EOF
|
61
scripts/indent.sh
Executable file
61
scripts/indent.sh
Executable file
|
@ -0,0 +1,61 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Given one or more input source files, run a re-indenter on them.
|
||||
|
||||
help() {
|
||||
echo "Usage: scripts/indent [-i] [file...]"
|
||||
echo " -i modify file in place with reindent results"
|
||||
echo ""
|
||||
echo "By default, will output a diff and exitcode if changed are needed"
|
||||
echo "If modifying files, no exit code or diff is output"
|
||||
exit 1
|
||||
}
|
||||
|
||||
[ -z "$1" ] && help
|
||||
[ "$1" = "-h" ] && help
|
||||
[ "$1" = "--help" ] && help
|
||||
|
||||
INPLACE=0
|
||||
if [ "$1" = "-i" ]; then
|
||||
shift
|
||||
INPLACE=1
|
||||
fi
|
||||
|
||||
## indentOneClang() {
|
||||
## rm -f "$1.indent"
|
||||
## clang-format "$1" >"$1.indent"
|
||||
## if [ $? -ne 0 ]; then
|
||||
## echo "Error while formatting \"$1\""
|
||||
## RESULT=1
|
||||
## return
|
||||
## fi
|
||||
## diff -u "$1" "$1.indent"
|
||||
## if [ $? -ne 0 ]; then
|
||||
## RESULT=1
|
||||
## fi
|
||||
## }
|
||||
|
||||
indentOne() {
|
||||
IFILE="$1"
|
||||
if [ "$INPLACE" -eq 0 ]; then
|
||||
OFILE="$1.indent"
|
||||
rm -f "$OFILE"
|
||||
else
|
||||
OFILE="$1"
|
||||
fi
|
||||
if ! uncrustify -c uncrustify.cfg -f "$IFILE" -o "$OFILE"; then
|
||||
echo "Error while formatting \"$1\""
|
||||
RESULT=1
|
||||
return
|
||||
fi
|
||||
if ! diff -u "$IFILE" "$OFILE"; then
|
||||
RESULT=1
|
||||
fi
|
||||
}
|
||||
|
||||
RESULT=0
|
||||
while [ -n "$1" ]; do
|
||||
indentOne "$1"
|
||||
shift
|
||||
done
|
||||
exit $RESULT
|
|
@ -1,30 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
# This script makes a SRPM - a source RPM file which can be built into the
|
||||
# appropriate distro specific RPM for any platform.
|
||||
#
|
||||
# To build the binary package:
|
||||
# rpm -i n2n-<ver>.src.rpm
|
||||
# rpmbuild -bb n2n.spec
|
||||
#
|
||||
# Look for the "Wrote:" line to see where the final RPM is.
|
||||
#
|
||||
# To run this script cd to the n2n directory and run it as follows
|
||||
# scripts/mk_SRPMS.sh
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
set -x
|
||||
|
||||
BASE=`pwd`
|
||||
|
||||
TARFILE=`${BASE}/scripts/mk_tar.sh`
|
||||
|
||||
test -f ${TARFILE}
|
||||
|
||||
echo "Building SRPM"
|
||||
# -ts means build source RPM from tarfile
|
||||
rpmbuild -ts ${TARFILE}
|
||||
|
||||
echo "Done"
|
|
@ -1,46 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script makes a SRPM - a source RPM file which can be built into the
|
||||
# appropriate distro specific RPM for any platform.
|
||||
#
|
||||
# To build the binary package:
|
||||
# rpm -i n2n-<ver>.src.rpm
|
||||
# rpmbuild -bb n2n.spec
|
||||
#
|
||||
# Look for the "Wrote:" line to see where the final RPM is.
|
||||
#
|
||||
# To run this script cd to the n2n directory and run it as follows
|
||||
# scripts/mk_SRPMS.sh
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
set -x
|
||||
|
||||
BASE=`pwd`
|
||||
|
||||
TARFILE=`${BASE}/scripts/mk_tar.sh`
|
||||
TEMPDIR="build_deb"
|
||||
|
||||
test -f ${TARFILE}
|
||||
|
||||
echo "Building .deb"
|
||||
|
||||
if [ -d ${TEMPDIR} ]; then
|
||||
echo "Removing ${TEMPDIR} directory"
|
||||
rm -rf ${TEMPDIR} >&2
|
||||
fi
|
||||
|
||||
mkdir ${TEMPDIR}
|
||||
|
||||
pushd ${TEMPDIR}
|
||||
|
||||
tar xzf ${TARFILE} #At original location
|
||||
|
||||
cd n2n*
|
||||
|
||||
dpkg-buildpackage -rfakeroot
|
||||
|
||||
popd
|
||||
|
||||
echo "Done"
|
|
@ -1,104 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script makes a SRPM - a source RPM file which can be built into the
|
||||
# appropriate distro specific RPM for any platform.
|
||||
#
|
||||
# To build the binary package:
|
||||
# rpm -i n2n-<ver>.src.rpm
|
||||
# rpmbuild -bb n2n.spec
|
||||
#
|
||||
# Look for the "Wrote:" line to see where the final RPM is.
|
||||
#
|
||||
# To run this script cd to the n2n directory and run it as follows
|
||||
# scripts/mk_SRPMS.sh
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
function exit_fail()
|
||||
{
|
||||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
PACKAGE="n2n"
|
||||
PKG_VERSION="1.3"
|
||||
PKG_AND_VERSION="${PACKAGE}-${PKG_VERSION}"
|
||||
|
||||
TEMPDIR="tmp"
|
||||
|
||||
SOURCE_MANIFEST="
|
||||
README
|
||||
edge.c
|
||||
lzoconf.h
|
||||
lzodefs.h
|
||||
Makefile
|
||||
minilzo.c
|
||||
minilzo.h
|
||||
n2n.c
|
||||
n2n.h
|
||||
n2n.spec
|
||||
supernode.c
|
||||
tuntap_linux.c
|
||||
tuntap_freebsd.c
|
||||
tuntap_osx.c
|
||||
twofish.c
|
||||
twofish.h
|
||||
edge.8
|
||||
supernode.1
|
||||
debian/changelog
|
||||
debian/compat
|
||||
debian/control
|
||||
debian/copyright
|
||||
debian/n2n.dirs
|
||||
debian/n2n.docs
|
||||
debian/n2n.install
|
||||
debian/n2n.manpages
|
||||
debian/README.Debian
|
||||
debian/rules
|
||||
"
|
||||
|
||||
BASE=`pwd`
|
||||
|
||||
for F in ${SOURCE_MANIFEST}; do
|
||||
test -e $F || exit_fail "Cannot find $F. Maybe you're in the wrong directory. Please execute from n2n directory."; >&2
|
||||
done
|
||||
|
||||
echo "Found critical files. Proceeding." >&2
|
||||
|
||||
if [ -d ${TEMPDIR} ]; then
|
||||
echo "Removing ${TEMPDIR} directory"
|
||||
rm -rf ${TEMPDIR} >&2
|
||||
fi
|
||||
|
||||
mkdir ${TEMPDIR} >&2
|
||||
|
||||
pushd ${TEMPDIR} >&2
|
||||
|
||||
echo "Creating staging directory ${PWD}/${PKG_AND_VERSION}" >&2
|
||||
|
||||
if [ -d ${PKG_AND_VERSION} ] ; then
|
||||
echo "Removing ${PKG_AND_VERSION} directory"
|
||||
rm -rf ${PKG_AND_VERSION} >&2
|
||||
fi
|
||||
|
||||
mkdir ${PKG_AND_VERSION}
|
||||
|
||||
pushd ${BASE} >&2
|
||||
|
||||
echo "Copying in files" >&2
|
||||
for F in ${SOURCE_MANIFEST}; do
|
||||
cp --parents -a $F ${TEMPDIR}/${PKG_AND_VERSION}/
|
||||
done
|
||||
|
||||
popd >&2
|
||||
|
||||
TARFILE="${PKG_AND_VERSION}.tar.gz"
|
||||
echo "Creating ${TARFILE}" >&2
|
||||
tar czf ${BASE}/${TARFILE} ${PKG_AND_VERSION}
|
||||
|
||||
popd >&2
|
||||
|
||||
rm -rf ${TEMPDIR} >&2
|
||||
|
||||
echo ${BASE}/${TARFILE}
|
324
scripts/munin/n2n_
Executable file
324
scripts/munin/n2n_
Executable file
|
@ -0,0 +1,324 @@
|
|||
#!/usr/bin/env perl
|
||||
use warnings;
|
||||
use strict;
|
||||
#
|
||||
# Requires
|
||||
# libjson-perl
|
||||
#
|
||||
|
||||
# Magic Markers
|
||||
#
|
||||
#%# family=auto
|
||||
#%# capabilities=autoconf suggest
|
||||
|
||||
package JsonUDP;
|
||||
use warnings;
|
||||
use strict;
|
||||
|
||||
use IO::Socket::INET;
|
||||
use JSON;
|
||||
|
||||
sub new {
|
||||
my $class = shift;
|
||||
my $port = shift || 5644;
|
||||
my $self = {};
|
||||
bless($self, $class);
|
||||
|
||||
$self->{sock} = IO::Socket::INET->new(
|
||||
PeerAddr => '127.0.0.1',
|
||||
PeerPort => $port,
|
||||
Proto => 'udp',
|
||||
);
|
||||
$self->{json} = JSON->new->utf8->relaxed->pretty->canonical;
|
||||
$self->{tag} = 0;
|
||||
$self->{debug} = 0;
|
||||
return $self;
|
||||
}
|
||||
|
||||
sub _tx {
|
||||
my $self = shift;
|
||||
my $msgline = shift;
|
||||
return $self->{sock}->send($msgline);
|
||||
}
|
||||
|
||||
sub _rx {
|
||||
my $self = shift;
|
||||
my $tag = shift;
|
||||
|
||||
my $db = [];
|
||||
my $error;
|
||||
|
||||
while(1) {
|
||||
my $jsontxt;
|
||||
$self->{sock}->recv($jsontxt,1024);
|
||||
if ($self->{debug}) {
|
||||
print($jsontxt);
|
||||
}
|
||||
my $msg = $self->{json}->decode($jsontxt);
|
||||
|
||||
# ignore packets not for us
|
||||
if ($msg->{_tag} ne $tag) {
|
||||
next;
|
||||
}
|
||||
|
||||
# Save most recent error for return
|
||||
if ($msg->{_type} eq 'error') {
|
||||
$error = $msg;
|
||||
next;
|
||||
}
|
||||
|
||||
if ($msg->{_type} eq 'end') {
|
||||
if ($error) {
|
||||
# TODO: an error channel
|
||||
return undef;
|
||||
}
|
||||
return $db;
|
||||
}
|
||||
|
||||
if ($msg->{_type} eq 'row') {
|
||||
delete $msg->{_tag};
|
||||
delete $msg->{_type};
|
||||
push @$db, $msg;
|
||||
next;
|
||||
}
|
||||
|
||||
# Ignore any unknown _type
|
||||
}
|
||||
}
|
||||
|
||||
sub read {
|
||||
my $self = shift;
|
||||
my $cmdline = shift;
|
||||
my $tag = $self->{tag}++;
|
||||
|
||||
# TODO:
|
||||
# Add a read cache
|
||||
|
||||
$self->_tx(sprintf("r %i %s", $tag, $cmdline));
|
||||
return $self->_rx($tag);
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
package main;
|
||||
use warnings;
|
||||
use strict;
|
||||
|
||||
my $config = {
|
||||
edge_pkts => {
|
||||
p2p_tx_pkt => {
|
||||
label => 'Peer to Peer tx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
p2p_rx_pkt => {
|
||||
label => 'Peer to Peer rx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
super_tx_pkt => {
|
||||
label => 'Peer to Supernode tx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
super_rx_pkt => {
|
||||
label => 'Peer to Supernode rx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
super_broadcast_tx_pkt => {
|
||||
label => 'Broadcast to Supernode tx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
super_broadcast_rx_pkt => {
|
||||
label => 'Broadcast to Supernode rx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
transop_tx_pkt => {
|
||||
label => 'Transform tx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
transop_rx_pkt => {
|
||||
label => 'Transform rx rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
},
|
||||
edge_counts => {
|
||||
edges => {
|
||||
label => 'Current known peers',
|
||||
type => 'GAUGE',
|
||||
},
|
||||
supernodes => {
|
||||
label => 'Current known supernodes',
|
||||
type => 'GAUGE',
|
||||
},
|
||||
},
|
||||
supernode_pkts => {
|
||||
errors_tx_pkt => {
|
||||
label => 'Error rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
reg_super_rx_pkt => {
|
||||
label => 'Connect rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
reg_super_nak => {
|
||||
label => 'Connect error rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
forward_tx_pkt => {
|
||||
label => 'Packets forwarded rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
broadcast_tx_pkt => {
|
||||
label => 'Broadcast packet rate',
|
||||
type => 'DERIVE',
|
||||
min => 0,
|
||||
},
|
||||
},
|
||||
supernode_counts => {
|
||||
edges => {
|
||||
label => 'Current known edges',
|
||||
type => 'GAUGE',
|
||||
},
|
||||
communities => {
|
||||
label => 'Current known communities',
|
||||
type => 'GAUGE',
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
my $fetchinfo = {
|
||||
edge_pkts => {
|
||||
port => 5644,
|
||||
read => "packetstats",
|
||||
},
|
||||
edge_counts => {
|
||||
port => 5644,
|
||||
count => [
|
||||
"edges",
|
||||
"supernodes",
|
||||
],
|
||||
},
|
||||
supernode_pkts => {
|
||||
port => 5645,
|
||||
read => "packetstats",
|
||||
},
|
||||
supernode_counts => {
|
||||
port => 5645,
|
||||
count => [
|
||||
"edges",
|
||||
"communities",
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
sub do_config {
|
||||
my $rpc = shift;
|
||||
my $name = shift;
|
||||
|
||||
print("graph_title n2n $name status\n");
|
||||
print("graph_category network\n");
|
||||
my @names;
|
||||
while (my ($fieldname, $field) = each(%{$config->{$name}})) {
|
||||
push @names, $fieldname;
|
||||
while (my ($key, $val) = each(%{$field})) {
|
||||
print($fieldname.'.'.$key," ",$val,"\n");
|
||||
}
|
||||
}
|
||||
|
||||
# Ensure stable order
|
||||
print("graph_order ", join(' ', sort(@names)), "\n");
|
||||
}
|
||||
|
||||
sub do_fetch {
|
||||
my $rpc = shift;
|
||||
my $name = shift;
|
||||
my $db;
|
||||
|
||||
my $read_table = $fetchinfo->{$name}->{read};
|
||||
if (defined($read_table)) {
|
||||
$db = $rpc->read($read_table);
|
||||
for my $row (@$db) {
|
||||
my $type = $row->{type};
|
||||
delete $row->{type};
|
||||
while (my ($key, $val) = each(%{$row})) {
|
||||
my $metricname = $type."_".$key;
|
||||
print($metricname,".value ",$val,"\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
my $count_tables = $fetchinfo->{$name}->{count};
|
||||
if (defined($count_tables)) {
|
||||
for my $table (@{$count_tables}) {
|
||||
$db = $rpc->read($table);
|
||||
print($table,".value ", scalar(@$db), "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sub do_autoconf {
|
||||
# quick check to see if this plugin should be enabled
|
||||
if (`pgrep supernode`) {
|
||||
print("yes\n");
|
||||
} elsif (`pgrep edge`) {
|
||||
print("yes\n");
|
||||
} else {
|
||||
print("no - neither edge nor supernode are running\n");
|
||||
}
|
||||
}
|
||||
|
||||
sub do_suggest {
|
||||
my $ports = {};
|
||||
if (`pgrep supernode`) {
|
||||
$ports->{5645}=1;
|
||||
}
|
||||
if (`pgrep edge`) {
|
||||
$ports->{5644}=1;
|
||||
}
|
||||
|
||||
while (my ($name, $info) = each(%{$fetchinfo})) {
|
||||
my $port = $info->{port};
|
||||
next if (!defined($port)); # this not a real fetchinfo
|
||||
next if (!defined($ports->{$port})); # not linked to a running daemon
|
||||
print($name,"\n");
|
||||
}
|
||||
}
|
||||
|
||||
my $subc = {
|
||||
'fetch' => \&do_fetch,
|
||||
'config' => \&do_config,
|
||||
'autoconf' => \&do_autoconf,
|
||||
'suggest' => \&do_suggest,
|
||||
};
|
||||
|
||||
sub main() {
|
||||
my $name = $ARGV[1] || $0;
|
||||
$name =~ s%^.*/n2n_([^/]+)%$1%;
|
||||
|
||||
my $port = $fetchinfo->{$name}->{port};
|
||||
my $rpc = JsonUDP->new($port);
|
||||
|
||||
my $cmd = $ARGV[0];
|
||||
if (!defined($cmd)) {
|
||||
$cmd = 'fetch';
|
||||
}
|
||||
|
||||
my $func = $subc->{$cmd};
|
||||
if (!defined($func)) {
|
||||
die("bad sub command");
|
||||
}
|
||||
|
||||
return $func->($rpc, $name);
|
||||
}
|
||||
main();
|
||||
|
301
scripts/n2n-ctl
Executable file
301
scripts/n2n-ctl
Executable file
|
@ -0,0 +1,301 @@
|
|||
#!/usr/bin/env python3
|
||||
# Licensed under GPLv3
|
||||
#
|
||||
# Simple script to query the management interface of a running n2n edge node
|
||||
|
||||
import argparse
|
||||
import socket
|
||||
import json
|
||||
import collections
|
||||
|
||||
|
||||
class JsonUDP():
|
||||
"""encapsulate communication with the edge"""
|
||||
|
||||
def __init__(self, port):
|
||||
self.address = "127.0.0.1"
|
||||
self.port = port
|
||||
self.tag = 0
|
||||
self.key = None
|
||||
self.debug = False
|
||||
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
self.sock.settimeout(1)
|
||||
|
||||
def _next_tag(self):
|
||||
tagstr = str(self.tag)
|
||||
self.tag = (self.tag + 1) % 1000
|
||||
return tagstr
|
||||
|
||||
def _cmdstr(self, msgtype, cmdline):
|
||||
"""Create the full command string to send"""
|
||||
tagstr = self._next_tag()
|
||||
|
||||
options = [tagstr]
|
||||
if self.key is not None:
|
||||
options += ['1'] # Flags set for auth key field
|
||||
options += [self.key]
|
||||
optionsstr = ':'.join(options)
|
||||
|
||||
return tagstr, ' '.join((msgtype, optionsstr, cmdline))
|
||||
|
||||
def _rx(self, tagstr):
|
||||
"""Wait for rx packets"""
|
||||
|
||||
seen_begin = False
|
||||
while not seen_begin:
|
||||
# TODO: there are no timeouts with any of the recv calls
|
||||
data, _ = self.sock.recvfrom(1024)
|
||||
data = json.loads(data.decode('utf8'))
|
||||
|
||||
# TODO: We assume the first packet we get will be tagged for us
|
||||
assert(data['_tag'] == tagstr)
|
||||
|
||||
if data['_type'] == 'error':
|
||||
raise ValueError('Error: {}'.format(data['error']))
|
||||
|
||||
if data['_type'] == 'replacing':
|
||||
# a signal that we have evicted an earlier subscribe
|
||||
continue
|
||||
|
||||
if data['_type'] == 'subscribe':
|
||||
return True
|
||||
|
||||
if data['_type'] == 'begin':
|
||||
seen_begin = True
|
||||
|
||||
# Ideally, we would confirm that this is our "begin", but that
|
||||
# would need the cmd passed into this method, and that would
|
||||
# probably require parsing the cmdline passed to us :-(
|
||||
# assert(data['cmd'] == cmd)
|
||||
|
||||
continue
|
||||
|
||||
raise ValueError('Unknown data type {} from '
|
||||
'edge'.format(data['_type']))
|
||||
|
||||
result = list()
|
||||
error = None
|
||||
|
||||
while True:
|
||||
data, _ = self.sock.recvfrom(1024)
|
||||
data = json.loads(data.decode('utf8'))
|
||||
|
||||
if data['_tag'] != tagstr:
|
||||
# this packet is not for us, ignore it
|
||||
continue
|
||||
|
||||
if data['_type'] == 'error':
|
||||
# we still expect an end packet, so save the error
|
||||
error = ValueError('Error: {}'.format(data['error']))
|
||||
continue
|
||||
|
||||
if data['_type'] == 'end':
|
||||
if error:
|
||||
raise error
|
||||
return result
|
||||
|
||||
if data['_type'] != 'row':
|
||||
raise ValueError('Unknown data type {} from '
|
||||
'edge'.format(data['_type']))
|
||||
|
||||
# remove our boring metadata
|
||||
del data['_tag']
|
||||
del data['_type']
|
||||
|
||||
if self.debug:
|
||||
print(data)
|
||||
|
||||
result.append(data)
|
||||
|
||||
def _call(self, msgtype, cmdline):
|
||||
"""Perform a rpc call"""
|
||||
tagstr, msgstr = self._cmdstr(msgtype, cmdline)
|
||||
self.sock.sendto(msgstr.encode('utf8'), (self.address, self.port))
|
||||
return self._rx(tagstr)
|
||||
|
||||
def read(self, cmdline):
|
||||
return self._call('r', cmdline)
|
||||
|
||||
def write(self, cmdline):
|
||||
return self._call('w', cmdline)
|
||||
|
||||
def sub(self, cmdline):
|
||||
return self._call('s', cmdline)
|
||||
|
||||
def readevent(self):
|
||||
self.sock.settimeout(3600)
|
||||
|
||||
data, _ = self.sock.recvfrom(1024)
|
||||
data = json.loads(data.decode('utf8'))
|
||||
# assert(data['_tag'] == tagstr)
|
||||
assert(data['_type'] == 'event')
|
||||
|
||||
del data['_tag']
|
||||
del data['_type']
|
||||
return data
|
||||
|
||||
|
||||
def str_table(rows, columns, orderby):
|
||||
"""Given an array of dicts, do a simple table print"""
|
||||
result = list()
|
||||
widths = collections.defaultdict(lambda: 0)
|
||||
|
||||
if len(rows) == 0:
|
||||
# No data to show, be sure not to truncate the column headings
|
||||
for col in columns:
|
||||
widths[col] = len(col)
|
||||
else:
|
||||
for row in rows:
|
||||
for col in columns:
|
||||
if col in row:
|
||||
widths[col] = max(widths[col], len(str(row[col])))
|
||||
|
||||
for col in columns:
|
||||
if widths[col] == 0:
|
||||
widths[col] = 1
|
||||
result += "{:{}.{}} ".format(col, widths[col], widths[col])
|
||||
result += "\n"
|
||||
|
||||
if orderby is not None:
|
||||
rows = sorted(rows, key=lambda row: row.get(orderby, 0))
|
||||
|
||||
for row in rows:
|
||||
for col in columns:
|
||||
if col in row:
|
||||
data = row[col]
|
||||
else:
|
||||
data = ''
|
||||
result += "{:{}} ".format(data, widths[col])
|
||||
result += "\n"
|
||||
|
||||
return ''.join(result)
|
||||
|
||||
|
||||
def subcmd_show_supernodes(rpc, args):
|
||||
rows = rpc.read('supernodes')
|
||||
columns = [
|
||||
'version',
|
||||
'current',
|
||||
'macaddr',
|
||||
'sockaddr',
|
||||
'uptime',
|
||||
]
|
||||
|
||||
return str_table(rows, columns, args.orderby)
|
||||
|
||||
|
||||
def subcmd_show_edges(rpc, args):
|
||||
rows = rpc.read('edges')
|
||||
columns = [
|
||||
'mode',
|
||||
'ip4addr',
|
||||
'macaddr',
|
||||
'sockaddr',
|
||||
'desc',
|
||||
]
|
||||
|
||||
return str_table(rows, columns, args.orderby)
|
||||
|
||||
|
||||
def subcmd_show_help(rpc, args):
|
||||
result = 'Commands with pretty-printed output:\n\n'
|
||||
for name, cmd in subcmds.items():
|
||||
result += "{:12} {}\n".format(name, cmd['help'])
|
||||
|
||||
result += "\n"
|
||||
result += "Possble remote commands:\n"
|
||||
result += "(those without a pretty-printer will pass-through)\n\n"
|
||||
rows = rpc.read('help')
|
||||
for row in rows:
|
||||
result += "{:12} {}\n".format(row['cmd'], row['help'])
|
||||
return result
|
||||
|
||||
|
||||
subcmds = {
|
||||
'help': {
|
||||
'func': subcmd_show_help,
|
||||
'help': 'Show available commands',
|
||||
},
|
||||
'supernodes': {
|
||||
'func': subcmd_show_supernodes,
|
||||
'help': 'Show the list of supernodes',
|
||||
},
|
||||
'edges': {
|
||||
'func': subcmd_show_edges,
|
||||
'help': 'Show the list of edges/peers',
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def subcmd_default(rpc, args):
|
||||
"""Just pass command through to edge"""
|
||||
cmdline = ' '.join([args.cmd] + args.args)
|
||||
if args.write:
|
||||
rows = rpc.write(cmdline)
|
||||
elif args.read:
|
||||
rows = rpc.read(cmdline)
|
||||
elif args.sub:
|
||||
if not rpc.sub(cmdline):
|
||||
raise ValueError('Could not subscribe')
|
||||
while True:
|
||||
event = rpc.readevent()
|
||||
# FIXME: violates layering..
|
||||
print(json.dumps(event, sort_keys=True, indent=4))
|
||||
else:
|
||||
raise ValueError('Unknown request type')
|
||||
return json.dumps(rows, sort_keys=True, indent=4)
|
||||
|
||||
|
||||
def main():
|
||||
ap = argparse.ArgumentParser(
|
||||
description='Query the running local n2n edge')
|
||||
ap.add_argument('-t', '--mgmtport', action='store', default=5644,
|
||||
help='Management Port (default=5644)', type=int)
|
||||
ap.add_argument('-k', '--key', action='store',
|
||||
help='Password for mgmt commands')
|
||||
ap.add_argument('-d', '--debug', action='store_true',
|
||||
help='Also show raw internal data')
|
||||
ap.add_argument('--raw', action='store_true',
|
||||
help='Force cmd to avoid any pretty printing')
|
||||
ap.add_argument('--orderby', action='store',
|
||||
help='Hint to a pretty printer on how to sort')
|
||||
|
||||
group = ap.add_mutually_exclusive_group()
|
||||
group.add_argument('--read', action='store_true',
|
||||
help='Make a read request (default)')
|
||||
group.add_argument('--write', action='store_true',
|
||||
help='Make a write request (only to non pretty'
|
||||
'printed cmds)')
|
||||
group.add_argument('--sub', action='store_true',
|
||||
help='Make a subscribe request')
|
||||
|
||||
ap.add_argument('cmd', action='store',
|
||||
help='Command to run (try "help" for list)')
|
||||
ap.add_argument('args', action='store', nargs="*",
|
||||
help='Optional args for the command')
|
||||
|
||||
args = ap.parse_args()
|
||||
|
||||
if not args.read and not args.write and not args.sub:
|
||||
args.read = True
|
||||
|
||||
if args.raw or (args.cmd not in subcmds):
|
||||
func = subcmd_default
|
||||
else:
|
||||
func = subcmds[args.cmd]['func']
|
||||
|
||||
rpc = JsonUDP(args.mgmtport)
|
||||
rpc.debug = args.debug
|
||||
rpc.key = args.key
|
||||
|
||||
try:
|
||||
result = func(rpc, args)
|
||||
except socket.timeout as e:
|
||||
print(e)
|
||||
exit(1)
|
||||
|
||||
print(result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
76
scripts/n2n-gateway.sh
Executable file
76
scripts/n2n-gateway.sh
Executable file
|
@ -0,0 +1,76 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# This is a sample script to route all the host traffic towards a remote
|
||||
# gateway, which is reacheable via the n2n virtual interface.
|
||||
#
|
||||
# This assumes the n2n connection is already been established and the
|
||||
# VPN gateway can be pinged by this host.
|
||||
#
|
||||
|
||||
#######################################################
|
||||
# CONFIG
|
||||
#######################################################
|
||||
|
||||
# The IP address of the gateway through the n2n interface
|
||||
N2N_GATEWAY="192.168.100.1"
|
||||
|
||||
# The IP address of the supernode as configured in n2n
|
||||
N2N_SUPERNODE="1.2.3.4"
|
||||
|
||||
# The n2n interface name
|
||||
N2N_INTERFACE="n2n0"
|
||||
|
||||
# The DNS server to use. Must be a public DNS or a DNS located on the
|
||||
# N2N virtual network, otherwise DNS query information will be leaked
|
||||
# outside the VPN.
|
||||
DNS_SERVER="8.8.8.8"
|
||||
|
||||
#######################################################
|
||||
# END CONFIG
|
||||
#######################################################
|
||||
|
||||
if [[ $UID -ne 0 ]]; then
|
||||
echo "This script must be run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! ip route get $N2N_GATEWAY | grep -q $N2N_INTERFACE ; then
|
||||
echo "Cannot reach the gateway ($N2N_GATEWAY) via $N2N_INTERFACE. Is edge running?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Determine the current internet gateway
|
||||
internet_gateway=$(ip route get 8.8.8.8 | head -n1 | awk '{ print $3 }')
|
||||
|
||||
# Backup the DNS resolver configuration and use the specified server
|
||||
cp /etc/resolv.conf /etc/resolv.conf.my_bak
|
||||
echo "Using DNS server $DNS_SERVER"
|
||||
echo "nameserver $DNS_SERVER" > /etc/resolv.conf
|
||||
|
||||
# The public IP of the supernode must be reachable via the internet gateway
|
||||
# Whereas all the other traffic will go through the new VPN gateway.
|
||||
ip route add $N2N_SUPERNODE via "$internet_gateway"
|
||||
ip route del default
|
||||
echo "Forwarding traffic via $N2N_GATEWAY"
|
||||
ip route add default via $N2N_GATEWAY
|
||||
|
||||
function stopService {
|
||||
echo "Deleting custom routes"
|
||||
ip route del default
|
||||
ip route del $N2N_SUPERNODE via "$internet_gateway"
|
||||
|
||||
echo "Restoring original gateway $internet_gateway"
|
||||
ip route add default via "$internet_gateway"
|
||||
|
||||
echo "Restoring original DNS"
|
||||
mv /etc/resolv.conf.my_bak /etc/resolv.conf
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
||||
# setup signal handlers
|
||||
trap "stopService" SIGHUP SIGINT SIGTERM
|
||||
|
||||
# enter wait loop
|
||||
echo "VPN is now up"
|
||||
while :; do sleep 300; done
|
472
scripts/n2n-httpd
Executable file
472
scripts/n2n-httpd
Executable file
|
@ -0,0 +1,472 @@
|
|||
#!/usr/bin/env python3
|
||||
# Licensed under GPLv3
|
||||
#
|
||||
# Simple http server to allow user control of n2n edge nodes
|
||||
#
|
||||
# Currently only for demonstration
|
||||
# - needs nicer looking html written
|
||||
# - needs more json interfaces in edge
|
||||
#
|
||||
# Try it out with
|
||||
# http://localhost:8080/
|
||||
# http://localhost:8080/edge/edges
|
||||
# http://localhost:8080/edge/supernodes
|
||||
|
||||
import argparse
|
||||
import socket
|
||||
import json
|
||||
import socketserver
|
||||
import http.server
|
||||
import signal
|
||||
import functools
|
||||
import base64
|
||||
|
||||
from http import HTTPStatus
|
||||
|
||||
import os
|
||||
import sys
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
|
||||
|
||||
def import_filename(modulename, filename):
|
||||
# look in the same dir as this script
|
||||
pathname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||
filename)
|
||||
loader = importlib.machinery.SourceFileLoader(modulename, pathname)
|
||||
spec = importlib.util.spec_from_loader(modulename, loader)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
|
||||
try:
|
||||
loader.exec_module(module)
|
||||
except FileNotFoundError:
|
||||
print("Script {} not found".format(pathname), file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return module
|
||||
|
||||
|
||||
# We share the implementation of the RPC class with the n2n-ctl script. We
|
||||
# cannot just import the module as 'n2n-ctl' has a dash in its name :-(
|
||||
JsonUDP = import_filename('n2nctl', 'n2n-ctl').JsonUDP
|
||||
|
||||
|
||||
pages = {
|
||||
"/script.js": {
|
||||
"content_type": "text/javascript",
|
||||
"content": """
|
||||
var verbose=-1;
|
||||
|
||||
function rows2verbose(id, unused, data) {
|
||||
row0 = data[0]
|
||||
verbose = row0['traceLevel']
|
||||
|
||||
let div = document.getElementById(id);
|
||||
div.innerHTML=verbose
|
||||
}
|
||||
|
||||
function rows2keyvalue(id, keys, data) {
|
||||
let s = "<table border=1 cellspacing=0>"
|
||||
data.forEach((row) => {
|
||||
keys.forEach((key) => {
|
||||
if (key in row) {
|
||||
s += "<tr><th>" + key + "<td>" + row[key];
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
s += "</table>"
|
||||
let div = document.getElementById(id);
|
||||
div.innerHTML=s
|
||||
}
|
||||
|
||||
function rows2keyvalueall(id, unused, data) {
|
||||
let s = "<table border=1 cellspacing=0>"
|
||||
data.forEach((row) => {
|
||||
Object.keys(row).forEach((key) => {
|
||||
s += "<tr><th>" + key + "<td>" + row[key];
|
||||
});
|
||||
});
|
||||
|
||||
s += "</table>"
|
||||
let div = document.getElementById(id);
|
||||
div.innerHTML=s
|
||||
}
|
||||
|
||||
function rows2table(id, columns, data) {
|
||||
let s = "<table border=1 cellspacing=0>"
|
||||
s += "<tr>"
|
||||
columns.forEach((col) => {
|
||||
s += "<th>" + col
|
||||
});
|
||||
data.forEach((row) => {
|
||||
s += "<tr>"
|
||||
columns.forEach((col) => {
|
||||
val = row[col]
|
||||
if (typeof val === "undefined") {
|
||||
val = ''
|
||||
}
|
||||
s += "<td>" + val
|
||||
});
|
||||
});
|
||||
|
||||
s += "</table>"
|
||||
let div = document.getElementById(id);
|
||||
div.innerHTML=s
|
||||
}
|
||||
|
||||
function do_get(url, id, handler, handler_param) {
|
||||
fetch(url)
|
||||
.then(function (response) {
|
||||
if (!response.ok) {
|
||||
throw new Error('Fetch got ' + response.status)
|
||||
}
|
||||
return response.json();
|
||||
})
|
||||
.then(function (data) {
|
||||
handler(id,handler_param,data);
|
||||
|
||||
// update the timestamp on success
|
||||
let now = Math.round(new Date().getTime() / 1000);
|
||||
let time = document.getElementById('time');
|
||||
time.innerHTML=now;
|
||||
})
|
||||
.catch(function (err) {
|
||||
console.log('error: ' + err);
|
||||
});
|
||||
}
|
||||
|
||||
// POST `body` to url and hand the JSON reply to
// handler(id, handler_param, data).  Errors are logged to the console.
function do_post(url, body, id, handler, handler_param) {
    fetch(url, {method:'POST', body: body})
        .then((response) => {
            if (!response.ok) {
                throw new Error('Fetch got ' + response.status)
            }
            return response.json();
        })
        .then((data) => {
            handler(id, handler_param, data);
        })
        .catch((err) => {
            console.log('error: ' + err);
        });
}
|
||||
|
||||
// Ask the node to stop via its management API.  The reply is ignored.
// NOTE(review): tracelevel is unused; presumably kept so the button
// handlers share a signature -- confirm before removing.
function do_stop(tracelevel) {
    // FIXME: uses global in script library
    fetch(nodetype + '/stop', {method:'POST'})
}
|
||||
|
||||
// Set the node's logging verbosity, clamping negative requests to zero,
// then refresh the "verbose" element with the node's reply.
function setverbose(tracelevel) {
    const level = (tracelevel < 0) ? 0 : tracelevel;
    // FIXME: uses global in script library
    do_post(nodetype + '/verbose', level, 'verbose', rows2verbose, null);
}
|
||||
|
||||
// Arrange for refresh_job() to run every `interval` milliseconds.
// (The unused `timer` local holding setInterval's id has been removed;
// nothing ever cancelled it.)
function refresh_setup(interval) {
    setInterval(refresh_job, interval);
}
|
||||
""",
|
||||
},
|
||||
"/": {
|
||||
"content_type": "text/html; charset=utf-8",
|
||||
"content": """
|
||||
<html>
|
||||
<head>
|
||||
<title>n2n edge management</title>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<tr>
|
||||
<td>Last Updated:
|
||||
<td><div id="time"></div>
|
||||
<td><button onclick=refresh_job()>update</button>
|
||||
<td><button onclick=do_stop()>stop edge</button>
|
||||
<tr>
|
||||
<td>Logging Verbosity:
|
||||
<td>
|
||||
<div id="verbose"></div>
|
||||
<td>
|
||||
<button onclick=setverbose(verbose+1)>+</button>
|
||||
<button onclick=setverbose(verbose-1)>-</button>
|
||||
</table>
|
||||
<br>
|
||||
<div id="communities"></div>
|
||||
<br>
|
||||
Edges/Peers:
|
||||
<div id="edges"></div>
|
||||
<br>
|
||||
Supernodes:
|
||||
<div id="supernodes"></div>
|
||||
<br>
|
||||
<div id="timestamps"></div>
|
||||
<br>
|
||||
<div id="packetstats"></div>
|
||||
|
||||
<script src="script.js"></script>
|
||||
<script>
|
||||
// FIXME: hacky global
|
||||
var nodetype="edge";
|
||||
|
||||
function refresh_job() {
|
||||
do_get(
|
||||
nodetype + '/verbose', 'verbose',
|
||||
rows2verbose, null
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/communities', 'communities',
|
||||
rows2keyvalue, ['community']
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/supernodes', 'supernodes',
|
||||
rows2table, ['version','current','macaddr','sockaddr','uptime']
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/edges', 'edges',
|
||||
rows2table, ['mode','ip4addr','macaddr','sockaddr','desc']
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/timestamps', 'timestamps',
|
||||
rows2keyvalueall, null
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/packetstats', 'packetstats',
|
||||
rows2table, ['type','tx_pkt','rx_pkt']
|
||||
);
|
||||
}
|
||||
|
||||
refresh_setup(10000);
|
||||
refresh_job();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
""",
|
||||
},
|
||||
"/supernode.html": {
|
||||
"content_type": "text/html; charset=utf-8",
|
||||
"content": """
|
||||
<html>
|
||||
<head>
|
||||
<title>n2n supernode management</title>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<tr>
|
||||
<td>Last Updated:
|
||||
<td><div id="time"></div>
|
||||
<td><button onclick=refresh_job()>update</button>
|
||||
<td><button onclick=do_stop()>stop supernode</button>
|
||||
<tr>
|
||||
<td>Logging Verbosity:
|
||||
<td>
|
||||
<div id="verbose"></div>
|
||||
<td>
|
||||
<button onclick=setverbose(verbose+1)>+</button>
|
||||
<button onclick=setverbose(verbose-1)>-</button>
|
||||
<td><button onclick=do_reload()>reload communities</button>
|
||||
</table>
|
||||
<br>
|
||||
Communities:
|
||||
<div id="communities"></div>
|
||||
<br>
|
||||
Edges/Peers:
|
||||
<div id="edges"></div>
|
||||
<br>
|
||||
<div id="timestamps"></div>
|
||||
<br>
|
||||
<div id="packetstats"></div>
|
||||
|
||||
<script src="script.js"></script>
|
||||
<script>
|
||||
// FIXME: hacky global
|
||||
var nodetype="supernode";
|
||||
|
||||
function do_reload() {
|
||||
fetch(nodetype + '/reload_communities', {method:'POST'})
|
||||
}
|
||||
|
||||
function refresh_job() {
|
||||
do_get(
|
||||
nodetype + '/verbose', 'verbose',
|
||||
rows2verbose, null
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/communities', 'communities',
|
||||
rows2table, ['community','ip4addr','is_federation','purgeable']
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/edges', 'edges',
|
||||
rows2table,
|
||||
['community','ip4addr','macaddr','sockaddr','proto','desc']
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/timestamps', 'timestamps',
|
||||
rows2keyvalueall, null
|
||||
);
|
||||
do_get(
|
||||
nodetype + '/packetstats', 'packetstats',
|
||||
rows2table, ['type','tx_pkt','rx_pkt']
|
||||
);
|
||||
}
|
||||
|
||||
refresh_setup(10000);
|
||||
refresh_job();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
""",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class SimpleHandler(http.server.BaseHTTPRequestHandler):
    """Serve the static management pages and proxy JSON RPC requests.

    GET/POST paths starting with /edge/ or /supernode/ are forwarded to
    the corresponding management channel object; any other GET path is
    looked up in the module-level `pages` dict of static content.
    """

    def __init__(self, rpc, snrpc, *args, **kwargs):
        # rpc talks to the edge daemon, snrpc to the supernode daemon
        self.rpc = rpc
        self.snrpc = snrpc
        super().__init__(*args, **kwargs)

    def log_request(self, code='-', size='-'):
        # Dont spam the output
        pass

    def _simplereply(self, number, message):
        """Send a bare reply with the given HTTP status and body text."""
        self.send_response(number)
        self.end_headers()
        self.wfile.write(message.encode('utf8'))

    def _replyjson(self, data):
        """Send `data` serialised as a JSON response body."""
        self.send_response(HTTPStatus.OK)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(data).encode('utf8'))

    def _replyunauth(self):
        """Challenge the client for HTTP Basic credentials."""
        self.send_response(HTTPStatus.UNAUTHORIZED)
        self.send_header('WWW-Authenticate', 'Basic realm="n2n"')
        self.end_headers()

    def _extractauth(self, rpc):
        """Copy the Basic-auth password (if any) into rpc.key.

        Falls back to rpc.defaultkey when no credentials were sent.
        """
        # Avoid caching the key inside the object for all clients
        rpc.key = None

        header = self.headers.get('Authorization')
        if header is not None:
            authtype, encoded = header.split(' ')
            if authtype == 'Basic':
                user, key = base64.b64decode(encoded).decode('utf8').split(':')
                rpc.key = key

        if rpc.key is None:
            rpc.key = rpc.defaultkey

    def _rpc(self, method, cmdline):
        """Run one RPC call, translating its errors into HTTP statuses."""
        try:
            data = method(cmdline)
        except ValueError as e:
            # the management channel signals a bad key with this message
            if str(e) == "Error: badauth":
                self._replyunauth()
                return

            self._simplereply(HTTPStatus.BAD_REQUEST, 'Bad Command')
            return
        except socket.timeout as e:
            self._simplereply(HTTPStatus.REQUEST_TIMEOUT, str(e))
            return

        self._replyjson(data)
        return

    def _rpc_read(self, rpc):
        """Dispatch a read command taken from the request path."""
        self._extractauth(rpc)
        tail = self.path.split('/')
        cmd = tail[2]
        # if reads ever need args, could use more of the tail

        self._rpc(rpc.read, cmd)

    def _rpc_write(self, rpc):
        """Dispatch a write command: path gives the verb, body the args."""
        self._extractauth(rpc)
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length).decode('utf8')

        tail = self.path.split('/')
        cmd = tail[2]
        cmdline = cmd + ' ' + post_data

        self._rpc(rpc.write, cmdline)

    def do_GET(self):
        if self.path.startswith("/edge/"):
            self._rpc_read(self.rpc)
            return

        if self.path.startswith("/supernode/"):
            self._rpc_read(self.snrpc)
            return

        if self.path in pages:
            page = pages[self.path]

            self.send_response(HTTPStatus.OK)
            self.send_header('Content-type', page['content_type'])
            self.end_headers()
            self.wfile.write(page['content'].encode('utf8'))
            return

        self._simplereply(HTTPStatus.NOT_FOUND, 'Not Found')
        return

    def do_POST(self):
        if self.path.startswith("/edge/"):
            self._rpc_write(self.rpc)
            return

        if self.path.startswith("/supernode/"):
            self._rpc_write(self.snrpc)
            return

        # BUGFIX: previously fell off the end without sending any
        # response for unknown POST paths, leaving the client hanging;
        # reply 404 just like do_GET does.
        self._simplereply(HTTPStatus.NOT_FOUND, 'Not Found')
        return
|
||||
|
||||
|
||||
def main():
    # Parse command line options
    ap = argparse.ArgumentParser(
        description='Control the running local n2n edge via http')
    ap.add_argument('-t', '--mgmtport', action='store', default=5644,
                    help='Management Port (default=5644)', type=int)
    ap.add_argument('--snmgmtport', action='store', default=5645,
                    help='Supernode Management Port (default=5645)', type=int)
    ap.add_argument('-k', '--key', action='store',
                    help='Password for mgmt commands')
    ap.add_argument('-d', '--debug', action='store_true',
                    help='Also show raw internal data')
    ap.add_argument('port', action='store',
                    default=8080, type=int, nargs='?',
                    help='Serve requests on TCP port (default 8080)')

    args = ap.parse_args()

    # Management channel to the edge daemon
    rpc = JsonUDP(args.mgmtport)
    rpc.debug = args.debug
    rpc.defaultkey = args.key

    # Management channel to the supernode daemon
    snrpc = JsonUDP(args.snmgmtport)
    snrpc.debug = args.debug
    snrpc.defaultkey = args.key

    # Die quietly if a client disconnects mid-reply instead of raising
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    # Allow quick restarts without waiting out TIME_WAIT on the port
    socketserver.TCPServer.allow_reuse_address = True
    # Bind the two RPC channels into the handler's constructor
    handler = functools.partial(SimpleHandler, rpc, snrpc)

    httpd = socketserver.TCPServer(("", args.port), handler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        return
|
||||
|
||||
|
||||
# Script entry point: only run the server when executed directly.
if __name__ == '__main__':
    main()
|
47
scripts/test_harness.sh
Executable file
47
scripts/test_harness.sh
Executable file
|
@ -0,0 +1,47 @@
|
|||
#!/bin/sh
#
# Run with the name of a test list file.
#
# This expects to find the tests in the tools dir or scripts dir and the
# expected results in the tests dir.

# boilerplate so we can support wacky cmake dirs
[ -z "$TOPDIR" ] && TOPDIR="."
[ -z "$BINDIR" ] && BINDIR="."
export TOPDIR
export BINDIR

if [ -z "$1" ]; then
    echo need test list filename
    exit 1
fi
TESTLIST="$1"
LISTDIR=$(dirname "$TESTLIST")

# Strip comments from the list, leaving whitespace-separated test names
TESTS=$(sed -e "s/#.*//" "$TESTLIST")

# Actually run the tests
# (word-splitting of $TESTS is intentional: one word per test name)
for i in $TESTS; do
    # Look in several places for the test program
    if [ -e "$BINDIR/$i" ]; then
        TEST="$BINDIR/$i"
    elif [ -e "$BINDIR/tools/$i" ]; then
        TEST="$BINDIR/tools/$i"
    elif [ -e "$LISTDIR/../scripts/$i" ]; then
        TEST="$LISTDIR/../scripts/$i"
    else
        echo "Could not find test $i"
        exit 1
    fi

    if [ ! -e "$LISTDIR/$i.expected" ]; then
        echo "Could not find testdata $LISTDIR/$i.expected"
        exit 1
    fi

    echo "$TEST >$LISTDIR/$i.out"
    # Abort immediately if either running the test or comparing its
    # output against the expected data fails
    set -e
    "$TEST" >"$LISTDIR/$i.out"
    cmp "$LISTDIR/$i.expected" "$LISTDIR/$i.out"
    set +e
done
|
49
scripts/test_integration_edge.sh
Executable file
49
scripts/test_integration_edge.sh
Executable file
|
@ -0,0 +1,49 @@
|
|||
#!/bin/sh
#
# Do some quick tests via the Json API against the edge
#

# Password used for authenticated management commands
AUTH=n2n

# boilerplate so we can support wacky cmake dirs
[ -z "$TOPDIR" ] && TOPDIR=.
[ -z "$BINDIR" ] && BINDIR=.

# Print a separator, run the given command, then print a blank line so
# each step's output is visually distinct.
docmd() {
    echo "###"
    "$@"
    echo
}

# start a supernode
# (expansions quoted throughout so paths with spaces do not split - SC2086)
docmd "${BINDIR}/supernode" -v

# Start the edge in the background
docmd sudo "${BINDIR}/edge" -l localhost:7654 -c test >/dev/null
# TODO:
# - send edge messages to stderr?

docmd "${TOPDIR}/scripts/n2n-ctl" communities
docmd "${TOPDIR}/scripts/n2n-ctl" packetstats
docmd "${TOPDIR}/scripts/n2n-ctl" edges --raw

# TODO:
# docmd ${TOPDIR}/scripts/n2n-ctl supernodes --raw
# - need fixed mac address
# - need to mask out:
#   - version string
#   - last_seen timestamp
#   - uptime

docmd "${TOPDIR}/scripts/n2n-ctl" verbose
# the unauthenticated write is expected to fail; show its exit code
docmd "${TOPDIR}/scripts/n2n-ctl" --write verbose 1 2>/dev/null
echo $?
docmd "${TOPDIR}/scripts/n2n-ctl" -k "$AUTH" --write verbose 1

# looks strange, but we are querying the state of the "stop" verb
docmd "${TOPDIR}/scripts/n2n-ctl" stop

# stop them both
docmd "${TOPDIR}/scripts/n2n-ctl" -k "$AUTH" --write stop
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 -k "$AUTH" --write stop
|
||||
|
33
scripts/test_integration_supernode.sh
Executable file
33
scripts/test_integration_supernode.sh
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/sh
#
# Do some quick tests via the Json API against the supernode
#

# Password used for authenticated management commands
AUTH=n2n

# boilerplate so we can support wacky cmake dirs
[ -z "$TOPDIR" ] && TOPDIR=.
[ -z "$BINDIR" ] && BINDIR=.

# Print a separator, run the given command, then print a blank line so
# each step's output is visually distinct.
docmd() {
    echo "###"
    "$@"
    echo
}

# start it running in the background
# (expansions quoted throughout so paths with spaces do not split - SC2086)
docmd "${BINDIR}/supernode" -v

docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 communities
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 packetstats
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 edges --raw

docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 verbose
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 -k "$AUTH" --write verbose 1

# looks strange, but we are querying the state of the "stop" verb
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 stop

# stop it
docmd "${TOPDIR}/scripts/n2n-ctl" -t 5645 -k "$AUTH" --write stop
|
||||
|
57
scripts/version.sh
Executable file
57
scripts/version.sh
Executable file
|
@ -0,0 +1,57 @@
|
|||
#!/bin/sh
#
# Output the current version number
#

usage() {
    echo "Usage: $0 [short|hash]"
    echo
    echo "Determine the correct version number for the current build"
    exit 0
}

# We assume this script is in the TOPDIR/scripts directory and use that
# to find any other files we need
TOPDIR=$(dirname "$0")/..

# The release version recorded in the source tree
VER_FILE_SHORT=$(cat "${TOPDIR}/VERSION")

if [ -d "$TOPDIR/.git" ]; then
    # If there is a .git directory in our TOPDIR, then this is assumed to be
    # real git checkout

    cd "$TOPDIR" || exit 1

    # Most recent tag, without any commits-since/-gHASH suffix
    VER_GIT_SHORT=$(git describe --abbrev=0)

    # The VERSION file and the git tag must agree, otherwise a release
    # was cut without updating one of them
    if [ "$VER_FILE_SHORT" != "$VER_GIT_SHORT" ]; then
        echo "Error: VERSION file does not match tag version ($VER_FILE_SHORT != $VER_GIT_SHORT)"
        exit 1
    fi

    VER_SHORT="$VER_GIT_SHORT"
    VER_HASH=$(git rev-parse --short HEAD)
    VER=$(git describe --abbrev=7 --dirty)
else
    # If there is no .git directory in our TOPDIR, we fall back on relying on
    # the VERSION file

    VER_SHORT="$VER_FILE_SHORT"
    VER_HASH="HEAD"
    VER="$VER_FILE_SHORT"
fi

# Emit the flavour of version string the caller asked for
case "$1" in
    hash)
        echo "$VER_HASH"
        ;;
    short)
        echo "$VER_SHORT"
        ;;
    "")
        echo "$VER"
        ;;
    *)
        usage
        ;;
esac
|
Loading…
Add table
Add a link
Reference in a new issue