diff --git a/bin/libbfa.js b/bin/libbfa.js
index c4b99374fc2f010f1be5da3740e2dfc8e771c2fa..5f562c411337eb04a304d7ae120970607998746f 100644
--- a/bin/libbfa.js
+++ b/bin/libbfa.js
@@ -3,13 +3,13 @@
 "use strict"
 var request = require('request');
+var net = require('net');
 
 module.exports = class Libbfa
 {
     constructor()
     {
         this.fs = require('fs');
         this.Web3 = require('web3');
-        var net = require('net');
         //
         // BFAHOME
         if ( undefined == process.env.BFAHOME )
@@ -38,7 +38,7 @@ module.exports = class Libbfa
                 files.push( filename );
             });
         }
-        // found none?
+        // found any?
         if ( files.length > 0 )
         {
             files.sort();
@@ -54,10 +54,13 @@ module.exports = class Libbfa
         this.socketurl = this.nodedir+'/geth.ipc'; // overwrite with newer ipc method
         if ( this.sockettype == 'ipc' ) {
             this.provider = new this.Web3.providers.IpcProvider( this.nodedir+'/geth.ipc', net );
+            this.req_url = 'http://unix:' + this.nodedir + '/geth.ipc:/';
         } else if ( this.sockettype == 'ws' ) {
             this.provider = new this.Web3.providers.WebsocketProvider( this.socketurl );
+            this.req_url = this.socketurl;
         } else if ( this.sockettype == 'http') {
             this.provider = new this.Web3.providers.HttpProvider( this.socketurl );
+            this.req_url = this.socketurl;
         } else {
             fatal("Unknown sockettype.");
         }
@@ -92,20 +95,25 @@ module.exports = class Libbfa
     newweb3()
     {
         var w3 = new this.Web3( this.provider );
+        var req_url = this.req_url;
+        var _bfa = this;
+        // The JSON-RPC request id could just remain the same number all the time.
+        var unneededcounter = 1;
+        w3.jsonify = function( opname, params )
+        {
+            var obj = {};
+            obj.id = unneededcounter++;
+            obj.jsonrpc = "2.0";
+            obj.method = opname;
+            obj.params = params;
+            return obj;
+        };
         w3.rpcreq = function( opname, params, callback )
         {
-            var extra = params.join(',');
-            var body = JSON.parse("{"+
-                                  '"jsonrpc":"2.0",' +
-                                  '"id":1,' +
-                                  '"method":"' + opname + '",' +
-                                  '"params":[' + extra + ']'
-                                  +"}"
-                                 );
             request.post({
-                uri: 'http://localhost:8545',
+                uri: req_url,
                 json: true,
-                body: body,
+                body: w3.jsonify( opname, params ),
                 callback: function RPCresponse( err, obj )
                 {
                     var r;
@@ -121,26 +129,74 @@ module.exports = class Libbfa
                 }
             });
         };
+        w3.req = function( opname, params, callback )
+        {
+            if ( _bfa.sockettype == 'ipc' )
+            {
+                w3.ipcreq( opname, params, callback );
+            }
+            else
+            {
+                w3.rpcreq( opname, params, callback );
+            }
+        }
+        w3.ipcreq = function( opname, params, callback )
+        {
+            var socket = net.connect( _bfa.socketurl );
+            var result;
+            var err;
+            socket.on("ready", () => {
+                // Runs once the socket has been established.
+                // We create a new connection per request, because it
+                // is easier than reliably handling JSON object boundaries
+                // in a TCP stream.
+                // Writes out data and closes our end of the connection.
+                // Geth will reply and then close its end.
+                socket.end( JSON.stringify( w3.jsonify(opname,params).valueOf()));
+            });
+            socket.on("data", (d) => {
+                try {
+                    result = JSON.parse( d.toString() );
+                }
+                catch {
+                    err = d.toString();
+                }
+            });
+            socket.on("timeout", () => {
+                socket.destroy();
+            });
+            socket.on("error", (e) => {
+                console.error(e);
+                err = e;
+            });
+            socket.on("close", () => {
+                if ( result && result.error && result.error.code && result.error.message )
+                    err = 'Error ' + result.error.code + ": "+ result.error.message;
+                else if ( result )
+                    result = result.result;
+                callback( err, result );
+            });
+        }
         w3.bfa = {
             clique: {
                 getSigners: function clique_getSigners( cb )
-                { w3.rpcreq( 'clique_getSigners', [], cb ) },
+                { w3.req( 'clique_getSigners', [], cb ) },
             },
             miner: {
                 start: function miner_start()
-                { w3.rpcreq( 'miner_start', [], function(){} ) },
+                { w3.req( 'miner_start', [], function(){} ) },
                 stop: function miner_stop()
-                { w3.rpcreq( 'miner_stop', [], function(){} ) }
+                { w3.req( 'miner_stop', [], function(){} ) }
             },
             admin: {
                 peers: function admin_peers( cb )
-                { w3.rpcreq( 'admin_peers', [], cb ) },
+                { w3.req( 'admin_peers', [], cb ) },
                 addPeer: function admin_addPeer( peer )
-                { w3.rpcreq( 'admin_addPeer', [ "\""+peer+"\"" ], function(){} ) }
+                { w3.req( 'admin_addPeer', [ peer ], function(){} ) }
             },
             personal: {
                 listWallets: function personal_listWallets( cb )
-                { w3.rpcreq( 'personal_listWallets', [], cb ) }
+                { w3.req( 'personal_listWallets', [], cb ) }
             }
         };
         if ( undefined != process.env.BFAACCOUNT ) {
diff --git a/bin/localstate.pl b/bin/localstate.pl
index f3af0d1d20a2031c89afb18c90c6ae5c0324c3fc..9864f021343f1fea0a5fdaaf6f6e4b9beba17943 100755
--- a/bin/localstate.pl
+++ b/bin/localstate.pl
@@ -275,8 +275,7 @@ if ( $result )
     $txn =~ s/^0x([a-fA-F\d]+)$/hex($1)/e;
     my $gold = rpc( $libbfa, 'eth_getBalance', qq("$account"), '"latest"' );
     $gold = Math::BigInt->new( $gold ) if $gold =~ /^0x/;
-    #$gold = Math::BigInt->new( $gold ) if $gold =~ s/^0x([\da-fA-F]{2})/0x0000$1/;
-    printf "Account %d: %s %-6s %3d transaction%s, %s satoshi.\n", $i, $account, $maymine, $txn, ($txn==1?' ':'s'), $gold;
+    printf "Account %d: %s %-6s %3d transaction%s, %s wei.\n", $i, $account, $maymine, $txn, ($txn==1?' ':'s'), $gold;
   }
 }
 else
diff --git a/bin/monitor.js b/bin/monitor.js
index 4414f05d625c6b32c09af25ab58ddd38bd95ba39..0596f89767877e73296453229da7e8913feb2765 100755
--- a/bin/monitor.js
+++ b/bin/monitor.js
@@ -10,12 +10,15 @@
 var bfa = new Libbfa();
 var web3 = bfa.newweb3();
 var lastUnlock = 0;
 var netid = 0;
+var peerscache = bfa.networkdir + '/peers.cache';
+if ( bfa.fs.existsSync( bfa.networkdir + '/cache' ) )
+    peerscache = bfa.networkdir + '/cache/peers.cache';
 function readPeersCache()
 {
-    if ( ! bfa.fs.existsSync( bfa.networkdir + '/peers.cache' ) )
+    if ( ! bfa.fs.existsSync( peerscache ) )
         return [];
-    var data = bfa.fs.readFileSync( bfa.networkdir + '/peers.cache' ).toString();
+    var data = bfa.fs.readFileSync( peerscache ).toString();
     var p = [];
     if ( data.length > 0 )
         p = data.split(/\r?\n/);
@@ -32,11 +35,10 @@ function writePeersCache( peers )
     if (peers.length > 100)
         peers.splice( 0, peers.length - 100 );
     // peers.cache is a list of peers we have connected out to in the past.
-    var filename = bfa.networkdir + '/peers.cache';
     var txt = peers.join("\n");
     if (txt.length > 0 && (txt.substring(txt.length-1) != "\n"))
         txt += "\n";
-    bfa.fs.writeFileSync( filename, txt, { mode: 0o644 } );
+    bfa.fs.writeFileSync( peerscache, txt, { mode: 0o644 } );
 }
 
 function dnspeercachelookup()
@@ -212,7 +214,11 @@ function mayseal()
 
     web3.bfa.clique.getSigners( function gotListOfSealers(e,x)
     {
-        if (e) return;
+        if (e)
+        {
+            console.error( e );
+            return;
+        }
         var lcsealers = x.map( name => name.toLowerCase() );
         var isSigner = (lcsealers.indexOf(me) > -1);
         if ( isSigner )
@@ -312,7 +318,7 @@ function timer()
             netid = x;
         } )
         .catch( err => {
-            console.log("monitor.js non-fatal: "+err)
+            console.error("monitor.js non-fatal: "+err)
         });
         return;
     }
diff --git a/bin/singlestart.sh b/bin/singlestart.sh
index d8fb623bf88304d2e37b56e60295157ae28a5174..aa7da1c932b63df4d67c36c56e7d40034d5b0782 100755
--- a/bin/singlestart.sh
+++ b/bin/singlestart.sh
@@ -13,6 +13,7 @@
 trap "exit 1" ERR
 trap "reaper" SIGINT SIGCHLD
 unset LOGDIR LOGPIPE PIDIDX
 declare -A PIDIDX
+trap "killallprocs" SIGTERM
 function reaper()
 {
@@ -40,17 +41,21 @@
     max=30
 }
 
-if [ "$VIRTUALIZATION" = "DOCKER" ]
-then
-    echo "Some log info can be seen with: docker logs bfanode"
-fi
+function killallprocs()
+{
+    if [ ${#PIDIDX[*]} -gt 0 ]
+    then
+        echo "*** Killing all remaining processes: ${PIDIDX[*]} (${!PIDIDX[*]})."
+        kill -KILL ${!PIDIDX[*]} 2>/dev/null || true
+    fi
+}
 
 # You can start as:
 # BFAHOME=/home/bfa/bfa singlestart.sh
 # singlestart.sh /home/bfa/bfa
 if [ -z "${BFAHOME}" -a -n "$1" -a -f "$1" ]
 then
-    BFAHOME="$1"
+    export BFAHOME="$1"
 fi
 if [ -z "${BFAHOME}" ]; then echo "\$BFAHOME not set. Did you source `dirname $0`/env ?" >&2; exit 1; fi
 #
@@ -70,22 +75,26 @@
 fi
 
 source ${BFAHOME}/bin/libbfa.sh
-echo "Logging mostly everything to ${BFANODEDIR}/log"
-echo "Consider running: tail -n 1000 -F ${BFANODEDIR}/log"
 if [ "$VIRTUALIZATION" = "DOCKER" ]
 then
-    echo "or: docker exec -i bfanode bfalog.sh"
-fi
+    echo "See log info with \"docker logs\""
+else
+    echo "Logging mostly everything to ${BFANODEDIR}/log"
+    echo "Consider running: tail -n 1000 -F ${BFANODEDIR}/log"
+    echo "or: bfalog.sh"
 
-echo "*** Setting up logging."
-# Clean up logging
-LOGDIR=$( mktemp -d )
-trap "rm -rf ${LOGDIR}" EXIT
-LOGPIPE=${LOGDIR}/logpipe
-mknod ${LOGPIPE} p
-${BFAHOME}/bin/log.sh ${BFANODEDIR}/log < ${LOGPIPE} &
-PIDIDX[$!]="log.sh"
-exec > ${LOGPIPE} 2>&1
+    # Docker has its own logging facility, so we will not use our own
+    # logging functionality if we're in docker.
+    echo "*** Setting up logging."
+    # Clean up logging
+    LOGDIR=$( mktemp -d )
+    trap "rm -rf ${LOGDIR}" EXIT
+    LOGPIPE=${LOGDIR}/logpipe
+    mknod ${LOGPIPE} p
+    ${BFAHOME}/bin/log.sh ${BFANODEDIR}/log < ${LOGPIPE} &
+    PIDIDX[$!]="log.sh"
+    exec > ${LOGPIPE} 2>&1
+fi
 
 echo "*** Starting geth."
 # "NoPruning=true" means "--gcmode archive"
@@ -112,8 +121,4 @@ do
    sleep 1
    max=$(( $max - 1 ))
 done
-if [ ${#PIDIDX[*]} -gt 0 ]
-then
-    echo "*** Killing all remaining processes: ${PIDIDX[*]} (${!PIDIDX[*]})."
-    kill -KILL ${!PIDIDX[*]} 2>/dev/null || true
-fi
+killallprocs
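
Usage sketch, not part of the patch: a minimal illustration of the new transport-neutral request path, assuming the changes above are applied, BFAHOME points at a configured node directory, and geth is reachable over the configured socket. The new w3.req dispatches to ipcreq when sockettype is 'ipc' and to rpcreq otherwise, so the w3.bfa wrappers behave the same over IPC, websocket or HTTP. The file name check-signers.js and the require path are hypothetical.

// check-signers.js -- hypothetical example, not shipped by this change.
// Lists the current clique sealers through the transport-neutral helpers.
var Libbfa = require( process.env.BFAHOME + '/bin/libbfa.js' );
var bfa = new Libbfa();
var web3 = bfa.newweb3();

// High-level wrapper; the same call works regardless of sockettype.
web3.bfa.clique.getSigners( function( err, signers )
{
    if ( err )
    {
        console.error( err );
        return;
    }
    signers.forEach( s => console.log( s ) );
});

// Equivalent low-level form: w3.req picks ipcreq or rpcreq internally.
web3.req( 'clique_getSigners', [], function( err, result )
{
    if ( err )
        console.error( err );
    else
        console.log( result );
});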