Compare commits

31 Commits

Author SHA1 Message Date
jrandom
503b289240 * 2004-10-10 0.4.1.2 released 2004-10-10 19:33:08 +00:00
jrandom
35e3bbb862 2004-10-10 cervantes
* Update the I2PTunnel HTTP proxy to strip out the i2paddresshelper from
      the request.
2004-10-10 14:57:15 +00:00
jrandom
8dc261da79 2004-10-09 jrandom
* Added a watchdog timer to do some baseline liveliness checking to help
      debug some odd errors.
    * Added a pair of summary stats for bandwidth usage, allowing easy export
      with the other stats ("bw.sendBps" and "bw.receiveBps")
    * Trimmed another memory allocation on message reception.
2004-10-10 00:03:25 +00:00
jrandom
65676f8988 2004-10-08 jrandom
* Revamp the AESInputStream so it doesn't allocate any temporary objects
      during its operation.
2004-10-08 22:53:03 +00:00
jrandom
730da3aa27 2004-10-08 jrandom
* Don't kill the establisher threads during a soft restart.
    * Attempt to validate the peer's routerInfo earlier during handshaking.
    * Revamp the AESOutputStream so it doesn't allocate any temporary objects
      during its operation.
2004-10-08 18:38:48 +00:00
jrandom
ff8674bca9 2004-10-07 jrandom
* Reimplement the I2NP reading with less temporary memory allocation.
      There is still significant GC churn, especially under load, but this
      should help.
    * Catch some oddball errors in the transport (message timeout while
      establishing).
2004-10-08 02:08:10 +00:00
jrandom
c7cfef3b61 2004-10-07 jrandom
* Expire queued messages even when the writer is blocked.
    * Reimplement most of the I2NP writing with less temporary memory
      allocations (I2NP reading still gobbles memory).
2004-10-07 19:19:51 +00:00
jrandom
32188b1cc0 expose some direct byte formatting methods
allow SHA256 to be run against a partial array
append to the stats.log instead of overwriting it
2004-10-07 16:48:46 +00:00
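
For reference, a rough usage sketch (not part of the changeset) of the helpers this commit adds, as seen in the DataHelper and SHA256Generator diffs below - DataHelper.toLong/fromLong for direct byte formatting and the partial-array calculateHash. The class name and values here are illustrative only.

    import net.i2p.crypto.SHA256Generator;
    import net.i2p.data.DataHelper;
    import net.i2p.data.Hash;

    /** Illustrative only - exercises the helpers added in this changeset. */
    public class ByteHelperDemo {
        public static void main(String args[]) {
            // direct byte formatting without going through a stream
            byte encoded[] = DataHelper.toLong(4, 42);
            long decoded = DataHelper.fromLong(encoded, 0, 4);
            System.out.println("roundtrip value: " + decoded);

            // hash a slice of a larger buffer without copying it first
            byte buf[] = new byte[64];
            Hash h = SHA256Generator.getInstance().calculateHash(buf, 16, 32);
            System.out.println("hash of slice: " + h.toBase64());
        }
    }
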
jrandom
37479d8c0d logging 2004-10-07 16:45:11 +00:00
jrandom
f5c7d6576d no need to double b0rk 2004-10-07 16:42:55 +00:00
jrandom
38c422bbc0 2004-10-06 jrandom
* Implement an active queue management scheme on the TCP transports,
      dropping messages probabalistically as the queue fills up.  The
      estimated queue capacity is determined by the rate at which messages
      have been sent to the peer (averaged at 1, 5, and 60m periods).  As
      we exceed 1/2 of the estimated capacity, we drop messages throughout
      the queue probabalistically with regards to their size.  This is based
      on RFC 2309's RED, with the minimum threshold set to 1/2 the
      estimated connection capacity.  We may want to consider using a send
      rate and queue size measured across all connections, to deal with our
      own local bandwidth saturation, but we'll try the per-con metrics first.
2004-10-06 21:03:51 +00:00
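
The drop decision described above can be pictured with a minimal sketch (this is not the actual transport code; the size weighting and scaling factors are simplified assumptions, only the half-capacity minimum threshold comes from the entry):

    import java.util.Random;

    /** Rough sketch of a RED-style probabilistic drop, minimum threshold at half capacity. */
    class RedDropSketch {
        private final Random _rnd = new Random();

        /** @return true if the message should be dropped rather than queued */
        boolean shouldDrop(int queuedBytes, int estimatedCapacityBytes, int messageSize) {
            int minThreshold = estimatedCapacityBytes / 2;
            if (queuedBytes <= minThreshold)
                return false; // below half the estimated capacity, never drop
            // how far into the "danger zone" the queue already is (0.0 .. 1.0)
            double fill = (queuedBytes - minThreshold)
                          / (double) (estimatedCapacityBytes - minThreshold);
            // weight by relative message size so larger messages are dropped first
            double sizeFactor = messageSize / (double) estimatedCapacityBytes;
            double dropProbability = Math.min(1.0, fill * (0.5 + sizeFactor));
            return _rnd.nextDouble() < dropProbability;
        }
    }
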
jrandom
39d4e5ea81 list the shutdown time w/ the clock fudge factor included 2004-10-06 16:47:36 +00:00
jrandom
4191ad1cbf 2004-10-06 jrandom
* Enable explicit disabling of the systray entirely for windows machines
      with strange configurations: add -Dsystray.disable=true to the java
      command line.  (thanks mihi!)
2004-10-06 13:23:38 +00:00
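
A hedged example of such a command line, assuming the router is started directly with java on Windows rather than through the service wrapper (the jar names and main class shown are illustrative):

    java -Dsystray.disable=true -cp "lib\i2p.jar;lib\router.jar;lib\systray.jar" net.i2p.router.Router
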
jrandom
29287da37c 2004-10-05 jrandom
* Allow peers on the same LAN to communicate with each other safely even
      when they cannot talk to each other through the external address.
2004-10-06 01:12:03 +00:00
jrandom
98c780415b 2004-10-05 jrandom
* Display how much time is left before the graceful shutdown is complete.
    * Debug some improperly failed messages on timeout or disconnection.
2004-10-05 19:21:47 +00:00
jrandom
756af9c699 oops 2004-10-05 18:21:44 +00:00
jrandom
7f9076bb1d updated beyond.i2p (after verification) 2004-10-05 16:04:16 +00:00
jrandom
2404f1ab9a added b.i2p 2004-10-05 15:57:08 +00:00
jrandom
64bcfd09ec 2004-10-05 jrandom
* Don't go into a fast busy if an I2PTunnel 'server' is explicitly killed
      (thanks mule!)
    * Handle some more error conditions regarding abruptly closing sockets
      (thanks Jonva!)
2004-10-05 15:38:37 +00:00
jrandom
6251d22c6e added tinyurl.i2p 2004-10-05 15:26:20 +00:00
jrandom
de1b4937a1 2004-10-04 jrandom
* Update the shitlist to reject a peer for an exponentially increasing
      period of time (with an upper bounds of an hour).
    * Various minor stat and debugging fixes
2004-10-04 17:30:22 +00:00
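
A minimal sketch of the backoff policy described above (not the router's actual shitlist code; only the one-hour upper bound comes from the entry, the base period is an assumption):

    /** Illustrative only - exponentially increasing ban period, capped at an hour. */
    class ShitlistBackoffSketch {
        private static final long BASE_PERIOD_MS = 4*60*1000;   // assumed starting ban period (4 minutes)
        private static final long MAX_PERIOD_MS  = 60*60*1000;  // upper bound from the changelog (1 hour)

        /** @param priorFailures how many times this peer has already been shitlisted */
        long banPeriod(int priorFailures) {
            long period = BASE_PERIOD_MS << Math.min(priorFailures, 16); // double each time, avoid overflow
            return Math.min(period, MAX_PERIOD_MS);
        }
    }
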
jrandom
d092dd79ba get rid of the really really frequent temporary object creation 2004-10-04 13:53:10 +00:00
jrandom
a3ba968386 24h time 2004-10-04 01:17:01 +00:00
jrandom
5ca2b97128 24h time 2004-10-04 01:00:38 +00:00
jrandom
c9daad1cfd added detonate.i2p 2004-10-04 00:16:13 +00:00
jrandom
0526d5b53a cli to splot the stat log 2004-10-03 23:53:16 +00:00
jrandom
34163fb8e4 dont overwrite index.html anymore (0.4.1.2 wont) 2004-10-03 21:06:17 +00:00
jrandom
98d2d661a8 2004-10-03 jrandom
* Add a new stat logging component to optionally dump the raw stats to
      disk as they are generated, rather than rely upon the summarized data.
      By default, this is off, but the router property "stat.logFilters" can
      be set to a comma delimited list of stats (e.g. "client.sendAckTime")
      which will be written to the file "stats.log" (or whatever the property
      "stat.logFile" is set to).  This can also log profile related stats,
      such as "dbResponseTime" or "tunnelTestResponseTime".
2004-10-03 20:48:43 +00:00
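
For example, router property lines along these lines (property names as described above; the bw.* stat names come from the 2004-10-09 entry, the values are illustrative) would log raw data for three stats to the default stats.log:

    stat.logFilters=client.sendAckTime,bw.sendBps,bw.receiveBps
    stat.logFile=stats.log
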
jrandom
d9f0a0fd74 dont list an explicit webdefault.xml (use the default) 2004-10-03 14:04:21 +00:00
jrandom
d20d043e0f 2004-10-02 jrandom
* Assure that we quickly fail messages bound for shitlisted peers.
    * Address a race on startup where the first peer contacted could hang the
      router (thanks Romster!)
    * Only whine about an intermittent inability to query the time server once
2004-10-02 19:05:24 +00:00
jrandom
ce186e1872 2004-10-02 jrandom
* Command line utility to verify a peer's reachability - simply run
      net.i2p.router.transport.tcp.ConnectionHandler hostname port# and it
      will print out whether that peer is reachable or not (using a simple
      verification handshake).
2004-10-02 12:31:15 +00:00
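
A hedged invocation example of the utility described above (classpath, hostname, and port are illustrative):

    java -cp lib/i2p.jar:lib/router.jar net.i2p.router.transport.tcp.ConnectionHandler peer.example.org 8887
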
83 changed files with 2232 additions and 968 deletions

View File

@@ -199,10 +199,14 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable
host = getHostName(destination);
if ( (host != null) && ("i2p".equals(host)) ) {
int pos2;
if ((pos2 = line.indexOf("?")) != -1) {
if ((pos2 = request.indexOf("?")) != -1) {
// Try to find an address helper in the fragments
String fragments = line.substring(pos2 + 1);
// and split the request into it's component parts for rebuilding later
String fragments = request.substring(pos2 + 1);
String uriPath = request.substring(0, pos2);
pos2 = fragments.indexOf(" ");
String protocolVersion = fragments.substring(pos2 + 1);
String urlEncoding = "";
fragments = fragments.substring(0, pos2);
fragments = fragments + "&";
String fragment;
@@ -215,8 +219,17 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable
if (pos2 >= 0) {
addressHelpers.put(destination,fragment.substring(pos2 + 1));
}
}
} else {
// append each fragment unless it's the address helper
if ("".equals(urlEncoding)) {
urlEncoding = "?" + fragment;
} else {
urlEncoding = urlEncoding + "&" + fragment;
}
}
}
// reconstruct the request minus the i2paddresshelper GET var
request = uriPath + urlEncoding + " " + protocolVersion;
}
String addressHelper = (String) addressHelpers.get(destination);

View File

@@ -148,6 +148,7 @@ public class I2PTunnelServer extends I2PTunnelTask implements Runnable {
I2PServerSocket i2pss = sockMgr.getServerSocket();
while (true) {
I2PSocket i2ps = i2pss.accept();
if (i2ps == null) throw new I2PException("I2PServerSocket closed");
I2PThread t = new I2PThread(new Handler(i2ps));
t.start();
}

View File

@@ -118,7 +118,7 @@ public class I2PSocketManager implements I2PSessionListener {
return;
}
if (msg.length < 4) {
_log.error(getName() + ": ==== packet too short ====");
_log.warn(getName() + ": ==== packet too short ====");
return;
}
int type = msg[0] & 0xff;
@@ -155,7 +155,7 @@ public class I2PSocketManager implements I2PSessionListener {
return;
}
} catch (I2PException ise) {
_log.error(getName() + ": Error processing", ise);
_log.warn(getName() + ": Error processing", ise);
} catch (IllegalStateException ise) {
_log.debug(getName() + ": Error processing", ise);
}
@@ -242,7 +242,7 @@ public class I2PSocketManager implements I2PSessionListener {
}
return;
} catch (Exception t) {
_log.error(getName() + ": Ignoring error on disconnect for socket " + s, t);
_log.warn(getName() + ": Ignoring error on disconnect for socket " + s, t);
}
}
@@ -306,7 +306,7 @@ public class I2PSocketManager implements I2PSessionListener {
replySentOk = _session.sendMessage(d, packet);
}
if (!replySentOk) {
_log.error(getName() + ": Error sending close to " + d.calculateHash().toBase64()
_log.warn(getName() + ": Error sending close to " + d.calculateHash().toBase64()
+ " in response to a new con message",
new Exception("Failed creation"));
}
@@ -363,13 +363,13 @@ public class I2PSocketManager implements I2PSessionListener {
return;
} else {
if ( (payload.length > 0) && (_log.shouldLog(Log.ERROR)) )
_log.error(getName() + ": Disconnect packet had " + payload.length + " bytes");
_log.warn(getName() + ": Disconnect packet had " + payload.length + " bytes");
if (s != null)
s.internalClose();
return;
}
} catch (Exception t) {
_log.error(getName() + ": Ignoring error on disconnect", t);
_log.warn(getName() + ": Ignoring error on disconnect", t);
return;
}
}
@@ -507,8 +507,8 @@ public class I2PSocketManager implements I2PSessionListener {
return s;
} catch (InterruptedIOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error(getName() + ": Timeout waiting for ack from syn for id "
if (_log.shouldLog(Log.WARN))
_log.warn(getName() + ": Timeout waiting for ack from syn for id "
+ lcID + " to " + peer.calculateHash().toBase64().substring(0,6)
+ " for socket " + s, ioe);
synchronized (lock) {
@@ -532,8 +532,8 @@ public class I2PSocketManager implements I2PSessionListener {
s.internalClose();
throw ex;
} catch (IOException ex) {
if (_log.shouldLog(Log.ERROR))
_log.error(getName() + ": Error sending syn on id "
if (_log.shouldLog(Log.WARN))
_log.warn(getName() + ": Error sending syn on id "
+ lcID + " to " + peer.calculateHash().toBase64().substring(0,6)
+ " for socket " + s, ex);
synchronized (lock) {
@@ -553,7 +553,7 @@ public class I2PSocketManager implements I2PSessionListener {
throw ex;
} catch (Exception e) {
s.internalClose();
_log.error(getName() + ": Unhandled error connecting on "
_log.warn(getName() + ": Unhandled error connecting on "
+ lcID + " to " + peer.calculateHash().toBase64().substring(0,6)
+ " for socket " + s, e);
throw new ConnectException("Unhandled error connecting: " + e.getMessage());
@@ -626,7 +626,7 @@ public class I2PSocketManager implements I2PSessionListener {
_session.destroySession();
_log.debug(getName() + ": I2P session destroyed");
} catch (I2PSessionException e) {
_log.error(getName() + ": Error destroying I2P session", e);
_log.warn(getName() + ": Error destroying I2P session", e);
}
}
@@ -652,7 +652,7 @@ public class I2PSocketManager implements I2PSessionListener {
try {
return _session.sendMessage(peer, new byte[] { (byte) CHAFF});
} catch (I2PException ex) {
_log.error(getName() + ": I2PException:", ex);
_log.warn(getName() + ": I2PException:", ex);
return false;
}
}

View File

@@ -0,0 +1,34 @@
package net.i2p.router.web;
import net.i2p.data.DataHelper;
import net.i2p.router.RouterContext;
/**
* Simple helper to query the appropriate router for data necessary to render
* any emergency notices
*/
public class NoticeHelper {
private RouterContext _context;
/**
* Configure this bean to query a particular router context
*
* @param contextId begging few characters of the routerHash, or null to pick
* the first one we come across.
*/
public void setContextId(String contextId) {
try {
_context = ContextHelper.getContext(contextId);
} catch (Throwable t) {
t.printStackTrace();
}
}
public String getSystemNotice() {
if (_context.router().gracefulShutdownInProgress()) {
return "Graceful shutdown in "
+ DataHelper.formatDuration(_context.router().getShutdownTimeRemaining());
} else {
return "";
}
}
}

View File

@@ -25,3 +25,7 @@
<jsp:setProperty name="navhelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<jsp:getProperty name="navhelper" property="clientAppLinks" />
</h4>
<jsp:useBean class="net.i2p.router.web.NoticeHelper" id="noticehelper" scope="request" />
<jsp:setProperty name="noticehelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<b><jsp:getProperty name="noticehelper" property="systemNotice" /></b>

View File

@@ -49,7 +49,7 @@ public class SysTray implements SysTrayMenuListener {
if (!(new File("router.config")).exists())
openRouterConsole("http://localhost:" + _portString + "/index.jsp");
if (System.getProperty("os.name").startsWith("Windows"))
if ( (System.getProperty("os.name").startsWith("Windows")) && (!Boolean.getBoolean("systray.disable")) )
_instance = new SysTray();
}

View File

@@ -232,9 +232,6 @@
<copy file="hosts.txt" todir="pkg-temp/" />
<mkdir dir="pkg-temp/eepsite" />
<mkdir dir="pkg-temp/eepsite/webapps" />
<mkdir dir="pkg-temp/eepsite/logs" />
<mkdir dir="pkg-temp/eepsite/docroot" />
<copy file="installer/resources/eepsite_index.html" tofile="pkg-temp/eepsite/docroot/index.html" />
<copy file="installer/resources/jetty.xml" tofile="pkg-temp/eepsite/jetty.xml" />
<zip destfile="i2pupdate.zip" basedir="pkg-temp" />
</target>

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.21 $ $Date: 2004/09/30 10:58:55 $";
public final static String VERSION = "0.4.1.1";
public final static String ID = "$Revision: 1.22 $ $Date: 2004/10/01 12:23:00 $";
public final static String VERSION = "0.4.1.2";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -46,66 +46,81 @@ public class AESInputStream extends FilterInputStream {
private long _cumulativePrepared; // how many bytes decrypted and added to _readyBuf
private long _cumulativePaddingStripped; // how many bytes have been stripped
private ByteArrayOutputStream _encryptedBuf; // read from the stream but not yet decrypted
private List _readyBuf; // list of Bytes ready to be consumed, where index 0 is the first
/** read but not yet decrypted */
private byte _encryptedBuf[];
/** how many bytes have been added to the encryptedBuf since it was decrypted? */
private int _writesSinceDecrypt;
/** decrypted bytes ready for reading (first available == index of 0) */
private int _decryptedBuf[];
/** how many bytes are available for reading without decrypt? */
private int _decryptedSize;
private final static int BLOCK_SIZE = CryptixRijndael_Algorithm._BLOCK_SIZE;
private final static int READ_SIZE = BLOCK_SIZE;
private final static int DECRYPT_SIZE = BLOCK_SIZE - 1;
public AESInputStream(I2PAppContext context, InputStream source, SessionKey key, byte iv[]) {
public AESInputStream(I2PAppContext context, InputStream source, SessionKey key, byte[] iv) {
super(source);
_context = context;
_log = context.logManager().getLog(AESInputStream.class);
_key = key;
_lastBlock = new byte[BLOCK_SIZE];
System.arraycopy(iv, 0, _lastBlock, 0, BLOCK_SIZE);
_encryptedBuf = new ByteArrayOutputStream(BLOCK_SIZE);
_readyBuf = new ArrayList(1024);
_encryptedBuf = new byte[BLOCK_SIZE];
_writesSinceDecrypt = 0;
_decryptedBuf = new int[BLOCK_SIZE-1];
_decryptedSize = 0;
_cumulativePaddingStripped = 0;
_eofFound = false;
}
public int read() throws IOException {
while ((!_eofFound) && (_readyBuf.size() <= 0)) {
refill(READ_SIZE);
while ((!_eofFound) && (_decryptedSize <= 0)) {
refill();
}
Integer nval = getNext();
if (nval != null) {
return nval.intValue();
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("No byte available. eof? " + _eofFound);
if (_eofFound)
if (_decryptedSize > 0) {
int c = _decryptedBuf[0];
System.arraycopy(_decryptedBuf, 1, _decryptedBuf, 0, _decryptedBuf.length-1);
_decryptedSize--;
return c;
} else if (_eofFound) {
return -1;
throw new IOException("Not EOF, but none available? " + _readyBuf.size() + "/" + _encryptedBuf.size()
+ "/" + _cumulativeRead + "... impossible");
} else {
throw new IOException("Not EOF, but none available? " + _decryptedSize
+ "/" + _writesSinceDecrypt
+ "/" + _cumulativeRead + "... impossible");
}
}
public int read(byte dest[]) throws IOException {
for (int i = 0; i < dest.length; i++) {
int val = read();
if (val == -1) {
// no more to read... can they expect more?
if (_eofFound && (i == 0)) return -1;
return i;
}
dest[i] = (byte) val;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read the full buffer of size " + dest.length);
return dest.length;
return read(dest, 0, dest.length);
}
public int read(byte dest[], int off, int len) throws IOException {
byte buf[] = new byte[len];
int read = read(buf);
if (read == -1) return -1;
System.arraycopy(buf, 0, dest, off, read);
return read;
for (int i = 0; i < len; i++) {
int val = read();
if (val == -1) {
// no more to read... can they expect more?
if (_eofFound && (i == 0)) {
if (_log.shouldLog(Log.DEBUG))
_log.info("EOF? " + _eofFound
+ "\nread=" + i + " decryptedSize=" + _decryptedSize
+ " \nencryptedSize=" + _writesSinceDecrypt
+ " \ntotal=" + _cumulativeRead
+ " \npadding=" + _cumulativePaddingStripped
+ " \nprepared=" + _cumulativePrepared);
return -1;
} else {
if (i != len)
if (_log.shouldLog(Log.DEBUG))
_log.info("non-terminal eof: " + _eofFound + " i=" + i + " len=" + len);
}
return i;
}
dest[off+i] = (byte)val;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read the full buffer of size " + len);
return len;
}
public long skip(long numBytes) throws IOException {
@@ -117,25 +132,15 @@ public class AESInputStream extends FilterInputStream {
}
public int available() throws IOException {
return _readyBuf.size();
return _decryptedSize;
}
public void close() throws IOException {
//_log.debug("We have " + _encryptedBuf.size() + " available to decrypt... doing so");
//decrypt();
//byte buf[] = new byte[_readyBuf.size()];
//for (int i = 0; i < buf.length; i++)
// buf[i] = ((Integer)_readyBuf.get(i)).byteValue();
//_log.debug("After decrypt: readyBuf.size: " + _readyBuf.size() + "\n val:\t" + Base64.encode(buf));
int ready = _readyBuf.size();
int encrypted = _readyBuf.size();
_readyBuf.clear();
_encryptedBuf.reset();
in.close();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Cumulative bytes read from source/decrypted/stripped: " + _cumulativeRead + "/"
+ _cumulativePrepared + "/" + _cumulativePaddingStripped + "] remaining [" + ready + " ready, "
+ encrypted + " still encrypted]");
+ _cumulativePrepared + "/" + _cumulativePaddingStripped + "] remaining [" + _decryptedSize + " ready, "
+ _writesSinceDecrypt + " still encrypted]");
}
public void mark(int readLimit) { // nop
@@ -149,116 +154,60 @@ public class AESInputStream extends FilterInputStream {
return false;
}
/**
* Retrieve the next ready byte, or null if no bytes are ready. this does not refill or block
*
*/
private Integer getNext() {
if (_readyBuf.size() > 0) {
return (Integer) _readyBuf.remove(0);
}
return null;
}
/**
* Read at least one new byte from the underlying stream, and up to max new bytes,
* but not necessarily enough for a new decrypted block. This blocks until at least
* one new byte is read from the stream
*
*/
private void refill(int max) throws IOException {
private void refill() throws IOException {
if (!_eofFound) {
byte buf[] = new byte[max];
int read = in.read(buf);
int read = in.read(_encryptedBuf, _writesSinceDecrypt, _encryptedBuf.length - _writesSinceDecrypt);
if (read == -1) {
_eofFound = true;
} else if (read > 0) {
//_log.debug("Read from the source stream " + read + " bytes");
_cumulativeRead += read;
_encryptedBuf.write(buf, 0, read);
_writesSinceDecrypt += read;
}
}
if (false) return; // true to keep the data for decrypt/display on close
if (_encryptedBuf.size() > 0) {
if (_encryptedBuf.size() >= DECRYPT_SIZE) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("We have " + _encryptedBuf.size() + " available to decrypt... doing so");
decrypt();
if ( (_encryptedBuf.size() > 0) && (_log.shouldLog(Log.DEBUG)) )
_log.debug("Bytes left in the encrypted buffer after decrypt: " + _encryptedBuf.size());
} else {
if (_eofFound) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("EOF and not enough bytes to decrypt [size = " + _encryptedBuf.size()
+ " totalCumulative: " + _cumulativeRead + "/"+_cumulativePrepared +"]!");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not enough bytes to decrypt [size = " + _encryptedBuf.size()
+ " totalCumulative: " + _cumulativeRead + "/"+_cumulativePrepared +"]");
}
}
if (_writesSinceDecrypt == BLOCK_SIZE) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("We have " + _writesSinceDecrypt + " available to decrypt... doing so");
decryptBlock();
if ( (_writesSinceDecrypt > 0) && (_log.shouldLog(Log.DEBUG)) )
_log.debug("Bytes left in the encrypted buffer after decrypt: "
+ _writesSinceDecrypt);
}
}
/**
* Take (n*BLOCK_SIZE) bytes off the _encryptedBuf, decrypt them, and place
* them on _readyBuf
*
* Decrypt the
*/
private void decrypt() throws IOException {
byte encrypted[] = _encryptedBuf.toByteArray();
_encryptedBuf.reset();
if ((encrypted == null) || (encrypted.length <= 0))
private void decryptBlock() throws IOException {
if (_writesSinceDecrypt != BLOCK_SIZE)
throw new IOException("Error decrypting - no data to decrypt");
if (_decryptedSize != 0)
throw new IOException("wtf, decrypted size is not 0? " + _decryptedSize);
_context.aes().decrypt(_encryptedBuf, 0, _encryptedBuf, 0, _key, _lastBlock, BLOCK_SIZE);
DataHelper.xor(_encryptedBuf, 0, _lastBlock, 0, _encryptedBuf, 0, BLOCK_SIZE);
int payloadBytes = countBlockPayload(_encryptedBuf, 0);
int numBlocks = encrypted.length / BLOCK_SIZE;
if ((encrypted.length % BLOCK_SIZE) != 0) {
// it was flushed / handled off the BLOCK_SIZE segments, so put the excess
// back into the _encryptedBuf for later handling
int trailing = encrypted.length % BLOCK_SIZE;
_encryptedBuf.write(encrypted, encrypted.length - trailing, trailing);
byte nencrypted[] = new byte[encrypted.length - trailing];
System.arraycopy(encrypted, 0, nencrypted, 0, nencrypted.length);
encrypted = nencrypted;
if (_log.shouldLog(Log.WARN))
_log.warn("Decrypt got odd segment - " + trailing
+ " bytes pushed back for later decryption - corrupted or slow data stream perhaps?");
} else {
if (_log.shouldLog(Log.INFO))
_log.info(encrypted.length + " bytes makes up " + numBlocks + " blocks to decrypt normally");
for (int i = 0; i < payloadBytes; i++) {
int c = _encryptedBuf[i];
if (c <= 0)
c += 256;
_decryptedBuf[i] = c;
}
_decryptedSize = payloadBytes;
for (int i = 0; i < numBlocks; i++) {
_context.aes().decrypt(encrypted, i * BLOCK_SIZE, encrypted, i * BLOCK_SIZE, _key, _lastBlock, BLOCK_SIZE);
DataHelper.xor(encrypted, i * BLOCK_SIZE, _lastBlock, 0, encrypted, i * BLOCK_SIZE, BLOCK_SIZE);
int payloadBytes = countBlockPayload(encrypted, i * BLOCK_SIZE);
for (int j = 0; j < payloadBytes; j++) {
int c = encrypted[j + i * BLOCK_SIZE];
if (c <= 0)
c += 256;
_readyBuf.add(new Integer(c));
}
_cumulativePaddingStripped += BLOCK_SIZE - payloadBytes;
_cumulativePrepared += payloadBytes;
System.arraycopy(encrypted, i * BLOCK_SIZE, _lastBlock, 0, BLOCK_SIZE);
}
_cumulativePaddingStripped += BLOCK_SIZE - payloadBytes;
_cumulativePrepared += payloadBytes;
int remaining = encrypted.length % BLOCK_SIZE;
if (remaining != 0) {
_encryptedBuf.write(encrypted, encrypted.length - remaining, remaining);
_log.debug("After pushing " + remaining
+ " bytes back onto the buffer, lets delay 1s our action so we don't fast busy until the net transfers data");
try {
Thread.sleep(1000);
} catch (InterruptedException ie) { // nop
}
} else {
//_log.debug("No remaining encrypted bytes beyond the block size");
}
System.arraycopy(_encryptedBuf, 0, _lastBlock, 0, BLOCK_SIZE);
_writesSinceDecrypt = 0;
}
/**
@@ -303,19 +252,32 @@ public class AESInputStream extends FilterInputStream {
}
int remainingBytes() {
return _encryptedBuf.size();
return _writesSinceDecrypt;
}
int readyBytes() {
return _readyBuf.size();
return _decryptedSize;
}
/**
* Test AESOutputStream/AESInputStream
*/
public static void main(String args[]) {
public static void main(String args[]) {
I2PAppContext ctx = new I2PAppContext();
try {
System.out.println("pwd=" + new java.io.File(".").getAbsolutePath());
System.out.println("Beginning");
runTest(ctx);
} catch (Throwable e) {
ctx.logManager().getLog(AESInputStream.class).error("Fail", e);
}
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
System.out.println("Done");
}
private static void runTest(I2PAppContext ctx) {
Log log = ctx.logManager().getLog(AESInputStream.class);
log.setMinimumPriority(Log.DEBUG);
byte orig[] = new byte[1024 * 32];
RandomSource.getInstance().nextBytes(orig);
//byte orig[] = "you are my sunshine, my only sunshine".getBytes();
@@ -351,10 +313,51 @@ public class AESInputStream extends FilterInputStream {
}
log.info("Done testing 0 byte data");
for (int i = 0; i <= 32768; i++) {
orig = new byte[i];
ctx.random().nextBytes(orig);
try {
log.info("Testing " + orig.length);
runTest(ctx, orig, key, iv);
} catch (RuntimeException re) {
log.error("Error testing " + orig.length);
throw re;
}
}
/*
orig = new byte[615280];
RandomSource.getInstance().nextBytes(orig);
for (int i = 0; i < 20; i++) {
runTest(ctx, orig, key, iv);
}
log.info("Done testing 615280 byte data");
*/
/*
for (int i = 0; i < 100; i++) {
orig = new byte[ctx.random().nextInt(1024*1024)];
ctx.random().nextBytes(orig);
try {
runTest(ctx, orig, key, iv);
} catch (RuntimeException re) {
log.error("Error testing " + orig.length);
throw re;
}
}
log.info("Done testing 100 random lengths");
*/
orig = new byte[32];
RandomSource.getInstance().nextBytes(orig);
runOffsetTest(ctx, orig, key, iv);
try {
runOffsetTest(ctx, orig, key, iv);
} catch (Exception e) {
log.info("Error running offset test", e);
}
log.info("Done testing offset test (it should have come back with a statement NOT EQUAL!)");
@@ -389,18 +392,21 @@ public class AESInputStream extends FilterInputStream {
Hash newHash = SHA256Generator.getInstance().calculateHash(fin);
boolean eq = origHash.equals(newHash);
if (eq)
log.info("Equal hashes. hash: " + origHash);
else
log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin));
if (eq) {
//log.info("Equal hashes. hash: " + origHash);
} else {
throw new RuntimeException("NOT EQUAL! len=" + orig.length + " read=" + read
+ "\norig: \t" + Base64.encode(orig) + "\nnew : \t"
+ Base64.encode(fin));
}
boolean ok = DataHelper.eq(orig, fin);
log.debug("EQ data? " + ok + " origLen: " + orig.length + " fin.length: " + fin.length);
log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms");
log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms");
log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms");
} catch (Throwable t) {
log.error("ERROR transferring", t);
} catch (IOException ioe) {
log.error("ERROR transferring", ioe);
}
//try { Thread.sleep(5000); } catch (Throwable t) {}
}
@@ -441,15 +447,16 @@ public class AESInputStream extends FilterInputStream {
if (eq)
log.info("Equal hashes. hash: " + origHash);
else
log.error("NOT EQUAL! \norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin));
throw new RuntimeException("NOT EQUAL! len=" + orig.length + "\norig: \t" + Base64.encode(orig) + "\nnew : \t" + Base64.encode(fin));
boolean ok = DataHelper.eq(orig, fin);
log.debug("EQ data? " + ok + " origLen: " + orig.length + " fin.length: " + fin.length);
log.debug("Time to D(E(" + orig.length + ")): " + (end - start) + "ms");
log.debug("Time to E(" + orig.length + "): " + (endE - start) + "ms");
log.debug("Time to D(" + orig.length + "): " + (end - endE) + "ms");
} catch (Throwable t) {
log.error("ERROR transferring", t);
} catch (RuntimeException re) {
throw re;
} catch (IOException ioe) {
log.error("ERROR transferring", ioe);
}
//try { Thread.sleep(5000); } catch (Throwable t) {}
}

View File

@@ -34,7 +34,15 @@ public class AESOutputStream extends FilterOutputStream {
private I2PAppContext _context;
private SessionKey _key;
private byte[] _lastBlock;
private ByteArrayOutputStream _inBuf;
/**
* buffer containing the unwritten bytes. The first unwritten
* byte is _lastCommitted+1, and the last unwritten byte is _nextWrite-1
* (aka the next byte to be written on the array is _nextWrite)
*/
private byte[] _unencryptedBuf;
private byte _writeBlock[];
/** how many bytes have we been given since we flushed it to the stream? */
private int _writesSinceCommit;
private long _cumulativeProvided; // how many bytes provided to this stream
private long _cumulativeWritten; // how many bytes written to the underlying stream
private long _cumulativePadding; // how many bytes of padding written
@@ -51,31 +59,32 @@ public class AESOutputStream extends FilterOutputStream {
_key = key;
_lastBlock = new byte[BLOCK_SIZE];
System.arraycopy(iv, 0, _lastBlock, 0, BLOCK_SIZE);
_inBuf = new ByteArrayOutputStream(MAX_BUF);
_unencryptedBuf = new byte[MAX_BUF];
_writeBlock = new byte[BLOCK_SIZE];
_writesSinceCommit = 0;
}
public void write(int val) throws IOException {
_cumulativeProvided++;
_inBuf.write(val);
if (_inBuf.size() > MAX_BUF) doFlush();
_unencryptedBuf[_writesSinceCommit++] = (byte)(val & 0xFF);
if (_writesSinceCommit == _unencryptedBuf.length)
doFlush();
}
public void write(byte src[]) throws IOException {
_cumulativeProvided += src.length;
_inBuf.write(src);
if (_inBuf.size() > MAX_BUF) doFlush();
write(src, 0, src.length);
}
public void write(byte src[], int off, int len) throws IOException {
_cumulativeProvided += len;
_inBuf.write(src, off, len);
if (_inBuf.size() > MAX_BUF) doFlush();
// i'm too lazy to unroll this into the partial writes (dealing with
// wrapping around the buffer size)
for (int i = 0; i < len; i++)
write(src[i+off]);
}
public void close() throws IOException {
flush();
out.close();
_inBuf.reset();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Cumulative bytes provided to this stream / written out / padded: "
+ _cumulativeProvided + "/" + _cumulativeWritten + "/" + _cumulativePadding);
@@ -87,8 +96,10 @@ public class AESOutputStream extends FilterOutputStream {
}
private void doFlush() throws IOException {
writeEncrypted(_inBuf.toByteArray());
_inBuf.reset();
if (_log.shouldLog(Log.INFO))
_log.info("doFlush(): writesSinceCommit=" + _writesSinceCommit);
writeEncrypted();
_writesSinceCommit = 0;
}
/**
@@ -101,39 +112,37 @@ public class AESOutputStream extends FilterOutputStream {
* times).
*
*/
private void writeEncrypted(byte src[]) throws IOException {
if ((src == null) || (src.length == 0)) return;
int numBlocks = src.length / (BLOCK_SIZE - 1);
private void writeEncrypted() throws IOException {
int numBlocks = _writesSinceCommit / (BLOCK_SIZE - 1);
byte block[] = new byte[BLOCK_SIZE];
if (_log.shouldLog(Log.INFO))
_log.info("writeE(): #=" + _writesSinceCommit + " blocks=" + numBlocks);
for (int i = 0; i < numBlocks; i++) {
DataHelper.xor(src, i * 15, _lastBlock, 0, block, 0, 15);
DataHelper.xor(_unencryptedBuf, i * 15, _lastBlock, 0, _writeBlock, 0, 15);
// the padding byte for "full" blocks
block[BLOCK_SIZE - 1] = (byte)(_lastBlock[BLOCK_SIZE - 1] ^ 0x01);
_context.aes().encrypt(block, 0, block, 0, _key, _lastBlock, BLOCK_SIZE);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Padding block " + i + " of " + numBlocks + " with 1 byte");
out.write(block);
System.arraycopy(block, 0, _lastBlock, 0, BLOCK_SIZE);
_writeBlock[BLOCK_SIZE - 1] = (byte)(_lastBlock[BLOCK_SIZE - 1] ^ 0x01);
_context.aes().encrypt(_writeBlock, 0, _writeBlock, 0, _key, _lastBlock, BLOCK_SIZE);
out.write(_writeBlock);
System.arraycopy(_writeBlock, 0, _lastBlock, 0, BLOCK_SIZE);
_cumulativeWritten += BLOCK_SIZE;
_cumulativePadding++;
}
if (src.length % 15 != 0) {
if (_writesSinceCommit % 15 != 0) {
// we need to do non trivial padding
int remainingBytes = src.length - numBlocks * 15;
int remainingBytes = _writesSinceCommit - numBlocks * 15;
int paddingBytes = BLOCK_SIZE - remainingBytes;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Padding " + src.length + " with " + paddingBytes + " bytes in " + numBlocks + " blocks");
System.arraycopy(src, numBlocks * 15, block, 0, remainingBytes);
Arrays.fill(block, remainingBytes, BLOCK_SIZE, (byte) paddingBytes);
DataHelper.xor(block, 0, _lastBlock, 0, block, 0, BLOCK_SIZE);
_context.aes().encrypt(block, 0, block, 0, _key, _lastBlock, BLOCK_SIZE);
out.write(block);
System.arraycopy(block, 0, _lastBlock, 0, BLOCK_SIZE);
_log.debug("Padding " + _writesSinceCommit + " with " + paddingBytes + " bytes in " + (numBlocks+1) + " blocks");
System.arraycopy(_unencryptedBuf, numBlocks * 15, _writeBlock, 0, remainingBytes);
Arrays.fill(_writeBlock, remainingBytes, BLOCK_SIZE, (byte) paddingBytes);
DataHelper.xor(_writeBlock, 0, _lastBlock, 0, _writeBlock, 0, BLOCK_SIZE);
_context.aes().encrypt(_writeBlock, 0, _writeBlock, 0, _key, _lastBlock, BLOCK_SIZE);
out.write(_writeBlock);
System.arraycopy(_writeBlock, 0, _lastBlock, 0, BLOCK_SIZE);
_cumulativePadding += paddingBytes;
_cumulativeWritten += BLOCK_SIZE;
}
}
}

View File

@@ -62,40 +62,43 @@ public class SHA256Generator {
* @return hash of the source
*/
public Hash calculateHash(byte[] source) {
long length = source.length * 8;
return calculateHash(source, 0, source.length);
}
public Hash calculateHash(byte[] source, int start, int len) {
long length = len * 8;
int k = 448 - (int) ((length + 1) % 512);
if (k < 0) {
k += 512;
}
int padbytes = k / 8;
int wordlength = source.length / 4 + padbytes / 4 + 3;
int wordlength = len / 4 + padbytes / 4 + 3;
int[] M0 = new int[wordlength];
int wordcount = 0;
int x = 0;
for (x = 0; x < (source.length / 4) * 4; x += 4) {
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x + 2] << 24 >>> 24 << 8;
M0[wordcount] |= source[x + 3] << 24 >>> 24 << 0;
for (x = 0; x < (len / 4) * 4; x += 4) {
M0[wordcount] = source[x+start] << 24 >>> 24 << 24;
M0[wordcount] |= source[x+start + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x+start + 2] << 24 >>> 24 << 8;
M0[wordcount] |= source[x+start + 3] << 24 >>> 24 << 0;
wordcount++;
}
switch (source.length - (wordcount + 1) * 4 + 4) {
switch (len - (wordcount + 1) * 4 + 4) {
case 0:
M0[wordcount] |= 0x80000000;
break;
case 1:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] = source[x+start] << 24 >>> 24 << 24;
M0[wordcount] |= 0x00800000;
break;
case 2:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] = source[x+start] << 24 >>> 24 << 24;
M0[wordcount] |= source[x+start + 1] << 24 >>> 24 << 16;
M0[wordcount] |= 0x00008000;
break;
case 3:
M0[wordcount] = source[x] << 24 >>> 24 << 24;
M0[wordcount] |= source[x + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x + 2] << 24 >>> 24 << 8;
M0[wordcount] = source[x+start] << 24 >>> 24 << 24;
M0[wordcount] |= source[x+start + 1] << 24 >>> 24 << 16;
M0[wordcount] |= source[x+start + 2] << 24 >>> 24 << 8;
M0[wordcount] |= 0x00000080;
break;
}

View File

@@ -261,7 +261,66 @@ public class DataHelper {
throw new DataFormatException("Invalid value (must be positive)", iae);
}
}
public static byte[] toLong(int numBytes, long value) throws IllegalArgumentException {
if (numBytes <= 0) throw new IllegalArgumentException("Invalid number of bytes");
if (value < 0) throw new IllegalArgumentException("Negative value not allowed");
byte val[] = new byte[numBytes];
for (int i = 0; i < numBytes; i++)
val[numBytes-i-1] = (byte)(value >>> (i*8));
return val;
}
public static long fromLong(byte src[], int offset, int numBytes) {
if ( (src == null) || (src.length == 0) )
return 0;
long rv = 0;
for (int i = 0; i < numBytes; i++) {
long cur = src[offset+i] & 0xFF;
if (cur < 0) cur = cur+256;
cur = (cur << (8*(numBytes-i-1)));
rv += cur;
}
if (rv < 0)
throw new IllegalArgumentException("wtf, fromLong got a negative? " + rv + ": offset="+ offset +" numBytes="+numBytes);
return rv;
}
public static void main(String args[]) {
for (int i = 0; i <= 0xFF; i++)
testLong(1, i);
System.out.println("Test 1byte passed");
for (long i = 0; i <= 0xFFFF; i++)
testLong(2, i);
System.out.println("Test 2byte passed");
for (long i = 0; i <= 0xFFFFFF; i++)
testLong(3, i);
System.out.println("Test 3byte passed");
for (long i = 0; i <= 0xFFFFFFFF; i++)
testLong(4, i);
System.out.println("Test 4byte passed");
for (long i = 0; i <= 0xFFFFFFFF; i++)
testLong(8, i);
System.out.println("Test 8byte passed");
}
private static void testLong(int numBytes, long value) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(numBytes);
writeLong(baos, numBytes, value);
byte written[] = baos.toByteArray();
byte extract[] = toLong(numBytes, value);
if (!eq(written, extract))
throw new RuntimeException("testLong("+numBytes+","+value+") FAILED");
long read = fromLong(extract, 0, extract.length);
if (read != value)
throw new RuntimeException("testLong("+numBytes+","+value+") FAILED on read (" + read + ")");
} catch (Exception e) {
throw new RuntimeException(e.getMessage());
}
}
/** Read in a date from the stream as specified by the I2P data structure spec.
* A date is an 8 byte unsigned integer in network byte order specifying the number of
* milliseconds since midnight on January 1, 1970 in the GMT timezone. If the number is
@@ -272,7 +331,7 @@ public class DataHelper {
* @return date read, or null
*/
public static Date readDate(InputStream in) throws DataFormatException, IOException {
long date = readLong(in, 8);
long date = readLong(in, DATE_LENGTH);
if (date == 0L) return null;
return new Date(date);
@@ -287,10 +346,25 @@ public class DataHelper {
public static void writeDate(OutputStream out, Date date)
throws DataFormatException, IOException {
if (date == null)
writeLong(out, 8, 0L);
writeLong(out, DATE_LENGTH, 0L);
else
writeLong(out, 8, date.getTime());
writeLong(out, DATE_LENGTH, date.getTime());
}
public static byte[] toDate(Date date) throws IllegalArgumentException {
if (date == null)
return toLong(DATE_LENGTH, 0L);
else
return toLong(DATE_LENGTH, date.getTime());
}
public static Date fromDate(byte src[], int offset) throws IllegalArgumentException {
long when = fromLong(src, offset, DATE_LENGTH);
if (when <= 0)
return null;
else
return new Date(when);
}
public static final int DATE_LENGTH = 8;
/** Read in a string from the stream as specified by the I2P data structure spec.
* A string is 1 or more bytes where the first byte is the number of bytes (not characters!)
@@ -364,12 +438,16 @@ public class DataHelper {
public static void writeBoolean(OutputStream out, Boolean bool)
throws DataFormatException, IOException {
if (bool == null)
writeLong(out, 1, 2);
writeLong(out, 1, BOOLEAN_UNKNOWN);
else if (Boolean.TRUE.equals(bool))
writeLong(out, 1, 1);
writeLong(out, 1, BOOLEAN_TRUE);
else
writeLong(out, 1, 0);
writeLong(out, 1, BOOLEAN_FALSE);
}
public static final byte BOOLEAN_TRUE = 0x1;
public static final byte BOOLEAN_FALSE = 0x0;
public static final byte BOOLEAN_UNKNOWN = 0x2;
//
// The following comparator helpers make it simpler to write consistently comparing
@@ -620,9 +698,12 @@ public class DataHelper {
/** decompress the GZIP compressed data (returning null on error) */
public static byte[] decompress(byte orig[]) throws IOException {
return decompress(orig, 0, orig.length);
}
public static byte[] decompress(byte orig[], int offset, int length) throws IOException {
if ((orig == null) || (orig.length <= 0)) return orig;
GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(orig), orig.length);
ByteArrayOutputStream baos = new ByteArrayOutputStream(orig.length * 2);
GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(orig, offset, length), length);
ByteArrayOutputStream baos = new ByteArrayOutputStream(length * 2);
byte buf[] = new byte[4 * 1024];
while (true) {
int read = in.read(buf);

View File

@@ -33,9 +33,8 @@ public class Signature extends DataStructureImpl {
FAKE_SIGNATURE[i] = 0x00;
}
public Signature() {
setData(null);
}
public Signature() { this(null); }
public Signature(byte data[]) { setData(data); }
public byte[] getData() {
return _data;

View File

@@ -29,9 +29,8 @@ public class SigningPrivateKey extends DataStructureImpl {
public final static int KEYSIZE_BYTES = 20;
public SigningPrivateKey() {
setData(null);
}
public SigningPrivateKey() { this(null); }
public SigningPrivateKey(byte data[]) { setData(data); }
public byte[] getData() {
return _data;

View File

@@ -29,9 +29,8 @@ public class SigningPublicKey extends DataStructureImpl {
public final static int KEYSIZE_BYTES = 128;
public SigningPublicKey() {
setData(null);
}
public SigningPublicKey() { this(null); }
public SigningPublicKey(byte data[]) { setData(data); }
public byte[] getData() {
return _data;

View File

@@ -35,12 +35,25 @@ public class TunnelId extends DataStructureImpl {
public final static int TYPE_PARTICIPANT = 3;
public TunnelId() {
setTunnelId(-1);
setType(TYPE_UNSPECIFIED);
_tunnelId = -1;
_type = TYPE_UNSPECIFIED;
}
public TunnelId(long id) {
if (id <= 0) throw new IllegalArgumentException("wtf, tunnelId " + id);
_tunnelId = id;
_type = TYPE_UNSPECIFIED;
}
public TunnelId(long id, int type) {
if (id <= 0) throw new IllegalArgumentException("wtf, tunnelId " + id);
_tunnelId = id;
_type = type;
}
public long getTunnelId() { return _tunnelId; }
public void setTunnelId(long id) { _tunnelId = id; }
public void setTunnelId(long id) {
_tunnelId = id;
if (id <= 0) throw new IllegalArgumentException("wtf, tunnelId " + id);
}
/**
* is this tunnel inbound, outbound, or a participant (kept in memory only and used only for the router).s

View File

@@ -0,0 +1,187 @@
package net.i2p.stat;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.StringTokenizer;
import net.i2p.I2PAppContext;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
*
*/
public class BufferedStatLog implements StatLog {
private I2PAppContext _context;
private Log _log;
private StatEvent _events[];
private int _eventNext;
private int _lastWrite;
/** flush stat events to disk after this many events (or 30s)*/
private int _flushFrequency;
private List _statFilters;
private String _lastFilters;
private BufferedWriter _out;
private String _outFile;
public BufferedStatLog(I2PAppContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(BufferedStatLog.class);
_events = new StatEvent[1000];
for (int i = 0; i < 1000; i++)
_events[i] = new StatEvent();
_eventNext = 0;
_lastWrite = _events.length-1;
_statFilters = new ArrayList(10);
_flushFrequency = 500;
I2PThread writer = new I2PThread(new StatLogWriter(), "StatLogWriter");
writer.setDaemon(true);
writer.start();
}
public void addData(String scope, String stat, long value, long duration) {
synchronized (_events) {
_events[_eventNext].init(scope, stat, value, duration);
_eventNext = (_eventNext + 1) % _events.length;
if (_eventNext == _lastWrite)
_lastWrite = (_lastWrite + 1) % _events.length; // drop an event
if (_log.shouldLog(Log.DEBUG))
_log.debug("AddData next=" + _eventNext + " lastWrite=" + _lastWrite);
if (_eventNext > _lastWrite) {
if (_eventNext - _lastWrite >= _flushFrequency)
_events.notifyAll();
} else {
if (_events.length - 1 - _lastWrite + _eventNext >= _flushFrequency)
_events.notifyAll();
}
}
}
private boolean shouldLog(String stat) {
synchronized (_statFilters) {
return _statFilters.contains(stat) || _statFilters.contains("*");
}
}
private void updateFilters() {
String val = _context.getProperty("stat.logFilters");
if (val != null) {
if ( (_lastFilters != null) && (_lastFilters.equals(val)) ) {
// noop
} else {
StringTokenizer tok = new StringTokenizer(val, ",");
synchronized (_statFilters) {
_statFilters.clear();
while (tok.hasMoreTokens())
_statFilters.add(tok.nextToken().trim());
}
}
_lastFilters = val;
} else {
synchronized (_statFilters) { _statFilters.clear(); }
}
String filename = _context.getProperty("stat.logFile");
if (filename == null)
filename = "stats.log";
if ( (_outFile != null) && (_outFile.equals(filename)) ) {
// noop
} else {
if (_out != null) try { _out.close(); } catch (IOException ioe) {}
_outFile = filename;
try {
_out = new BufferedWriter(new FileWriter(_outFile, true));
} catch (IOException ioe) { ioe.printStackTrace(); }
}
}
private class StatLogWriter implements Runnable {
private SimpleDateFormat _fmt = new SimpleDateFormat("yyyyMMdd HH:mm:ss.SSS");
public void run() {
int writeStart = -1;
int writeEnd = -1;
while (true) {
synchronized (_events) {
if (_eventNext > _lastWrite) {
if (_eventNext - _lastWrite < _flushFrequency)
try { _events.wait(30*1000); } catch (InterruptedException ie) {}
} else {
if (_events.length - 1 - _lastWrite + _eventNext < _flushFrequency)
try { _events.wait(30*1000); } catch (InterruptedException ie) {}
}
writeStart = (_lastWrite + 1) % _events.length;
writeEnd = _eventNext;
_lastWrite = (writeEnd == 0 ? _events.length-1 : writeEnd - 1);
}
if (writeStart != writeEnd) {
try {
if (_log.shouldLog(Log.DEBUG))
_log.debug("writing " + writeStart +"->"+ writeEnd);
writeEvents(writeStart, writeEnd);
} catch (Exception e) {
_log.error("error writing " + writeStart +"->"+ writeEnd, e);
}
}
}
}
private void writeEvents(int start, int end) {
try {
updateFilters();
int cur = start;
while (cur != end) {
if (shouldLog(_events[cur].getStat())) {
String when = null;
synchronized (_fmt) {
when = _fmt.format(new Date(_events[cur].getTime()));
}
_out.write(when);
_out.write(" ");
if (_events[cur].getScope() == null)
_out.write("noScope ");
else
_out.write(_events[cur].getScope() + " ");
_out.write(_events[cur].getStat()+" ");
_out.write(_events[cur].getValue()+" ");
_out.write(_events[cur].getDuration()+"\n");
}
cur = (cur + 1) % _events.length;
}
_out.flush();
} catch (IOException ioe) {
_log.error("Error writing out", ioe);
}
}
}
private class StatEvent {
private long _time;
private String _scope;
private String _stat;
private long _value;
private long _duration;
public long getTime() { return _time; }
public String getScope() { return _scope; }
public String getStat() { return _stat; }
public long getValue() { return _value; }
public long getDuration() { return _duration; }
public void init(String scope, String stat, long value, long duration) {
_scope = scope;
_stat = stat;
_value = value;
_duration = duration;
_time = _context.clock().now();
}
}
}

View File

@@ -19,6 +19,8 @@ public class RateStat {
private String _description;
/** actual rate objects for this statistic */
private Rate _rates[];
/** component we tell about events as they occur */
private StatLog _statLog;
public RateStat(String name, String description, String group, long periods[]) {
_statName = name;
@@ -28,11 +30,13 @@ public class RateStat {
for (int i = 0; i < periods.length; i++)
_rates[i] = new Rate(periods[i]);
}
public void setStatLog(StatLog sl) { _statLog = sl; }
/**
* update all of the rates for the various periods with the given value.
*/
public void addData(long value, long eventDuration) {
if (_statLog != null) _statLog.addData(_groupName, _statName, value, eventDuration);
for (int i = 0; i < _rates.length; i++)
_rates[i].addData(value, eventDuration);
}

View File

@@ -0,0 +1,8 @@
package net.i2p.stat;
/**
* Component to be notified when a particular event occurs
*/
public interface StatLog {
public void addData(String scope, String stat, long value, long duration);
}

View File

@@ -0,0 +1,76 @@
package net.i2p.stat;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.FileWriter;
import java.text.SimpleDateFormat;
import java.text.ParseException;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
* Simple CLI to splot the stat logs into per-stat files containing
* #seconds since beginning and the value (ready for loading into your
* favorite plotting tool)
*/
public class StatLogSplitter {
private static final String DATE_FORMAT = "yyyyMMdd HH:mm:ss.SSS";
private static SimpleDateFormat _fmt = new SimpleDateFormat(DATE_FORMAT);
public static void main(String args[]) {
if (args.length != 1) {
System.err.println("Usage: StatLogSplitter filename");
return;
}
splitLog(args[0]);
}
private static void splitLog(String filename) {
Map outputFiles = new HashMap(4);
try {
BufferedReader in = new BufferedReader(new FileReader(filename));
String line;
long first = 0;
while ( (line = in.readLine()) != null) {
String date = line.substring(0, DATE_FORMAT.length()).trim();
int endGroup = line.indexOf(' ', DATE_FORMAT.length()+1);
int endStat = line.indexOf(' ', endGroup+1);
int endValue = line.indexOf(' ', endStat+1);
String group = line.substring(DATE_FORMAT.length()+1, endGroup).trim();
String stat = line.substring(endGroup, endStat).trim();
String value = line.substring(endStat, endValue).trim();
String duration = line.substring(endValue).trim();
//System.out.println(date + " " + group + " " + stat + " " + value + " " + duration);
try {
Date when = _fmt.parse(date);
if (first <= 0) first = when.getTime();
long val = Long.parseLong(value);
long time = Long.parseLong(duration);
if (!outputFiles.containsKey(stat)) {
outputFiles.put(stat, new FileWriter(stat + ".dat"));
System.out.println("Including data to " + stat + ".dat");
}
FileWriter out = (FileWriter)outputFiles.get(stat);
double s = (when.getTime()-first)/1000.0;
//long s = when.getTime();
out.write(s + " " + val + " [" + line + "]\n");
out.flush();
} catch (ParseException pe) {
continue;
} catch (NumberFormatException nfe){
continue;
}
}
} catch (IOException ioe) {
ioe.printStackTrace();
}
for (Iterator iter = outputFiles.values().iterator(); iter.hasNext(); ) {
FileWriter out = (FileWriter)iter.next();
try { out.close(); } catch (IOException ioe) {}
}
}
}
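
Once a stats.log has been generated (see the stat.logFilters example above), the splitter added here can be run against it; a hedged invocation, assuming the class is on the core classpath:

    java -cp lib/i2p.jar net.i2p.stat.StatLogSplitter stats.log

Per the class javadoc, each logged stat ends up in its own .dat file containing seconds since the start of the log and the value, ready for a plotting tool.
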

View File

@@ -27,6 +27,7 @@ public class StatManager {
private Map _frequencyStats;
/** stat name to RateStat */
private Map _rateStats;
private StatLog _statLog;
/**
* The stat manager should only be constructed and accessed through the
@@ -39,6 +40,18 @@ public class StatManager {
_context = context;
_frequencyStats = Collections.synchronizedMap(new HashMap(128));
_rateStats = Collections.synchronizedMap(new HashMap(128));
_statLog = new BufferedStatLog(context);
}
public StatLog getStatLog() { return _statLog; }
public void setStatLog(StatLog log) {
_statLog = log;
synchronized (_rateStats) {
for (Iterator iter = _rateStats.values().iterator(); iter.hasNext(); ) {
RateStat rs = (RateStat)iter.next();
rs.setStatLog(log);
}
}
}
/**
@@ -64,7 +77,9 @@ public class StatManager {
*/
public void createRateStat(String name, String description, String group, long periods[]) {
if (_rateStats.containsKey(name)) return;
_rateStats.put(name, new RateStat(name, description, group, periods));
RateStat rs = new RateStat(name, description, group, periods);
if (_statLog != null) rs.setStatLog(_statLog);
_rateStats.put(name, rs);
}
/** update the given frequency statistic, taking note that an event occurred (and recalculating all frequencies) */

View File

@@ -115,7 +115,7 @@ public class NtpClient {
//System.out.println("host: " + serverName + " rtt: " + roundTripDelay + " offset: " + localClockOffset + " seconds");
return (long)(System.currentTimeMillis() + localClockOffset*1000);
} catch (IOException ioe) {
ioe.printStackTrace();
//ioe.printStackTrace();
return -1;
}
}

View File

@@ -100,6 +100,7 @@ public class Timestamper implements Runnable {
if (_log.shouldLog(Log.INFO))
_log.info("Starting up timestamper");
boolean alreadyBitched = false;
try {
while (true) {
updateConfig();
@@ -118,7 +119,9 @@ public class Timestamper implements Runnable {
_log.debug("Stamp time");
stampTime(now);
} catch (IllegalArgumentException iae) {
_log.log(Log.CRIT, "Unable to reach any of the NTP servers - network disconnected?");
if (!alreadyBitched)
_log.log(Log.CRIT, "Unable to reach any of the NTP servers - network disconnected?");
alreadyBitched = true;
}
}
try { Thread.sleep(_queryFrequency); } catch (InterruptedException ie) {}

View File

@@ -1,4 +1,96 @@
$Id: history.txt,v 1.27 2004/10/01 09:35:49 jrandom Exp $
$Id: history.txt,v 1.43 2004/10/10 09:57:15 jrandom Exp $
* 2004-10-10 0.4.1.2 released
2004-10-10 cervantes
* Update the I2PTunnel HTTP proxy to strip out the i2paddresshelper from
the request.
2004-10-09 jrandom
* Added a watchdog timer to do some baseline liveliness checking to help
debug some odd errors.
* Added a pair of summary stats for bandwidth usage, allowing easy export
with the other stats ("bw.sendBps" and "bw.receiveBps")
* Trimmed another memory allocation on message reception.
2004-10-08 jrandom
* Revamp the AESInputStream so it doesn't allocate any temporary objects
during its operation.
2004-10-08 jrandom
* Don't kill the establisher threads during a soft restart.
* Attempt to validate the peer's routerInfo earlier during handshaking.
* Revamp the AESOutputStream so it doesn't allocate any temporary objects
during its operation.
2004-10-07 jrandom
* Reimplement the I2NP reading with less temporary memory allocation.
There is still significant GC churn, especially under load, but this
should help.
* Catch some oddball errors in the transport (message timeout while
establishing).
2004-10-07 jrandom
* Expire queued messages even when the writer is blocked.
* Reimplement most of the I2NP writing with less temporary memory
allocations (I2NP reading still gobbles memory).
2004-10-06 jrandom
* Implement an active queue management scheme on the TCP transports,
dropping messages probabalistically as the queue fills up. The
estimated queue capacity is determined by the rate at which messages
have been sent to the peer (averaged at 1, 5, and 60m periods). As
we exceed 1/2 of the estimated capacity, we drop messages throughout
the queue probabalistically with regards to their size. This is based
on RFC 2309's RED, with the minimum threshold set to 1/2 the
estimated connection capacity. We may want to consider using a send
rate and queue size measured across all connections, to deal with our
own local bandwidth saturation, but we'll try the per-con metrics first.
2004-10-06 jrandom
* Enable explicit disabling of the systray entirely for windows machines
with strange configurations: add -Dsystray.disable=true to the java
command line. (thanks mihi!)
2004-10-05 jrandom
* Allow peers on the same LAN to communicate with each other safely even
when they cannot talk to each other through the external address.
2004-10-05 jrandom
* Display how much time is left before the graceful shutdown is complete.
* Debug some improperly failed messages on timeout or disconnection.
2004-10-05 jrandom
* Don't go into a fast busy if an I2PTunnel 'server' is explicitly killed
(thanks mule!)
* Handle some more error conditions regarding abruptly closing sockets
(thanks Jonva!)
2004-10-04 jrandom
* Update the shitlist to reject a peer for an exponentially increasing
period of time (with an upper bounds of an hour).
* Various minor stat and debugging fixes
2004-10-03 jrandom
* Add a new stat logging component to optionally dump the raw stats to
disk as they are generated, rather than rely upon the summarized data.
By default, this is off, but the router property "stat.logFilters" can
be set to a comma delimited list of stats (e.g. "client.sendAckTime")
which will be written to the file "stats.log" (or whatever the property
"stat.logFile" is set to). This can also log profile related stats,
such as "dbResponseTime" or "tunnelTestResponseTime".
2004-10-02 jrandom
* Assure that we quickly fail messages bound for shitlisted peers.
* Address a race on startup where the first peer contacted could hang the
router (thanks Romster!)
* Only whine about an intermittent inability to query the time server once
2004-10-02 jrandom
* Command line utility to verify a peer's reachability - simply run
net.i2p.router.transport.tcp.ConnectionHandler hostname port# and it
will print out whether that peer is reachable or not (using a simple
verification handshake).
* 2004-10-01 0.4.1.1 released

View File

@@ -1,6 +1,10 @@
; TC's hosts.txt guaranteed freshness
; $Id: hosts.txt,v 1.52 2004/09/30 08:25:31 jrandom Exp $
; $Id: hosts.txt,v 1.56 2004/10/05 10:57:08 jrandom Exp $
; changelog:
; (1.79) updated beyond.i2p's key
; (1.78) added b.i2p
; (1.77) added tinyurl.i2p
; (1.76) added detonate.i2p
; (1.75) added identiguy.i2p
; (1.74) added jabber-2.i2p
; (1.73) added jake.i2p
@@ -176,3 +180,8 @@ pop.postman.i2p=W4ZehJpFv9HnM4qnWb6c305H1vWRoghgSz~UXVVl7AM6cNYPU~DHzI0ezxyYIcsy
jake.i2p=BzdF-Ib~jWRj9i1hypIasw67yv0YDEdCqloJr4Qq2Wvkt56slqzd0Qri6FQnWsGfTpmilvGI1H-7WAyU5KXc~nQ7F7KB~Im9cI1mIeMKeGK4Yzb6jV5SHctDZw2j1hqMpdjU76r45WiOgiAaNFin10SJ8IriNqce9VehnOupB53tpvoj1GnEIyxxaOsSDmxIakFZ8MsLW~s9wyxnlfvP-iOXLMpM6Achhcmxj-DWDcjwK-6rvLYoG6SY5vmpPNzhuyAa-8di8N3zBxL6WTf1Ox9UQrpXXZ1luYDUNxpFfPjzl9ZPpv4Ax6QzJvWn8B85GYs9ZkS1bpbtbbmQ1DTlOpwFMmidr0vaf68hNGvVY5yUgPZy28s8jvrz4AMEfAGlsozRkxr2JRwm3Oj3dqeW6ICKRwnThkjnXnGhQiIDV~bDfkgBzWvh4TyIYm5NBwAYFZdSx81Eno4sthfElFTeSDj0TjCKegNmAd6t4D82JMk8abws0MC-pIqaNLZRU1YVAAAA
jabber-2.i2p=62UDEWG936i-w144HMYX~1aiRnvIkRg2Rbjor3KXUPfKaPXx6A6R54nhLYVI0Zt3Y6bquRIlvZd1KKb6VnQ~GbPhDKvOIzrSP~G7IajZOdncG4QlsuJYwOVVOSVAkR55pxdwwVHXkoR4~4hlahYGvGAayvvDxxWojr-dWg-IIUnEv8hjjstUfphSy~Jx17de0G~8n0DSZ~YKu83Cnu9sSydoXqqczq3KChFDjfUYtohNiZA8lXfrfv5K3pEeTOCemE1rDflPGu~5fawk3G1xNetuZQCKdkFNSxtkRpj3Vm2ej1ApLUi92zog3ekBbxEwfiP0DfC4BvdwgAimVGH8MpXS4BijlDNNbCWak6Pdr24OqVwLJXzWfqb11WFH-mc2JYJCPhJCvESYs~iK3JOZ0QzbW9F-495oB6RD-9guK9bBQHC1lMsNVhaj3Mh8gohKghvy2MrTNbirHKDjyyiAj5RrT1OjWoZNBMvXPEZEtEowuzZptP-tTf19mhGfPFOjAAAA
identiguy.i2p=EhDXPsItHi7Dfx8~~0iHPzkjiK1568wqEifARTr-ngIznwVaBmeEnSHXfLkA-7F~Nrqw95yQLlZpja6N1DyQNtJXPx-Db3apXuKwsuTNSGgZme2kFHXPh7PCsfzARwxn4d2-Nx75V4BSCgQr0gRjDa~L~JmtxPRUpZX2NyBvD0w5MSaD~9t4RuCRQ2gVDpUUDcTh2jkm2bX-R9vRIHhCZLasHxDR7rJC9-38uMsQ6ywd4ulQQYIs0kB1sEofdXUypyqphQU9xIxz7azgzKJp~dHnJpJ6SpaH7UrDN-YfOkAvGFBbk9k-u3NGl571Y205dQF~l3XHvVKf6NS9~JvtVh0JibwQ2qLVPy3Y~TtViffvpunNGOrfnVrT5-1c3lWNHx8gTalY3R-vU5rDd8Bz3AhNM6KWktZq8LiHHhJUxIkebXZsid-f2g~QgRRywxG7ksIlKsCMEn076f03L-oWJIlrAg5MdQd-qlg1Bs1IhrR8YT8jNYrX1bGdlmPrJDpiAAAA
detonate.i2p=6swCa-wAcpBdiCnMdObQlQ15isZTpAV8rlArR54Q5dZkALb5iMCjt0hYMoz89R9ycmtxcTNkgdNX37JsuAjeBFk~nUI11EGSyu-T3AOJg0sxUkoESuxqyLajqDSx8WVziI95LM8zOg~Gv0hpObbziMzu8hzLML2fTWF681sBA6gFzVSnjK9zA3cbWX7f0zGo4crruXsjrmJqWfS2~fNt5RYwHth57u-rZ8a4vl58uyv1Gb1~FAlyjyGMcr7eaQq3UJdEgHOHtXZSoGXZSAerpFreeHqC6Mu~-~-T7iph3jlfNyJYxLOBD8QKMPW~hGW9l6ips5zcc0KFjX2PWH5y4nbttb1LNvI6zn-q1F5t-ITP7UIovWcINfSzy5YoiuT1K6ZSay9DQSeuldBt9ajXDCkJKxOiZlB9C0RAmd9WDyAIKt~HhdnlpA~yOUuthBzrAe7iXt7lUqY3Rwqiby1CgqPKY941ChVY57AwWyxG4clM-a2MB8GuBImfqhp2LDFWAAAA
tinyurl.i2p=mfjh9H8RPkh6vYTSpFoghxSnrOzwBeE8ri3mIKwQOak0v0382ZawEDavYKcjIGg6xqzC0KA8KcOu0h151HzHZgCq2Qbzb6VGGSyyXqtsPGxSIjifIveSkIqNZE~ThySOiWXJ9Qp1EmFe9zA2m6Ey~lZL~yD8ackLzB6VY32xQN7y8kvPSt39Boty0mPXSvS5faVAd0UkbuEHOCP~~Z-E~WB2L2-ZWUD~P3zrZK560fa9-aU8g6pCjIJ8Vqx19tbkvYA~IUPtgXsgmjX6hWLpxN1oHE18jsj1IcAu0TCfldWyOtqcGOeBo20BVLZnx7-AqS0TiZhoIj7nog3x3xoOH2w6JomdJ9AT~4Z08pz0HvymVud0PXKOU7yP~QBj1~Us05-k0F-W6SmU6906l5YfNoXACurwy0VtgTmv7oJyOWpPdhFkZ98PwPL16W4iYsvZx6lDxvCxViAkYLw-fAkmueKrn2ZBau6TlhksvZt-Fqyjs~iZ4yme1243d87IZNb5AAAA
b.i2p=b2Ol9hg3uAOK3mB7sK~ydOZQlZIcc0Cixh~M89i71VgzxsK6NYCvFuiW8YeDZ9p3e-cNQrGTxrV6405km0F~pfKKyu09E-~-zAM19SHFmVWbT-XLEpfo9krLQu7416VMRPsPfLISzRU2wAptW2N6CuQmOEp8telmilhUiEqYTC7GBzmo-XNIKs9XSs3-6wSLkZ0haRJtS7YDdmjrDE-UXhu55E7VhUnm7TAxSH4bdUtM5tDUU7Q8lq~t7y9qSu0EWwvMBnkR9-p8SlcVBr-Zy4aG10OUcvnA07TA~Y8OKtEzJZ1PvzuZVIKerAQXuLK~SMB-9Xni7v7pbGW6Lok6dG9KJjrLORTpURAFgC~mux096li0VcotR6eMfBwGH9zLWgBruknix3TCrmUoafzCP4vyuDrugN8-0k54HZemGb601~9PJQYwyP-V4b33LJdq1d-tKDvgFqR3mm-YrEgZbUQ22BSsI3Jx~tWc3hft8KHkfF3zV6Op4TB04XeQ-WupAAAA
beyond.i2p=WNtBKmOowfzhsCX7dWwddaKVZttpm6aSYDiXzExh2ABPS7Ts9WBZxkqWS8CdEHq6ZSOUGzT6kyQYRGmF9~uTZ~nkI-UG8Hqh6uibT0H6-NOR0k2Qjp5y49VB9VRHJ6I2Vxg1uo09edWYKUatMgl7GESInmhZPDF4NOAq1SD6TQpLfAvVxPWglISZWg2Br3K-u9cHqH3Q0fvyy-Cka9ALZiBkXlZ5vccy5B2KE2JXdI1fAn1TLx8dKuR11Xsd-aNjPofis1Ak-cTia9YjKYSiO4v0A7uguQwBZCmTwI-uXLzUeOZjg69NuIhFUi2hN3tuNvCqAg73ct0lAMnfZ1d3oT9BGW6Aqh4fMTXr8MPT0Qpq9te7KF7L5q-LJ-9JFnYVvjx0P-6YUe-mH89HwyL4nip-ET8jzgIRyYE-WNzc1Q5KOGcrQVZJl7Vv5lziCCMxYoSKqiThps1vv-lC1uEyPbMYANkJVJdx9bnTqlQ8r9rlmFFQEmBCojc7LeRZpslTAAAA

View File

@@ -83,14 +83,14 @@
<!-- Add all web applications within the webapps directory. -->
<!-- + No virtual host specified -->
<!-- + Look in the webapps directory relative to jetty.home or . -->
<!-- + Use the webdefault.xml resource for the defaults descriptor -->
<!-- + Use the default webdefault.xml in jetty's install -->
<!-- + Unpack the war file -->
<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
<Set name="rootWebApp">root</Set>
<Call name="addWebApplications">
<Arg></Arg>
<Arg>./eepsite/webapps/</Arg>
<Arg>./eepsite/webdefault.xml</Arg>
<Arg></Arg>
<Arg type="boolean">true</Arg>
</Call>

View File

@@ -39,30 +39,39 @@ public class DataMessage extends I2NPMessageImpl {
public int getSize() { return _data.length; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
int size = (int)DataHelper.readLong(in, 4);
if ( (size <= 0) || (size > MAX_SIZE) )
throw new I2NPMessageException("wtf, size out of range? " + size);
_data = new byte[size];
int read = read(in, _data);
if (read != size)
throw new DataFormatException("Not enough bytes to read (read = " + read + ", expected = " + size + ")");
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
int curIndex = offset;
long size = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (size > 64*1024)
throw new I2NPMessageException("wtf, size=" + size);
_data = new byte[(int)size];
System.arraycopy(data, curIndex, _data, 0, (int)size);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream((_data != null ? _data.length + 4 : 4));
try {
DataHelper.writeLong(os, 4, (_data != null ? _data.length : 0));
os.write(_data);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
if (_data == null)
return 4;
else
return 4 + _data.length;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) {
if (_data == null) {
out[curIndex++] = 0x0;
out[curIndex++] = 0x0;
out[curIndex++] = 0x0;
out[curIndex++] = 0x0;
} else {
byte len[] = DataHelper.toLong(4, _data.length);
System.arraycopy(len, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_data, 0, out, curIndex, _data.length);
curIndex += _data.length;
}
return os.toByteArray();
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -124,63 +124,96 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
_dontIncludePeers = null;
}
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
_fromHash = new Hash();
_fromHash.readBytes(in);
Boolean val = DataHelper.readBoolean(in);
if (val == null)
int curIndex = offset;
byte keyData[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_key = new Hash(keyData);
byte fromData[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, fromData, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_fromHash = new Hash(fromData);
boolean tunnelSpecified = false;
switch (data[curIndex]) {
case DataHelper.BOOLEAN_TRUE:
tunnelSpecified = true;
break;
case DataHelper.BOOLEAN_FALSE:
tunnelSpecified = false;
break;
default:
throw new I2NPMessageException("Tunnel must be explicitly specified (or not)");
boolean tunnelSpecified = val.booleanValue();
if (tunnelSpecified) {
_replyTunnel = new TunnelId();
_replyTunnel.readBytes(in);
}
int numPeers = (int)DataHelper.readLong(in, 2);
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
throw new DataFormatException("Invalid number of peers - " + numPeers);
Set peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
Hash peer = new Hash();
peer.readBytes(in);
peers.add(peer);
}
_dontIncludePeers = peers;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
curIndex++;
if (tunnelSpecified) {
_replyTunnel = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
}
int numPeers = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
throw new I2NPMessageException("Invalid number of peers - " + numPeers);
Set peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
peers.add(new Hash(peer));
}
_dontIncludePeers = peers;
}
protected int calculateWrittenLength() {
int totalLength = 0;
totalLength += Hash.HASH_LENGTH*2; // key+fromHash
totalLength += 1; // hasTunnel?
if (_replyTunnel != null)
totalLength += 4;
totalLength += 2; // numPeers
if (_dontIncludePeers != null)
totalLength += Hash.HASH_LENGTH * _dontIncludePeers.size();
return totalLength;
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null) throw new I2NPMessageException("Key being searched for not specified");
if (_fromHash == null) throw new I2NPMessageException("From address not specified");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_key.writeBytes(os);
_fromHash.writeBytes(os);
if (_replyTunnel != null) {
DataHelper.writeBoolean(os, Boolean.TRUE);
_replyTunnel.writeBytes(os);
} else {
DataHelper.writeBoolean(os, Boolean.FALSE);
}
if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) {
DataHelper.writeLong(os, 2, 0);
} else {
DataHelper.writeLong(os, 2, _dontIncludePeers.size());
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
peer.writeBytes(os);
}
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
System.arraycopy(_fromHash.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
if (_replyTunnel != null) {
out[curIndex++] = DataHelper.BOOLEAN_TRUE;
byte id[] = DataHelper.toLong(4, _replyTunnel.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
} else {
out[curIndex++] = DataHelper.BOOLEAN_FALSE;
}
return os.toByteArray();
if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) {
out[curIndex++] = 0x0;
out[curIndex++] = 0x0;
} else {
byte len[] = DataHelper.toLong(2, _dontIncludePeers.size());
out[curIndex++] = len[0];
out[curIndex++] = len[1];
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
System.arraycopy(peer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
}
}
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -57,56 +57,58 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
public Hash getFromHash() { return _from; }
public void setFromHash(Hash from) { _from = from; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
int num = (int)DataHelper.readLong(in, 1);
_peerHashes.clear();
for (int i = 0; i < num; i++) {
Hash peer = new Hash();
peer.readBytes(in);
addReply(peer);
}
_from = new Hash();
_from.readBytes(in);
_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
int curIndex = offset;
byte keyData[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_key = new Hash(keyData);
int num = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
_peerHashes.clear();
for (int i = 0; i < num; i++) {
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
addReply(new Hash(peer));
}
byte from[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, from, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_from = new Hash(from);
_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
return Hash.HASH_LENGTH + 1 + getNumReplies()*Hash.HASH_LENGTH + Hash.HASH_LENGTH;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null)
throw new I2NPMessageException("Key in reply to not specified");
if (_peerHashes == null)
throw new I2NPMessageException("Peer replies are null");
if (_from == null)
throw new I2NPMessageException("No 'from' address specified!");
byte rv[] = null;
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_key.writeBytes(os);
DataHelper.writeLong(os, 1, _peerHashes.size());
for (int i = 0; i < getNumReplies(); i++) {
Hash peer = getReply(i);
peer.writeBytes(os);
}
_from.writeBytes(os);
rv = os.toByteArray();
_context.statManager().addRateData("netDb.searchReplyMessageSendSize", rv.length, 1);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte len[] = DataHelper.toLong(1, _peerHashes.size());
out[curIndex++] = len[0];
for (int i = 0; i < getNumReplies(); i++) {
System.arraycopy(getReply(i).getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
}
return rv;
System.arraycopy(_from.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -35,6 +35,8 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
private int _type;
private LeaseSet _leaseSet;
private RouterInfo _info;
private byte[] _leaseSetCache;
private byte[] _routerInfoCache;
private long _replyToken;
private TunnelId _replyTunnel;
private Hash _replyGateway;
@@ -117,76 +119,109 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
public Hash getReplyGateway() { return _replyGateway; }
public void setReplyGateway(Hash peer) { _replyGateway = peer; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Hash read: " + _key.toBase64());
_type = (int)DataHelper.readLong(in, 1);
_replyToken = DataHelper.readLong(in, 4);
if (_replyToken > 0) {
_replyTunnel = new TunnelId();
_replyTunnel.readBytes(in);
_replyGateway = new Hash();
_replyGateway.readBytes(in);
} else {
_replyTunnel = null;
_replyGateway = null;
int curIndex = offset;
byte keyData[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_key = new Hash(keyData);
_type = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
_replyToken = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (_replyToken > 0) {
_replyTunnel = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
byte gw[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_replyGateway = new Hash(gw);
} else {
_replyTunnel = null;
_replyGateway = null;
}
if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
try {
_leaseSet.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the leaseSet", dfe);
}
if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
_leaseSet.readBytes(in);
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
int compressedSize = (int)DataHelper.readLong(in, 2);
byte compressed[] = new byte[compressedSize];
int read = DataHelper.read(in, compressed);
if (read != compressedSize)
throw new I2NPMessageException("Invalid compressed data size (expected "
+ compressedSize + " read " + read + ")");
ByteArrayInputStream bais = new ByteArrayInputStream(DataHelper.decompress(compressed));
_info.readBytes(bais);
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize);
try {
_info.readBytes(new ByteArrayInputStream(decompressed));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the routerInfo", dfe);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken
if (_replyToken > 0)
len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway
if (_type == KEY_TYPE_LEASESET) {
_leaseSetCache = _leaseSet.toByteArray();
len += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte uncompressed[] = _info.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
_routerInfoCache = compressed;
len += compressed.length + 2;
}
return len;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null) throw new I2NPMessageException("Invalid key");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
ByteArrayOutputStream os = new ByteArrayOutputStream(256);
try {
_key.writeBytes(os);
DataHelper.writeLong(os, 1, _type);
DataHelper.writeLong(os, 4, _replyToken);
if (_replyToken > 0) {
_replyTunnel.writeBytes(os);
_replyGateway.writeBytes(os);
} else {
// noop
}
if (_type == KEY_TYPE_LEASESET) {
_leaseSet.writeBytes(os);
} else if (_type == KEY_TYPE_ROUTERINFO) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
_info.writeBytes(baos);
byte uncompressed[] = baos.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
DataHelper.writeLong(os, 2, compressed.length);
os.write(compressed);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte type[] = DataHelper.toLong(1, _type);
out[curIndex++] = type[0];
byte tok[] = DataHelper.toLong(4, _replyToken);
System.arraycopy(tok, 0, out, curIndex, 4);
curIndex += 4;
if (_replyToken > 0) {
byte id[] = DataHelper.toLong(4, _replyTunnel.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_replyGateway.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
}
return os.toByteArray();
if (_type == KEY_TYPE_LEASESET) {
// initialized in calculateWrittenLength
System.arraycopy(_leaseSetCache, 0, out, curIndex, _leaseSetCache.length);
curIndex += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _routerInfoCache.length);
out[curIndex++] = len[0];
out[curIndex++] = len[1];
System.arraycopy(_routerInfoCache, 0, out, curIndex, _routerInfoCache.length);
curIndex += _routerInfoCache.length;
}
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -42,27 +42,30 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
public Date getArrival() { return _arrival; }
public void setArrival(Date arrival) { _arrival = arrival; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_id = DataHelper.readLong(in, 4);
_arrival = DataHelper.readDate(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
int curIndex = offset;
_id = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_arrival = DataHelper.fromDate(data, curIndex);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
return 4 + DataHelper.DATE_LENGTH; // id + arrival
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 4, _id);
DataHelper.writeDate(os, _arrival);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
return os.toByteArray();
byte id[] = DataHelper.toLong(4, _id);
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte date[] = DataHelper.toDate(_arrival);
System.arraycopy(date, 0, out, curIndex, DataHelper.DATE_LENGTH);
curIndex += DataHelper.DATE_LENGTH;
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -98,7 +98,7 @@ public class GarlicClove extends DataStructureImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Wrote instructions: " + _instructions);
_msg.writeBytes(out);
out.write(_msg.toByteArray());
DataHelper.writeLong(out, 4, _cloveId);
DataHelper.writeDate(out, _expiration);
if (_log.shouldLog(Log.DEBUG))

View File

@@ -35,30 +35,29 @@ public class GarlicMessage extends I2NPMessageImpl {
public byte[] getData() { return _data; }
public void setData(byte[] data) { _data = data; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
long len = DataHelper.readLong(in, 4);
_data = new byte[(int)len];
int read = read(in, _data);
if (read != len)
throw new I2NPMessageException("Incorrect size read [" + read + " read, expected " + len + "]");
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
int curIndex = offset;
long len = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if ( (len <= 0) || (len > 64*1024) ) throw new I2NPMessageException("size="+len);
_data = new byte[(int)len];
System.arraycopy(data, curIndex, _data, 0, (int)len);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_data == null) || (_data.length <= 0) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 4, _data.length);
os.write(_data);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
return os.toByteArray();
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
return 4 + _data.length;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
byte len[] = DataHelper.toLong(4, _data.length);
System.arraycopy(len, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_data, 0, out, curIndex, _data.length);
curIndex += _data.length;
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -28,11 +28,28 @@ public interface I2NPMessage extends DataStructure {
*
* @param in stream to read from
* @param type I2NP message type
* @param buffer scratch buffer to be used when reading and parsing
* @return size of the message read (including headers)
* @throws I2NPMessageException if the stream doesn't contain a valid message
* that this class can read.
* @throws IOException if there is a problem reading from the stream
*/
public void readBytes(InputStream in, int type) throws I2NPMessageException, IOException;
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException;
/**
* Read the body into the data structures, after the initial type byte and
* the uniqueId / expiration, using the current class's format as defined by
* the I2NP specification
*
* @param data data to read from
* @param offset where to start in the data array
* @param dataSize how long into the data to read
* @param type I2NP message type
* @throws I2NPMessageException if the stream doesn't contain a valid message
* that this class can read.
* @throws IOException if there is a problem reading from the stream
*/
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException;
/**
* Return the unique identifier for this type of I2NP message, as defined in
@@ -52,5 +69,8 @@ public interface I2NPMessage extends DataStructure {
public Date getMessageExpiration();
/** How large the message is, including any checksums */
public int getSize();
public int getMessageSize();
/** write the message to the buffer, returning the number of bytes written */
public int toByteArray(byte buffer[]);
}
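A hypothetical round-trip sketch of the revised contract (not part of this diff): serialize a message into a caller-supplied buffer via getMessageSize()/toByteArray(), then parse it back through I2NPMessageHandler. The DataMessage context constructor and setData() setter are assumed from the surrounding changeset.

import java.io.ByteArrayInputStream;
import net.i2p.I2PAppContext;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.I2NPMessageHandler;

class RoundTripSketch {
    static I2NPMessage roundTrip(I2PAppContext ctx, byte payload[]) throws Exception {
        DataMessage msg = new DataMessage(ctx);          // assumed context constructor
        msg.setData(payload);                            // assumed setter
        byte buf[] = new byte[msg.getMessageSize()];     // header + body + hash
        int len = msg.toByteArray(buf);
        // the handler reads the type byte and header, verifies the hash, then
        // dispatches to the matching readMessage(byte[], ...) implementation
        return new I2NPMessageHandler(ctx).readMessage(new ByteArrayInputStream(buf, 0, len));
    }
}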

View File

@@ -26,9 +26,13 @@ public class I2NPMessageHandler {
private I2PAppContext _context;
private long _lastReadBegin;
private long _lastReadEnd;
private int _lastSize;
private byte _messageBuffer[];
public I2NPMessageHandler(I2PAppContext context) {
_context = context;
_log = context.logManager().getLog(I2NPMessageHandler.class);
_messageBuffer = null;
_lastSize = -1;
}
/**
@@ -39,25 +43,39 @@ public class I2NPMessageHandler {
* message - if it is an unknown type or has improper formatting, etc.
*/
public I2NPMessage readMessage(InputStream in) throws IOException, I2NPMessageException {
if (_messageBuffer == null) _messageBuffer = new byte[38*1024]; // more than necessary
try {
int type = (int)DataHelper.readLong(in, 1);
_lastReadBegin = System.currentTimeMillis();
I2NPMessage msg = createMessage(in, type);
msg.readBytes(in, type);
I2NPMessage msg = createMessage(type);
if (msg == null)
throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message");
try {
_lastSize = msg.readBytes(in, type, _messageBuffer);
} catch (IOException ioe) {
throw ioe;
} catch (I2NPMessageException ime) {
throw ime;
} catch (Exception e) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error reading the stream", e);
throw new IOException("Unknown error reading the " + msg.getClass().getName()
+ ": " + e.getMessage());
}
_lastReadEnd = System.currentTimeMillis();
return msg;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message", dfe);
}
}
public long getLastReadTime() { return _lastReadEnd - _lastReadBegin; }
public int getLastSize() { return _lastSize; }
/**
* Yes, this is fairly ugly, but its the only place it ever happens.
*
*/
private I2NPMessage createMessage(InputStream in, int type) throws IOException, I2NPMessageException {
private I2NPMessage createMessage(int type) throws I2NPMessageException {
switch (type) {
case DatabaseStoreMessage.MESSAGE_TYPE:
return new DatabaseStoreMessage(_context);
@@ -78,7 +96,7 @@ public class I2NPMessageHandler {
case TunnelCreateStatusMessage.MESSAGE_TYPE:
return new TunnelCreateStatusMessage(_context);
default:
throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message");
return null;
}
}

View File

@@ -31,6 +31,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
protected I2PAppContext _context;
private Date _expiration;
private long _uniqueId;
private byte _data[];
public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default
@@ -39,36 +40,18 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
_log = context.logManager().getLog(I2NPMessageImpl.class);
_expiration = new Date(_context.clock().now() + DEFAULT_EXPIRATION_MS);
_uniqueId = _context.random().nextLong(MAX_ID_VALUE);
_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
}
/**
* Write out the payload part of the message (not including the initial
* 1 byte type)
*
*/
protected abstract byte[] writeMessage() throws I2NPMessageException, IOException;
/**
* Read the body into the data structures, after the initial type byte and
* the uniqueId / expiration, using the current class's format as defined by
* the I2NP specification
*
* @param in stream to read from
* @param type I2NP message type
* @throws I2NPMessageException if the stream doesn't contain a valid message
* that this class can read.
* @throws IOException if there is a problem reading from the stream
*/
protected abstract void readMessage(InputStream in, int type) throws I2NPMessageException, IOException;
public void readBytes(InputStream in) throws DataFormatException, IOException {
try {
readBytes(in, -1);
readBytes(in, -1, new byte[1024]);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Bad bytes", ime);
}
}
public void readBytes(InputStream in, int type) throws I2NPMessageException, IOException {
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException {
try {
if (type < 0)
type = (int)DataHelper.readLong(in, 1);
@@ -77,36 +60,43 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
int size = (int)DataHelper.readLong(in, 2);
Hash h = new Hash();
h.readBytes(in);
byte data[] = new byte[size];
int read = DataHelper.read(in, data);
if (read != size)
throw new I2NPMessageException("Payload is too short [" + read + ", wanted " + size + "]");
Hash calc = _context.sha().calculateHash(data);
if (buffer.length < size) {
if (size > 64*1024) throw new I2NPMessageException("size=" + size);
buffer = new byte[size];
}
int cur = 0;
while (cur < size) {
int numRead = in.read(buffer, cur, size- cur);
if (numRead == -1) {
throw new I2NPMessageException("Payload is too short [" + numRead + ", wanted " + size + "]");
}
cur += numRead;
}
Hash calc = _context.sha().calculateHash(buffer, 0, size);
if (!calc.equals(h))
throw new I2NPMessageException("Hash does not match");
long start = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(new ByteArrayInputStream(data), type);
readMessage(buffer, 0, size, type);
long time = _context.clock().now() - start;
if (time > 50)
_context.statManager().addRateData("i2np.readTime", time, time);
return size + Hash.HASH_LENGTH + 1 + 4 + DataHelper.DATE_LENGTH;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message header", dfe);
}
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
try {
DataHelper.writeLong(out, 1, getType());
DataHelper.writeLong(out, 4, _uniqueId);
DataHelper.writeDate(out, _expiration);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing bytes: type = " + getType() + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
byte[] data = writeMessage();
DataHelper.writeLong(out, 2, data.length);
Hash h = _context.sha().calculateHash(data);
h.writeBytes(out);
out.write(data);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Error writing out the I2NP message data", ime);
}
int size = getMessageSize();
if (size < 47) throw new DataFormatException("Unable to build the message");
byte buf[] = new byte[size];
int read = toByteArray(buf);
if (read < 0)
out.write(buf, 0, read);
}
/**
@@ -122,14 +112,76 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
public Date getMessageExpiration() { return _expiration; }
public void setMessageExpiration(Date exp) { _expiration = exp; }
public int getSize() {
public synchronized int getMessageSize() {
return calculateWrittenLength()+47; // 47 bytes in the header
}
public byte[] toByteArray() {
byte data[] = new byte[getMessageSize()];
int written = toByteArray(data);
if (written != data.length) {
_log.error("Error writing out " + data.length + " for " + getClass().getName());
return null;
}
return data;
}
public int toByteArray(byte buffer[]) {
long start = _context.clock().now();
byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
DataHelper.toLong(4, _uniqueId),
DataHelper.toDate(_expiration),
new byte[2],
new byte[Hash.HASH_LENGTH]};
byte suffix[][] = new byte[][] { };
try {
byte msg[] = writeMessage();
return msg.length + 43;
} catch (IOException ioe) {
return 0;
int writtenLen = toByteArray(buffer, prefix, suffix);
int prefixLen = 1+4+8+2+Hash.HASH_LENGTH;
int suffixLen = 0;
int payloadLen = writtenLen - prefixLen - suffixLen;
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen);
byte len[] = DataHelper.toLong(2, payloadLen);
buffer[1+4+8] = len[0];
buffer[1+4+8+1] = len[1];
for (int i = 0; i < Hash.HASH_LENGTH; i++)
System.arraycopy(h.getData(), 0, buffer, 1+4+8+2, Hash.HASH_LENGTH);
long time = _context.clock().now() - start;
if (time > 50)
_context.statManager().addRateData("i2np.writeTime", time, time);
return writtenLen;
} catch (I2NPMessageException ime) {
return 0;
_context.logManager().getLog(getClass()).error("Error writing", ime);
throw new IllegalStateException("Unable to serialize the message: " + ime.getMessage());
}
}
/** calculate the message body's length (not including the header and footer */
protected abstract int calculateWrittenLength();
/**
* write the message body to the output array, starting at the given index.
* @return the index into the array after the last byte written
*/
protected abstract int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException;
protected int toByteArray(byte out[], byte[][] prefix, byte[][] suffix) throws I2NPMessageException {
int curIndex = 0;
for (int i = 0; i < prefix.length; i++) {
System.arraycopy(prefix[i], 0, out, curIndex, prefix[i].length);
curIndex += prefix[i].length;
}
curIndex = writeMessageBody(out, curIndex);
for (int i = 0; i < suffix.length; i++) {
System.arraycopy(suffix[i], 0, out, curIndex, suffix[i].length);
curIndex += suffix[i].length;
}
return curIndex;
}
}
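For reference, a worked breakdown of the 47-byte header constant used by getMessageSize() and the prefix indexing in toByteArray() above (a sketch, not code from the diff):

class I2NPHeaderLayoutSketch {
    // 1 (type) + 4 (uniqueId) + 8 (expiration, DataHelper.DATE_LENGTH)
    // + 2 (payload size) + 32 (SHA-256 payload hash, Hash.HASH_LENGTH) = 47
    static final int HEADER_LENGTH = 1 + 4 + 8 + 2 + 32;
}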

View File

@@ -81,7 +81,7 @@ public class I2NPMessageReader {
* reader
*
*/
public void messageReceived(I2NPMessageReader reader, I2NPMessage message, long msToRead);
public void messageReceived(I2NPMessageReader reader, I2NPMessage message, long msToRead, int bytesRead);
/**
* Notify the listener that an exception was thrown while reading from the given
* reader
@@ -122,7 +122,8 @@ public class I2NPMessageReader {
I2NPMessage msg = _handler.readMessage(_stream);
if (msg != null) {
long msToRead = _handler.getLastReadTime();
_listener.messageReceived(I2NPMessageReader.this, msg, msToRead);
int bytesRead = _handler.getLastSize();
_listener.messageReceived(I2NPMessageReader.this, msg, msToRead, bytesRead);
}
} catch (I2NPMessageException ime) {
if (_log.shouldLog(Log.WARN))

View File

@@ -28,7 +28,8 @@ public class TunnelConfigurationSessionKey extends DataStructureImpl {
private final static Log _log = new Log(TunnelConfigurationSessionKey.class);
private SessionKey _key;
public TunnelConfigurationSessionKey() { setKey(null); }
public TunnelConfigurationSessionKey() { this(null); }
public TunnelConfigurationSessionKey(SessionKey key) { setKey(key); }
public SessionKey getKey() { return _key; }
public void setKey(SessionKey key) { _key= key; }

View File

@@ -19,6 +19,8 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
@@ -52,6 +54,8 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
private TunnelId _replyTunnel;
private Hash _replyPeer;
private byte[] _certificateCache;
public static final int PARTICIPANT_TYPE_GATEWAY = 1;
public static final int PARTICIPANT_TYPE_ENDPOINT = 2;
public static final int PARTICIPANT_TYPE_OTHER = 3;
@@ -124,91 +128,190 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
public void setReplyPeer(Hash peer) { _replyPeer = peer; }
public Hash getReplyPeer() { return _replyPeer; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_participantType = (int)DataHelper.readLong(in, 1);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter = new Hash();
_nextRouter.readBytes(in);
_nextTunnelId = new TunnelId();
_nextTunnelId.readBytes(in);
}
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_tunnelDuration = DataHelper.readLong(in, 4);
_configKey = new TunnelConfigurationSessionKey();
_configKey.readBytes(in);
_maxPeakMessagesPerMin = DataHelper.readLong(in, 4);
_maxAvgMessagesPerMin = DataHelper.readLong(in, 4);
_maxPeakBytesPerMin = DataHelper.readLong(in, 4);
_maxAvgBytesPerMin = DataHelper.readLong(in, 4);
int curIndex = offset;
_participantType = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_nextRouter = new Hash(peer);
int flags = (int)DataHelper.readLong(in, 1);
_includeDummyTraffic = flagsIncludeDummy(flags);
_reorderMessages = flagsReorder(flags);
_verificationPubKey = new TunnelSigningPublicKey();
_verificationPubKey.readBytes(in);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey = new TunnelSigningPrivateKey();
_verificationPrivKey.readBytes(in);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey = new TunnelSessionKey();
_tunnelKey.readBytes(in);
}
_certificate = new Certificate();
_certificate.readBytes(in);
_replyTag = new SessionTag();
_replyTag.readBytes(in);
_replyKey = new SessionKey();
_replyKey.readBytes(in);
_replyTunnel = new TunnelId();
_replyTunnel.readBytes(in);
_replyPeer = new Hash();
_replyPeer.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
_nextTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
}
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
if (_tunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("wtf, tunnelId == " + _tunnelId);
_tunnelDuration = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
byte key[] = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_configKey = new TunnelConfigurationSessionKey(new SessionKey(key));
_maxPeakMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxAvgMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxPeakBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxAvgBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
int flags = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
_includeDummyTraffic = flagsIncludeDummy(flags);
_reorderMessages = flagsReorder(flags);
key = new byte[SigningPublicKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SigningPublicKey.KEYSIZE_BYTES);
curIndex += SigningPublicKey.KEYSIZE_BYTES;
_verificationPubKey = new TunnelSigningPublicKey(new SigningPublicKey(key));
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
key = new byte[SigningPrivateKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SigningPrivateKey.KEYSIZE_BYTES);
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
_verificationPrivKey = new TunnelSigningPrivateKey(new SigningPrivateKey(key));
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
key = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_tunnelKey = new TunnelSessionKey(new SessionKey(key));
}
int certType = (int) DataHelper.fromLong(data, curIndex, 1);
curIndex++;
int certLength = (int) DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
if (certLength <= 0) {
_certificate = new Certificate(certType, null);
} else {
if (certLength > 16*1024) throw new I2NPMessageException("cert size " + certLength);
byte certPayload[] = new byte[certLength];
System.arraycopy(data, curIndex, certPayload, 0, certLength);
curIndex += certLength;
_certificate = new Certificate(certType, certPayload);
}
byte tag[] = new byte[SessionTag.BYTE_LENGTH];
System.arraycopy(data, curIndex, tag, 0, SessionTag.BYTE_LENGTH);
curIndex += SessionTag.BYTE_LENGTH;
_replyTag = new SessionTag(tag);
key = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_replyKey = new SessionKey(key);
_replyTunnel = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_replyPeer = new Hash(peer);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 1, _participantType);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter.writeBytes(os);
_nextTunnelId.writeBytes(os);
}
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 4, _tunnelDuration);
_configKey.writeBytes(os);
DataHelper.writeLong(os, 4, _maxPeakMessagesPerMin);
DataHelper.writeLong(os, 4, _maxAvgMessagesPerMin);
DataHelper.writeLong(os, 4, _maxPeakBytesPerMin);
DataHelper.writeLong(os, 4, _maxAvgBytesPerMin);
long flags = getFlags();
DataHelper.writeLong(os, 1, flags);
_verificationPubKey.writeBytes(os);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey.writeBytes(os);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey.writeBytes(os);
}
_certificate.writeBytes(os);
_replyTag.writeBytes(os);
_replyKey.writeBytes(os);
_replyTunnel.writeBytes(os);
_replyPeer.writeBytes(os);
} catch (Throwable t) {
throw new I2NPMessageException("Error writing out the message data", t);
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
int length = 0;
length += 1; // participantType
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
length += Hash.HASH_LENGTH;
length += 4; // nextTunnelId
}
return os.toByteArray();
length += 4; // tunnelId
length += 4; // duration;
length += SessionKey.KEYSIZE_BYTES;
length += 4*4; // max limits
length += 1; // flags
length += SigningPublicKey.KEYSIZE_BYTES;
if (_participantType == PARTICIPANT_TYPE_GATEWAY)
length += SigningPrivateKey.KEYSIZE_BYTES;
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT)
|| (_participantType == PARTICIPANT_TYPE_GATEWAY) )
length += SessionKey.KEYSIZE_BYTES;
_certificateCache = _certificate.toByteArray();
length += _certificateCache.length;
length += SessionTag.BYTE_LENGTH;
length += SessionKey.KEYSIZE_BYTES;
length += 4; // replyTunnel
length += Hash.HASH_LENGTH; // replyPeer
return length;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
byte type[] = DataHelper.toLong(1, _participantType);
out[curIndex++] = type[0];
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
System.arraycopy(_nextRouter.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte id[] = DataHelper.toLong(4, _nextTunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
}
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte duration[] = DataHelper.toLong(4, _tunnelDuration);
System.arraycopy(duration, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_configKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
byte val[] = DataHelper.toLong(4, _maxPeakMessagesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxAvgMessagesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxPeakBytesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxAvgBytesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
long flags = getFlags();
byte flag[] = DataHelper.toLong(1, flags);
out[curIndex++] = flag[0];
System.arraycopy(_verificationPubKey.getKey().getData(), 0, out, curIndex, SigningPublicKey.KEYSIZE_BYTES);
curIndex += SigningPublicKey.KEYSIZE_BYTES;
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
System.arraycopy(_verificationPrivKey.getKey().getData(), 0, out, curIndex, SigningPrivateKey.KEYSIZE_BYTES);
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
System.arraycopy(_tunnelKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
}
System.arraycopy(_certificateCache, 0, out, curIndex, _certificateCache.length);
curIndex += _certificateCache.length;
System.arraycopy(_replyTag.getData(), 0, out, curIndex, SessionTag.BYTE_LENGTH);
curIndex += SessionTag.BYTE_LENGTH;
System.arraycopy(_replyKey.getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
id = DataHelper.toLong(4, _replyTunnel.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_replyPeer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
return curIndex;
}
private boolean flagsIncludeDummy(long flags) {
@@ -304,4 +407,5 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
buf.append("]");
return buf.toString();
}
}

View File

@@ -46,7 +46,11 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
}
public TunnelId getTunnelId() { return _tunnelId; }
public void setTunnelId(TunnelId id) { _tunnelId = id; }
public void setTunnelId(TunnelId id) {
_tunnelId = id;
if ( (id != null) && (id.getTunnelId() <= 0) )
throw new IllegalArgumentException("wtf, tunnelId " + id);
}
public int getStatus() { return _status; }
public void setStatus(int status) { _status = status; }
@@ -57,31 +61,42 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public Hash getFromHash() { return _from; }
public void setFromHash(Hash from) { _from = from; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_status = (int)DataHelper.readLong(in, 1);
_from = new Hash();
_from.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
int curIndex = offset;
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
if (_tunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("wtf, negative tunnelId? " + _tunnelId);
_status = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_from = new Hash(peer);
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 1, (_status < 0 ? 255 : _status));
_from.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
return os.toByteArray();
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
return 4 + 1 + Hash.HASH_LENGTH; // id + status + from
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
if (_tunnelId.getTunnelId() < 0) throw new I2NPMessageException("Negative tunnelId!? " + _tunnelId);
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte status[] = DataHelper.toLong(1, _status);
out[curIndex++] = status[0];
System.arraycopy(_from.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }

View File

@@ -15,6 +15,8 @@ import java.io.InputStream;
import net.i2p.I2PAppContext;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.Signature;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
@@ -44,10 +46,16 @@ public class TunnelMessage extends I2NPMessageImpl {
}
public TunnelId getTunnelId() { return _tunnelId; }
public void setTunnelId(TunnelId id) { _tunnelId = id; }
public void setTunnelId(TunnelId id) {
_tunnelId = id;
}
public byte[] getData() { return _data; }
public void setData(byte data[]) { _data = data; }
public void setData(byte data[]) {
_data = data;
if ( (data != null) && (_data.length <= 0) )
throw new IllegalArgumentException("Empty tunnel payload?");
}
public TunnelVerificationStructure getVerificationStructure() { return _verification; }
public void setVerificationStructure(TunnelVerificationStructure verification) { _verification = verification; }
@@ -55,71 +63,93 @@ public class TunnelMessage extends I2NPMessageImpl {
public byte[] getEncryptedDeliveryInstructions() { return _encryptedInstructions; }
public void setEncryptedDeliveryInstructions(byte instructions[]) { _encryptedInstructions = instructions; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read tunnel message for tunnel " + _tunnelId);
_size = DataHelper.readLong(in, 4);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read tunnel message size: " + _size);
if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size);
_data = new byte[(int)_size];
int read = read(in, _data);
if (read != _size)
throw new I2NPMessageException("Incorrect number of bytes read (" + read + ", expected " + _size);
int includeVerification = (int)DataHelper.readLong(in, 1);
if (includeVerification == FLAG_INCLUDESTRUCTURE) {
_verification = new TunnelVerificationStructure();
_verification.readBytes(in);
int len = (int)DataHelper.readLong(in, 2);
_encryptedInstructions = new byte[len];
read = read(in, _encryptedInstructions);
if (read != len)
throw new I2NPMessageException("Incorrect number of bytes read for instructions (" + read + ", expected " + len + ")");
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
int curIndex = offset;
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
if (_tunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("Invalid tunnel Id " + _tunnelId);
_size = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size);
if (_size > 64*1024) throw new I2NPMessageException("Invalid size in the structure: " + _size);
_data = new byte[(int)_size];
System.arraycopy(data, curIndex, _data, 0, (int)_size);
curIndex += _size;
int includeVerification = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
if (includeVerification == FLAG_INCLUDESTRUCTURE) {
byte vHash[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, vHash, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte vSig[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(data, curIndex, vSig, 0, Signature.SIGNATURE_BYTES);
curIndex += Signature.SIGNATURE_BYTES;
_verification = new TunnelVerificationStructure(new Hash(vHash), new Signature(vSig));
int len = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
if ( (len <= 0) || (len > 4*1024) ) throw new I2NPMessageException("wtf, size of instructions: " + len);
_encryptedInstructions = new byte[len];
System.arraycopy(data, curIndex, _encryptedInstructions, 0, len);
curIndex += len;
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_tunnelId == null) || (_data == null) || (_data.length <= 0) )
throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(64+_data.length);
try {
_tunnelId.writeBytes(os);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing tunnel message for tunnel " + _tunnelId);
DataHelper.writeLong(os, 4, _data.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing tunnel message length: " + _data.length);
os.write(_data);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing tunnel message data");
if ( (_verification == null) || (_encryptedInstructions == null) ) {
DataHelper.writeLong(os, 1, FLAG_DONT_INCLUDESTRUCTURE);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing DontIncludeStructure flag");
} else {
DataHelper.writeLong(os, 1, FLAG_INCLUDESTRUCTURE);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Writing IncludeStructure flag, then the verification structure, then the " +
"E(instr).length [" + _encryptedInstructions.length + "], then the E(instr)");
_verification.writeBytes(os);
DataHelper.writeLong(os, 2, _encryptedInstructions.length);
os.write(_encryptedInstructions);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
int length = 0;
length += 4; // tunnelId
length += 4; // data length
length += _data.length;
if ( (_verification == null) || (_encryptedInstructions == null) ) {
length += 1; // include verification?
} else {
length += 1; // include verification?
length += Hash.HASH_LENGTH + Signature.SIGNATURE_BYTES;
length += 2; // instructions length
length += _encryptedInstructions.length;
}
byte rv[] = os.toByteArray();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Overall data being written: " + rv.length);
return rv;
return length;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_tunnelId == null) || (_data == null) )
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + " data=" + _data + ")");
if (_data.length <= 0)
throw new I2NPMessageException("Not enough data to write out (data.length=" + _data.length + ")");
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte len[] = DataHelper.toLong(4, _data.length);
System.arraycopy(len, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_data, 0, out, curIndex, _data.length);
curIndex += _data.length;
if ( (_verification == null) || (_encryptedInstructions == null) ) {
byte flag[] = DataHelper.toLong(1, FLAG_DONT_INCLUDESTRUCTURE);
out[curIndex++] = flag[0];
} else {
byte flag[] = DataHelper.toLong(1, FLAG_INCLUDESTRUCTURE);
out[curIndex++] = flag[0];
System.arraycopy(_verification.getMessageHash().getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
System.arraycopy(_verification.getAuthorizationSignature().getData(), 0, out, curIndex, Signature.SIGNATURE_BYTES);
curIndex += Signature.SIGNATURE_BYTES;
len = DataHelper.toLong(2, _encryptedInstructions.length);
System.arraycopy(len, 0, out, curIndex, 2);
curIndex += 2;
System.arraycopy(_encryptedInstructions, 0, out, curIndex, _encryptedInstructions.length);
curIndex += _encryptedInstructions.length;
}
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }
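
The calculateWrittenLength()/writeMessageBody() pair above is the allocate-once replacement for the old stream-based writeMessage(): the caller sizes a single array up front and the message fills it in place, avoiding a temporary ByteArrayOutputStream per message. A minimal, self-contained sketch of that pattern (not I2P code; the record layout and values below are made up for illustration):

// Compute the serialized size first, then write everything into one preallocated array.
public class FlatWriteDemo {
    // big-endian unsigned write, mirroring the DataHelper.toLong/arraycopy idiom above
    static int writeLong(byte[] out, int off, int numBytes, long value) {
        for (int i = numBytes - 1; i >= 0; i--) {
            out[off + i] = (byte) (value & 0xFF);
            value >>>= 8;
        }
        return off + numBytes;
    }
    public static void main(String[] args) {
        byte[] payload = "example tunnel payload".getBytes();
        int size = 4 /* tunnelId */ + 4 /* length */ + payload.length + 1 /* flag */;
        byte[] out = new byte[size];
        int idx = writeLong(out, 0, 4, 42L);           // tunnel id
        idx = writeLong(out, idx, 4, payload.length);  // data length
        System.arraycopy(payload, 0, out, idx, payload.length);
        idx += payload.length;
        out[idx++] = 0;                                // "don't include verification" flag
        System.out.println("wrote " + idx + " of " + out.length + " bytes");
    }
}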

View File

@@ -28,7 +28,8 @@ public class TunnelSessionKey extends DataStructureImpl {
private final static Log _log = new Log(TunnelSessionKey.class);
private SessionKey _key;
public TunnelSessionKey() { setKey(null); }
public TunnelSessionKey() { this(null); }
public TunnelSessionKey(SessionKey key) { setKey(key); }
public SessionKey getKey() { return _key; }
public void setKey(SessionKey key) { _key= key; }

View File

@@ -29,7 +29,8 @@ public class TunnelSigningPrivateKey extends DataStructureImpl {
private final static Log _log = new Log(EndPointPrivateKey.class);
private SigningPrivateKey _key;
public TunnelSigningPrivateKey() { setKey(null); }
public TunnelSigningPrivateKey() { this(null); }
public TunnelSigningPrivateKey(SigningPrivateKey key) { setKey(key); }
public SigningPrivateKey getKey() { return _key; }
public void setKey(SigningPrivateKey key) { _key= key; }

View File

@@ -28,7 +28,8 @@ public class TunnelSigningPublicKey extends DataStructureImpl {
private final static Log _log = new Log(TunnelSigningPublicKey.class);
private SigningPublicKey _key;
public TunnelSigningPublicKey() { setKey(null); }
public TunnelSigningPublicKey() { this(null); }
public TunnelSigningPublicKey(SigningPublicKey key) { setKey(key); }
public SigningPublicKey getKey() { return _key; }
public void setKey(SigningPublicKey key) { _key= key; }

View File

@@ -29,9 +29,10 @@ public class TunnelVerificationStructure extends DataStructureImpl {
private Hash _msgHash;
private Signature _authSignature;
public TunnelVerificationStructure() {
setMessageHash(null);
setAuthorizationSignature(null);
public TunnelVerificationStructure() { this(null, null); }
public TunnelVerificationStructure(Hash messageHash, Signature authSig) {
setMessageHash(messageHash);
setAuthorizationSignature(authSig);
}
public Hash getMessageHash() { return _msgHash; }

View File

@@ -65,6 +65,8 @@ public abstract class ClientManagerFacade implements Service {
public abstract void messageReceived(ClientMessage msg);
public boolean verifyClientLiveliness() { return true; }
/**
* Return the client's current config, or null if not connected
*

View File

@@ -289,6 +289,37 @@ public class JobQueue {
}
boolean isAlive() { return _alive; }
/**
* When did the most recently begun job start?
*/
public long getLastJobBegin() {
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
long cur = ((JobQueueRunner)iter.next()).getLastBegin();
if (cur > when)
when = cur;
}
return when;
}
/**
* retrieve the most recently begun and still currently active job, or null if
* no jobs are running
*/
public Job getLastJob() {
Job j = null;
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
JobQueueRunner cur = (JobQueueRunner)iter.next();
if (cur.getLastBegin() > when) {
j = cur.getCurrentJob();
when = cur.getLastBegin();
}
}
return j;
}
/**
* Blocking call to retrieve the next ready job
*

View File

@@ -11,6 +11,7 @@ class JobQueueRunner implements Runnable {
private long _numJobs;
private Job _currentJob;
private Job _lastJob;
private long _lastBegin;
public JobQueueRunner(RouterContext context, int id) {
_context = context;
@@ -31,6 +32,7 @@ class JobQueueRunner implements Runnable {
public int getRunnerId() { return _id; }
public void stopRunning() { _keepRunning = false; }
public void startRunning() { _keepRunning = true; }
public long getLastBegin() { return _lastBegin; }
public void run() {
long lastActive = _context.clock().now();
long jobNum = 0;
@@ -103,6 +105,7 @@ class JobQueueRunner implements Runnable {
private void runCurrentJob() {
try {
_lastBegin = _context.clock().now();
_currentJob.runJob();
} catch (OutOfMemoryError oom) {
try {

View File

@@ -21,7 +21,6 @@ import java.util.Set;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.util.Log;
@@ -35,7 +34,6 @@ public class OutNetMessage {
private Log _log;
private RouterContext _context;
private RouterInfo _target;
private Hash _targetHash;
private I2NPMessage _message;
/** cached message class name, for use after we discard the message */
private String _messageType;
@@ -123,8 +121,6 @@ public class OutNetMessage {
*/
public RouterInfo getTarget() { return _target; }
public void setTarget(RouterInfo target) { _target = target; }
public Hash getTargetHash() { return _targetHash; }
public void setTargetHash(Hash target) { _targetHash = target; }
/**
* Specifies the message to be sent
*
@@ -143,37 +139,18 @@ public class OutNetMessage {
public long getMessageSize() {
if (_messageSize <= 0) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(2048); // large enough to hold most messages
_message.writeBytes(baos);
long sz = baos.size();
baos.reset();
_messageSize = sz;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
_messageSize = _message.getMessageSize();
}
return _messageSize;
}
public byte[] getMessageData() {
public int getMessageData(byte outBuffer[]) {
if (_message == null) {
return null;
return -1;
} else {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024); // large enough to hold most messages
_message.writeBytes(baos);
byte data[] = baos.toByteArray();
_messageSize = data.length;
return data;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
return null;
int len = _message.toByteArray(outBuffer);
_messageSize = len;
return len;
}
}

View File

@@ -127,6 +127,11 @@ public class Router {
_gracefulShutdownDetector.setName("Graceful shutdown hook");
_gracefulShutdownDetector.start();
I2PThread watchdog = new I2PThread(new RouterWatchdog(_context));
watchdog.setName("RouterWatchdog");
watchdog.setDaemon(true);
watchdog.start();
_shutdownTasks = new HashSet(0);
}
@@ -260,10 +265,35 @@ public class Router {
*
*/
private final class CoallesceStatsJob extends JobImpl {
public CoallesceStatsJob() { super(Router.this._context); }
public CoallesceStatsJob() {
super(Router.this._context);
Router.this._context.statManager().createRateStat("bw.receiveBps", "How fast we receive data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("bw.sendBps", "How fast we send data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coallesce stats"; }
public void runJob() {
Router.this._context.statManager().coallesceStats();
RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.receiveBps", (long)bps, 60*1000);
}
}
RateStat sendRate = _context.statManager().getRate("transport.sendMessageSize");
if (sendRate != null) {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double bps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
Router.this._context.statManager().addRateData("bw.sendBps", (long)bps, 60*1000);
}
}
requeue(60*1000);
}
}
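
Note on the bw.receiveBps / bw.sendBps math above: the rate's byte total is multiplied by 1000 and divided by (period * 1024), so the recorded value is KBytes per second over that 60-second period rather than raw bytes per second. A self-contained check with made-up numbers:

public class BpsDemo {
    public static void main(String[] args) {
        double bytes = 307200;            // bytes moved during the 60s rate period (illustrative)
        long periodMs = 60 * 1000;
        double kbps = (bytes * 1000.0d) / (periodMs * 1024.0d);
        System.out.println(kbps);         // 5.0 -> the value fed to addRateData("bw.sendBps", ...)
    }
}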
@@ -660,6 +690,14 @@ public class Router {
public boolean gracefulShutdownInProgress() {
return (null != _config.getProperty(PROP_SHUTDOWN_IN_PROGRESS));
}
/** How long until the graceful shutdown will kill us? */
public long getShutdownTimeRemaining() {
long exp = _context.tunnelManager().getLastParticipatingExpiration();
if (exp < 0)
return -1;
else
return exp + 2*CLOCK_FUDGE_FACTOR - _context.clock().now();
}
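
Worked example of getShutdownTimeRemaining() above: the graceful shutdown completes once the last tunnel we participate in has expired, plus twice the clock fudge factor for skewed peers. The fudge-factor value below is an assumption for illustration, not taken from this diff:

public class ShutdownEtaDemo {
    static final long CLOCK_FUDGE_FACTOR = 60 * 1000; // assumed value
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long lastParticipatingExpiration = now + 8 * 60 * 1000; // last routed tunnel ends in 8 minutes
        long remaining = lastParticipatingExpiration + 2 * CLOCK_FUDGE_FACTOR - now;
        System.out.println("graceful shutdown in ~" + (remaining / (60 * 1000)) + " minutes");
    }
}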
/**
* Simple thread that sits and waits forever, managing the

View File

@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.37 $ $Date: 2004/10/01 09:35:49 $";
public final static String VERSION = "0.4.1.1";
public final static long BUILD = 3;
public final static String ID = "$Revision: 1.50 $ $Date: 2004/10/09 19:03:27 $";
public final static String VERSION = "0.4.1.2";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@@ -0,0 +1,67 @@
package net.i2p.router;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
/**
* Periodically check to make sure things haven't gone totally haywire (and if
* they have, restart the JVM)
*
*/
class RouterWatchdog implements Runnable {
private Log _log;
private RouterContext _context;
private static final long MAX_JOB_RUN_LAG = 60*1000;
public RouterWatchdog(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(RouterWatchdog.class);
}
public boolean verifyJobQueueLiveliness() {
long when = _context.jobQueue().getLastJobBegin();
if (when < 0)
return true;
long howLongAgo = _context.clock().now() - when;
if (howLongAgo > MAX_JOB_RUN_LAG) {
Job cur = _context.jobQueue().getLastJob();
if (cur != null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Last job was queued up " + DataHelper.formatDuration(howLongAgo)
+ " ago: " + cur);
return false;
} else {
// no prob, just normal lag
return true;
}
} else {
return true;
}
}
public boolean verifyClientLiveliness() {
return _context.clientManager().verifyClientLiveliness();
}
private boolean shutdownOnHang() {
return true;
}
public void run() {
while (true) {
try { Thread.sleep(60*1000); } catch (InterruptedException ie) {}
monitorRouter();
}
}
public void monitorRouter() {
boolean ok = verifyJobQueueLiveliness();
ok = ok && verifyClientLiveliness();
if (!ok && shutdownOnHang()) {
_log.log(Log.CRIT, "Router hung! hard restart!");
System.exit(Router.EXIT_HARD_RESTART);
}
}
}

View File

@@ -16,6 +16,7 @@ import java.util.Iterator;
import java.util.Map;
import net.i2p.data.Hash;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.Log;
/**
@@ -62,8 +63,16 @@ public class Shitlist {
if (_log.shouldLog(Log.INFO))
_log.info("Shitlisting router " + peer.toBase64(), new Exception("Shitlist cause"));
long period = SHITLIST_DURATION_MS;
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof != null)
period = SHITLIST_DURATION_MS << prof.incrementShitlists();
if (period > 60*60*1000)
period = 60*60*1000;
synchronized (_shitlist) {
Date oldDate = (Date)_shitlist.put(peer, new Date(_context.clock().now()));
Date oldDate = (Date)_shitlist.put(peer, new Date(_context.clock().now() + period));
wasAlready = (null == oldDate);
if (reason != null) {
_shitlistCause.put(peer, reason);
@@ -79,12 +88,20 @@ public class Shitlist {
}
public void unshitlistRouter(Hash peer) {
unshitlistRouter(peer, true);
}
private void unshitlistRouter(Hash peer, boolean realUnshitlist) {
if (peer == null) return;
_log.info("Unshitlisting router " + peer.toBase64());
synchronized (_shitlist) {
_shitlist.remove(peer);
_shitlistCause.remove(peer);
}
if (realUnshitlist) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof != null)
prof.unshitlist();
}
}
public boolean isShitlisted(Hash peer) {
@@ -95,10 +112,10 @@ public class Shitlist {
if (shitlistDate == null) return false;
// check validity
if (shitlistDate.getTime() > _context.clock().now() - SHITLIST_DURATION_MS) {
if (shitlistDate.getTime() > _context.clock().now()) {
return true;
} else {
unshitlistRouter(peer);
unshitlistRouter(peer, false);
return false;
}
}
@@ -115,13 +132,13 @@ public class Shitlist {
shitlist = new HashMap(_shitlist);
}
long limit = _context.clock().now() - SHITLIST_DURATION_MS;
long limit = _context.clock().now();
for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Date shitDate = (Date)shitlist.get(key);
if (shitDate.getTime() < limit) {
unshitlistRouter(key);
unshitlistRouter(key, false);
}
}
}
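
The shitlistRouter() change above turns the fixed shitlist duration into an escalating one: the base period is left-shifted by the peer's consecutive-shitlist count, capped at one hour, and the stored Date is now the expiration time rather than the moment of shitlisting. A self-contained sketch of that backoff (the SHITLIST_DURATION_MS base value is assumed here, not taken from this diff):

public class ShitlistBackoffDemo {
    static final long SHITLIST_DURATION_MS = 4 * 60 * 1000; // assumed base period
    static final long MAX_PERIOD = 60 * 60 * 1000;          // one-hour cap from the diff
    public static void main(String[] args) {
        for (int consecutive = 0; consecutive < 6; consecutive++) {
            long period = SHITLIST_DURATION_MS << consecutive; // doubles per consecutive shitlisting
            if (period > MAX_PERIOD)
                period = MAX_PERIOD;
            System.out.println(consecutive + " prior shitlistings -> " + (period / 1000) + "s");
        }
    }
}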
@@ -146,7 +163,7 @@ public class Shitlist {
Date shitDate = (Date)shitlist.get(key);
buf.append("<li><b>").append(key.toBase64()).append("</b>");
buf.append(" <a href=\"netdb.jsp#").append(key.toBase64().substring(0, 6)).append("\">(?)</a>");
buf.append(" was shitlisted on ");
buf.append(" expiring on ");
buf.append(shitDate);
String cause = (String)causes.get(key);
if (cause != null) {

View File

@@ -67,4 +67,7 @@ public interface TunnelManagerFacade extends Service {
public int getFreeTunnelCount();
/** how many outbound tunnels do we have available? */
public int getOutboundTunnelCount();
/** When does the last tunnel we are participating in expire? */
public long getLastParticipatingExpiration();
}

View File

@@ -52,7 +52,7 @@ public class ClientManager {
_log = context.logManager().getLog(ClientManager.class);
_context.statManager().createRateStat("client.receiveMessageSize",
"How large are messages received by the client?",
"Client Messages",
"ClientMessages",
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_runners = new HashMap();
_pendingRunners = new HashSet();
@@ -234,7 +234,7 @@ public class ClientManager {
return false;
}
private ClientConnectionRunner getRunner(Destination dest) {
ClientConnectionRunner getRunner(Destination dest) {
ClientConnectionRunner rv = null;
long beforeLock = _context.clock().now();
long inLock = 0;
@@ -300,7 +300,7 @@ public class ClientManager {
}
}
private Set getRunnerDestinations() {
Set getRunnerDestinations() {
Set dests = new HashSet();
long beforeLock = _context.clock().now();
long inLock = 0;

View File

@@ -10,7 +10,9 @@ package net.i2p.router.client;
import java.io.IOException;
import java.io.Writer;
import java.util.Iterator;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
@@ -68,6 +70,27 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
startup();
}
private static final long MAX_TIME_TO_REBUILD = 5*60*1000;
public boolean verifyClientLiveliness() {
boolean lively = true;
for (Iterator iter = _manager.getRunnerDestinations().iterator(); iter.hasNext(); ) {
Destination dest = (Destination)iter.next();
ClientConnectionRunner runner = _manager.getRunner(dest);
if ( (runner == null) || (runner.getIsDead())) continue;
LeaseSet ls = runner.getLeaseSet();
if (ls == null)
continue; // still building
long howLongAgo = _context.clock().now() - ls.getEarliestLeaseDate();
if (howLongAgo > MAX_TIME_TO_REBUILD) {
if (_log.shouldLog(Log.ERROR))
_log.error("Client " + dest.calculateHash().toBase64().substring(0,6)
+ " has a leaseSet that expired " + DataHelper.formatDuration(howLongAgo));
lively = false;
}
}
return lively;
}
/**
* Request that a particular client authorize the Leases contained in the
* LeaseSet, after which the onCreateJob is queued up. If that doesn't occur

View File

@@ -46,7 +46,8 @@ public class GarlicMessageParser {
_log.warn("Error decrypting", dfe);
}
if (decrData == null) {
_log.debug("Decryption of garlic message failed");
if (_log.shouldLog(Log.WARN))
_log.warn("Decryption of garlic message failed (data = " + encData + ")", new Exception("Decrypt fail"));
return null;
} else {
return readCloveSet(decrData);

View File

@@ -335,21 +335,11 @@ public class HandleTunnelMessageJob extends JobImpl {
+ router.toBase64());
TunnelMessage msg = new TunnelMessage(getContext());
msg.setTunnelId(id);
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
body.writeBytes(baos);
msg.setData(baos.toByteArray());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, router, FORWARD_TIMEOUT, FORWARD_PRIORITY));
msg.setData(body.toByteArray());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, router, FORWARD_TIMEOUT, FORWARD_PRIORITY));
String bodyType = body.getClass().getName();
getContext().messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the message to forward to the tunnel", dfe);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the message to forward to the tunnel", ioe);
}
String bodyType = body.getClass().getName();
getContext().messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
}
private void sendToRouter(Hash router, I2NPMessage body) {
@@ -406,8 +396,8 @@ public class HandleTunnelMessageJob extends JobImpl {
_log.error("Error parsing the message body", ime);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error reading the message body", ioe);
}
_log.error("Error parsing the message body", ioe);
}
return null;
}
@@ -421,6 +411,11 @@ public class HandleTunnelMessageJob extends JobImpl {
_log.error("Error decrypting the message", getAddedBy());
return null;
}
if (decrypted.length <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Received an empty decrypted message? encrypted length: " + encryptedMessage.length, getAddedBy());
return null;
}
return getBody(decrypted);
}

View File

@@ -135,22 +135,16 @@ class MessageHandler {
_log.info("Handle " + message.getClass().getName() + " to send to remote tunnel "
+ tunnelId.getTunnelId() + " on router " + to.toBase64());
TunnelMessage msg = new TunnelMessage(_context);
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
try {
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(tunnelId);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Placing message of type " + message.getClass().getName()
+ " into the new tunnel message bound for " + tunnelId.getTunnelId()
+ " on " + to.toBase64());
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, to, (int)timeoutMs, priority));
msg.setData(message.toByteArray());
msg.setTunnelId(tunnelId);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Placing message of type " + message.getClass().getName()
+ " into the new tunnel message bound for " + tunnelId.getTunnelId()
+ " on " + to.toBase64());
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, to, (int)timeoutMs, priority));
String bodyType = message.getClass().getName();
_context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (Exception e) {
_log.warn("Unable to forward on according to the instructions to the remote tunnel", e);
}
String bodyType = message.getClass().getName();
_context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
}
private void handleLocalDestination(DeliveryInstructions instructions, I2NPMessage message, Hash fromHash) {

View File

@@ -100,14 +100,14 @@ public class OutboundClientMessageJob extends JobImpl {
super(ctx);
_log = ctx.logManager().getLog(OutboundClientMessageJob.class);
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "Client Messages", new long[] { 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "ClientMessages", new long[] { 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;

View File

@@ -135,33 +135,19 @@ public class SendTunnelMessageJob extends JobImpl {
*/
private void forwardToGateway() {
TunnelMessage msg = new TunnelMessage(getContext());
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
_message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(_tunnelId);
msg.setMessageExpiration(new Date(_expiration));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg,
_destRouter, _onSend,
_onReply, _onFailure,
_selector,
(int)(_expiration-getContext().clock().now()),
_priority));
msg.setData(_message.toByteArray());
msg.setTunnelId(_tunnelId);
msg.setMessageExpiration(new Date(_expiration));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg,
_destRouter, _onSend,
_onReply, _onFailure,
_selector,
(int)(_expiration-getContext().clock().now()),
_priority));
String bodyType = _message.getClass().getName();
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", dfe);
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
}
String bodyType = _message.getClass().getName();
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());
return;
}
@@ -391,7 +377,8 @@ public class SendTunnelMessageJob extends JobImpl {
private byte[] encrypt(DataStructure struct, SessionKey key, int paddedSize) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize);
struct.writeBytes(baos);
byte data[] = struct.toByteArray();
baos.write(data);
byte iv[] = new byte[16];
Hash h = getContext().sha().calculateHash(key.getData());
@@ -400,9 +387,6 @@ public class SendTunnelMessageJob extends JobImpl {
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out data to encrypt", ioe);
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error formatting data to encrypt", dfe);
}
return null;
}
@@ -451,17 +435,8 @@ public class SendTunnelMessageJob extends JobImpl {
tmsg.setEncryptedDeliveryInstructions(null);
tmsg.setTunnelId(_targetTunnelId);
tmsg.setVerificationStructure(null);
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
try {
_message.writeBytes(baos);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the message to be forwarded...??", ioe);
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing message to be forwarded...???", dfe);
}
tmsg.setData(baos.toByteArray());
byte data[] = _message.toByteArray();
tmsg.setData(data);
msg = tmsg;
} else {
if (_log.shouldLog(Log.DEBUG))
@@ -559,31 +534,11 @@ public class SendTunnelMessageJob extends JobImpl {
outM.setOnSendJob(_onSend);
outM.setPriority(_priority);
outM.setReplySelector(_selector);
if (_destRouter != null)
outM.setTargetHash(_destRouter);
else
outM.setTargetHash(getContext().routerHash());
outM.setTarget(getContext().netDb().lookupRouterInfoLocally(_destRouter));
getContext().messageRegistry().registerPending(outM);
_onFailure = new FakeOnFailJob(getContext(), outM, _onFailure);
// we dont really need the data
outM.discardData();
}
private class FakeOnFailJob extends JobImpl {
private OutNetMessage _fakeMessage;
private Job _realOnFailJob;
public FakeOnFailJob(RouterContext ctx, OutNetMessage msg, Job realOnFailJob) {
super(ctx);
_fakeMessage = msg;
_realOnFailJob = realOnFailJob;
}
public String getName() { return "Fake message failure job"; }
public void runJob() {
getContext().messageRegistry().unregisterPending(_fakeMessage);
if (_realOnFailJob != null)
getContext().jobQueue().addJob(_realOnFailJob);
}
}
public String getName() { return "Send Tunnel Message"; }
}

View File

@@ -27,8 +27,8 @@ public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
public DatabaseLookupMessageHandler(RouterContext context) {
_context = context;
_log = context.logManager().getLog(DatabaseLookupMessageHandler.class);
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {

View File

@@ -51,8 +51,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@@ -185,23 +185,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
long expiration = REPLY_TIMEOUT + getContext().clock().now();
TunnelMessage msg = new TunnelMessage(getContext());
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(replyTunnel);
msg.setMessageExpiration(new Date(expiration));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
msg.setData(message.toByteArray());
msg.setTunnelId(replyTunnel);
msg.setMessageExpiration(new Date(expiration));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
String bodyType = message.getClass().getName();
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", dfe);
}
String bodyType = message.getClass().getName();
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
}
public String getName() { return "Handle Database Lookup Message"; }

View File

@@ -38,7 +38,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = ctx.logManager().getLog(HandleDatabaseStoreMessageJob.class);
ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;

View File

@@ -82,15 +82,15 @@ class SearchJob extends JobImpl {
_isLease = isLease;
_peerSelector = new PeerSelector(getContext());
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
}

View File

@@ -73,9 +73,9 @@ class StoreJob extends JobImpl {
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
super(context);
_log = context.logManager().getLog(StoreJob.class);
getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_facade = facade;
_state = new StoreState(getContext(), key, data, toSkip);
_onSuccess = onSuccess;

View File

@@ -28,10 +28,12 @@ public class DBHistory {
private long _lastLookupReceived;
private long _unpromptedDbStoreNew;
private long _unpromptedDbStoreOld;
private String _statGroup;
public DBHistory(RouterContext context) {
public DBHistory(RouterContext context, String statGroup) {
_context = context;
_log = context.logManager().getLog(DBHistory.class);
_statGroup = statGroup;
_successfulLookups = 0;
_failedLookups = 0;
_failedLookupRate = null;
@@ -45,7 +47,7 @@ public class DBHistory {
_lastLookupReceived = -1;
_unpromptedDbStoreNew = 0;
_unpromptedDbStoreOld = 0;
createRates();
createRates(statGroup);
}
/** how many times we have sent them a db lookup and received the value back from them */
@@ -212,15 +214,17 @@ public class DBHistory {
_invalidReplyRate.load(props, "dbHistory.invalidReplyRate", true);
} catch (IllegalArgumentException iae) {
_log.warn("DB History invalid reply rate is corrupt, resetting", iae);
createRates();
createRates(_statGroup);
}
}
private void createRates() {
private void createRates(String statGroup) {
if (_failedLookupRate == null)
_failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", statGroup, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_invalidReplyRate == null)
_invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", "dbHistory", new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", statGroup, new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failedLookupRate.setStatLog(_context.statManager().getStatLog());
_invalidReplyRate.setStatLog(_context.statManager().getStatLog());
}
private final static long getLong(Properties props, String key) {

View File

@@ -44,10 +44,8 @@ public class PeerProfile {
private DBHistory _dbHistory;
// does this peer profile contain expanded data, or just the basics?
private boolean _expanded;
private int _consecutiveShitlists;
public PeerProfile(RouterContext context) {
this(context, null, true);
}
public PeerProfile(RouterContext context, Hash peer) {
this(context, peer, true);
}
@@ -60,6 +58,7 @@ public class PeerProfile {
_capacityValue = 0;
_integrationValue = 0;
_isFailing = false;
_consecutiveShitlists = 0;
_peer = peer;
if (expand)
expandProfile();
@@ -77,6 +76,9 @@ public class PeerProfile {
*/
public boolean getIsExpanded() { return _expanded; }
public int incrementShitlists() { return _consecutiveShitlists++; }
public void unshitlist() { _consecutiveShitlists = 0; }
/**
* Is this peer active at the moment (sending/receiving messages within the last
* 5 minutes)
@@ -236,28 +238,37 @@ public class PeerProfile {
*
*/
public void expandProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_sendSuccessSize == null)
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_sendFailureSize == null)
_sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
_sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", group, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_receiveSize == null)
_receiveSize = new RateStat("receiveSize", "How large received messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelCreateResponseTime == null)
_tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelTestResponseTime == null)
_tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_commError == null)
_commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", "profile", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", group, new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", "profile", new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
if (_tunnelHistory == null)
_tunnelHistory = new TunnelHistory(_context);
_tunnelHistory = new TunnelHistory(_context, group);
if (_dbHistory == null)
_dbHistory = new DBHistory(_context);
_dbHistory = new DBHistory(_context, group);
_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
_sendFailureSize.setStatLog(_context.statManager().getStatLog());
_receiveSize.setStatLog(_context.statManager().getStatLog());
_dbResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_commError.setStatLog(_context.statManager().getStatLog());
_dbIntroduction.setStatLog(_context.statManager().getStatLog());
_expanded = true;
}

View File

@@ -23,22 +23,26 @@ public class TunnelHistory {
private volatile long _lastFailed;
private RateStat _rejectRate;
private RateStat _failRate;
private String _statGroup;
public TunnelHistory(RouterContext context) {
public TunnelHistory(RouterContext context, String statGroup) {
_context = context;
_log = context.logManager().getLog(TunnelHistory.class);
_statGroup = statGroup;
_lifetimeAgreedTo = 0;
_lifetimeFailed = 0;
_lifetimeRejected = 0;
_lastAgreedTo = 0;
_lastFailed = 0;
_lastRejected = 0;
createRates();
createRates(statGroup);
}
private void createRates() {
_rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
private void createRates(String statGroup) {
_rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_rejectRate.setStatLog(_context.statManager().getStatLog());
_failRate.setStatLog(_context.statManager().getStatLog());
}
/** total tunnels the peer has agreed to participate in */
@@ -126,7 +130,7 @@ public class TunnelHistory {
_log.debug("Loading tunnelHistory.failRate");
} catch (IllegalArgumentException iae) {
_log.warn("TunnelHistory rates are corrupt, resetting", iae);
createRates();
createRates(_statGroup);
}
}

View File

@@ -38,14 +38,16 @@ public class GetBidsJob extends JobImpl {
public String getName() { return "Fetch bids for a message to be delivered"; }
public void runJob() {
Hash to = _msg.getTarget().getIdentity().getHash();
if (getContext().shitlist().isShitlisted(to)) {
_log.warn("Attempt to send a message to a shitlisted peer - " + to);
getContext().messageRegistry().peerFailed(to);
fail();
return;
}
Hash us = getContext().routerHash();
if (_msg.getTarget().getIdentity().getHash().equals(us)) {
if (to.equals(us)) {
_log.error("wtf, send a message to ourselves? nuh uh. msg = " + _msg, getAddedBy());
fail();
return;
@@ -54,11 +56,8 @@ public class GetBidsJob extends JobImpl {
List bids = _facade.getBids(_msg);
if (bids.size() <= 0) {
_log.warn("No bids available for the message " + _msg);
Hash target = _msg.getTargetHash();
if (target == null)
target = _msg.getTarget().getIdentity().getHash();
getContext().shitlist().shitlistRouter(target, "No bids");
getContext().netDb().fail(target);
getContext().shitlist().shitlistRouter(to, "No bids");
getContext().netDb().fail(to);
fail();
} else {
TransportBid bid = (TransportBid)bids.get(0);
@@ -79,10 +78,7 @@ public class GetBidsJob extends JobImpl {
getContext().messageRegistry().unregisterPending(_msg);
}
if (_msg.getTargetHash() != null)
getContext().profileManager().messageFailed(_msg.getTargetHash());
else
getContext().profileManager().messageFailed(_msg.getTarget().getIdentity().getHash());
getContext().profileManager().messageFailed(_msg.getTarget().getIdentity().getHash());
_msg.discardData();
}

View File

@@ -279,14 +279,24 @@ public class OutboundMessageRegistry {
public void peerFailed(Hash peer) {
List failed = null;
int numFailed = 0;
synchronized (_pendingMessages) {
for (Iterator iter = _pendingMessages.values().iterator(); iter.hasNext(); ) {
OutNetMessage msg = (OutNetMessage)iter.next();
if ( (msg.getTargetHash() != null) && (msg.getTargetHash().equals(peer)) ) {
if (failed == null)
failed = new ArrayList(4);
failed.add(msg);
iter.remove();
if (msg.getTarget() != null) {
Hash to = msg.getTarget().getIdentity().calculateHash();
if (to.equals(peer)) {
if (failed == null)
failed = new ArrayList(4);
failed.add(msg);
iter.remove();
numFailed++;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer failed: " + peer.toBase64().substring(0,6)
+ " but not killing a message to "
+ to.toBase64().substring(0,6));
}
}
}
}
@@ -299,6 +309,8 @@ public class OutboundMessageRegistry {
}
}
if (_log.shouldLog(Log.WARN))
_log.warn("Peer failed: " + peer.toBase64().substring(0,6) + " killing " + numFailed);
}
public void renderStatusHTML(Writer out) throws IOException {
@@ -314,6 +326,7 @@ public class OutboundMessageRegistry {
OutNetMessage msg = (OutNetMessage)msgs.get(exp);
buf.append("<li>").append(msg.getMessageType());
buf.append(": expiring on ").append(new Date(exp.longValue()));
buf.append(" targetting ").append(msg.getTarget().getIdentity().getHash());
if (msg.getReplySelector() != null)
buf.append(" with reply selector ").append(msg.getReplySelector().toString());
else

View File

@@ -226,9 +226,9 @@ public abstract class TransportImpl implements Transport {
}
}
_context.statManager().addRateData("transport.sendProcessingTime", msg.getLifetime(), msg.getLifetime());
if (sendSuccessful) {
_context.statManager().addRateData("transport.sendProcessingTime", lifetime, lifetime);
_context.profileManager().messageSent(msg.getTarget().getIdentity().getHash(), getStyle(), sendTime, msg.getMessageSize());
_context.statManager().addRateData("transport.sendMessageSize", msg.getMessageSize(), sendTime);
} else {

View File

@@ -66,7 +66,8 @@ public class VMCommSystem extends CommSystemFacade {
} else {
_context.jobQueue().addJob(msg.getOnSendJob());
_context.profileManager().messageSent(msg.getTarget().getIdentity().getHash(), "vm", sendTime, msg.getMessageSize());
byte data[] = msg.getMessageData();
byte data[] = new byte[(int)msg.getMessageSize()];
msg.getMessageData(data);
_context.statManager().addRateData("transport.sendMessageSize", data.length, sendTime);
if (data.length < 1024)

View File

@@ -104,7 +104,8 @@ public class ConnectionBuilder {
try {
return doEstablishConnection();
} catch (Exception e) { // catchall in case the timeout gets us flat footed
_log.error("Error connecting", e);
if (_socket != null)
fail("Error connecting", e);
return null;
}
}
@@ -436,7 +437,15 @@ public class ConnectionBuilder {
}
_actualPeer = peer;
return true;
try {
_context.netDb().store(peer.getIdentity().getHash(), peer);
return true;
} catch (IllegalArgumentException iae) {
fail("Peer sent us bad info - " + _target.getIdentity().getHash().toBase64().substring(0,6)
+ ": " + iae.getMessage());
return false;
}
} catch (IOException ioe) {
fail("Error reading the verified info from "
+ _target.getIdentity().calculateHash().toBase64().substring(0,6)
@@ -583,7 +592,15 @@ public class ConnectionBuilder {
}
_actualPeer = peer;
return true;
try {
_context.netDb().store(peer.getIdentity().getHash(), peer);
return true;
} catch (IllegalArgumentException iae) {
fail("Peer sent us bad info - " + _target.getIdentity().getHash().toBase64().substring(0,6)
+ ": " + iae.getMessage());
return false;
}
} catch (IOException ioe) {
fail("Error reading the verified info from "
+ _target.getIdentity().calculateHash().toBase64().substring(0,6)
@@ -644,7 +661,6 @@ public class ConnectionBuilder {
//_connectionOut = _rawOut;
Hash peer = _actualPeer.getIdentity().getHash();
_context.netDb().store(peer, _actualPeer);
_transport.getTagManager().replaceTag(peer, _nextConnectionTag, _key);
}

View File

@@ -443,7 +443,14 @@ public class ConnectionHandler {
SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMddhhmmssSSS");
props.setProperty("SKEW", fmt.format(new Date(_context.clock().now())));
} else {
status = STATUS_OK;
try {
_context.netDb().store(_actualPeer.getIdentity().getHash(), _actualPeer);
status = STATUS_OK;
} catch (IllegalArgumentException iae) {
// bad peer info
status = STATUS_UNKNOWN;
props.setProperty("REASON", "RouterInfoFailed");
}
}
baos.write(status);
@@ -460,7 +467,7 @@ public class ConnectionHandler {
verification.writeBytes(_rawOut);
_rawOut.flush();
return handleStatus(status, clockSkew);
return handleStatus(status, clockSkew);
} catch (IOException ioe) {
fail("Error writing the peer info to " + _from
+ ": " + ioe.getMessage(), ioe);
@@ -601,7 +608,14 @@ public class ConnectionHandler {
} else if (!sigOk) {
status = STATUS_SIGNATURE_FAILED;
} else {
status = STATUS_OK;
try {
_context.netDb().store(_actualPeer.getIdentity().getHash(), _actualPeer);
status = STATUS_OK;
} catch (IllegalArgumentException iae) {
// bad peer info
status = STATUS_UNKNOWN;
props.setProperty("REASON", "RouterInfoFailed");
}
}
if (_actualPeer.getIdentity().getHash().equals(_context.routerHash())) {
@@ -675,67 +689,77 @@ public class ConnectionHandler {
private boolean verifyReachability() {
if (_actualPeer == null) return false;
_remoteAddress = new TCPAddress(_actualPeer.getTargetAddress(TCPTransport.STYLE));
if (!_transport.allowAddress(_remoteAddress))
if ( (_remoteAddress.getPort() <= 0) || (_remoteAddress.getPort() > 65535) )
return false;
//if (true) return true;
Socket s = null;
TCPAddress testAddress = _remoteAddress;
// if it is a LAN address, test with that address and not the public one
if (!TCPAddress.isPubliclyRoutable(_from)) {
testAddress = new TCPAddress(_from, _remoteAddress.getPort());
}
try {
s = new Socket(_remoteAddress.getAddress(), _remoteAddress.getPort());
OutputStream out = s.getOutputStream();
InputStream in = s.getInputStream();
try { s.setSoTimeout(TCPListener.HANDLE_TIMEOUT); } catch (SocketException se) {}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Beginning verification of reachability");
// send: 0xFFFF + #versions + v1 [+ v2 [etc]] + properties
DataHelper.writeLong(out, 2, FLAG_TEST);
out.write(TCPTransport.SUPPORTED_PROTOCOLS.length);
for (int i = 0; i < TCPTransport.SUPPORTED_PROTOCOLS.length; i++)
out.write(TCPTransport.SUPPORTED_PROTOCOLS[i]);
DataHelper.writeProperties(out, null);
out.flush();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Verification of reachability request sent");
// read: 0xFFFF + versionOk + #bytesIP + IP + currentTime + properties
int flag = (int)DataHelper.readLong(in, 2);
if (flag != FLAG_TEST)
throw new IOException("Unable to verify the peer - invalid response");
int version = in.read();
if (version == -1)
throw new IOException("Unable to verify the peer - invalid version");
if (version == FLAG_PROTOCOL_NONE)
throw new IOException("Unable to verify the peer - no matching version");
int numBytes = in.read();
if ( (numBytes == -1) || (numBytes > 32) )
throw new IOException("Unable to verify the peer - invalid num bytes");
byte ip[] = new byte[numBytes];
int read = DataHelper.read(in, ip);
if (read != numBytes)
throw new IOException("Unable to verify the peer - invalid num bytes");
Date now = DataHelper.readDate(in);
Properties opts = DataHelper.readProperties(in);
return true;
return verifyReachability(testAddress);
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error verifying "
+ _actualPeer.getIdentity().calculateHash().toBase64().substring(0,6)
+ "at " + _remoteAddress, ioe);
+ "at " + testAddress, ioe);
return false;
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error verifying "
+ _actualPeer.getIdentity().calculateHash().toBase64().substring(0,6)
+ "at " + _remoteAddress, dfe);
+ "at " + testAddress, dfe);
return false;
}
}
private static boolean verifyReachability(TCPAddress address) throws IOException, DataFormatException {
//if (true) return true;
Socket s = new Socket(address.getAddress(), address.getPort());
OutputStream out = s.getOutputStream();
InputStream in = s.getInputStream();
try { s.setSoTimeout(TCPListener.HANDLE_TIMEOUT); } catch (SocketException se) {}
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Beginning verification of reachability");
// send: 0xFFFF + #versions + v1 [+ v2 [etc]] + properties
DataHelper.writeLong(out, 2, FLAG_TEST);
out.write(TCPTransport.SUPPORTED_PROTOCOLS.length);
for (int i = 0; i < TCPTransport.SUPPORTED_PROTOCOLS.length; i++)
out.write(TCPTransport.SUPPORTED_PROTOCOLS[i]);
DataHelper.writeProperties(out, null);
out.flush();
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Verification of reachability request sent");
// read: 0xFFFF + versionOk + #bytesIP + IP + currentTime + properties
int flag = (int)DataHelper.readLong(in, 2);
if (flag != FLAG_TEST)
throw new IOException("Unable to verify the peer - invalid response");
int version = in.read();
if (version == -1)
throw new IOException("Unable to verify the peer - invalid version");
if (version == FLAG_PROTOCOL_NONE)
throw new IOException("Unable to verify the peer - no matching version");
int numBytes = in.read();
if ( (numBytes == -1) || (numBytes > 32) )
throw new IOException("Unable to verify the peer - invalid num bytes");
byte ip[] = new byte[numBytes];
int read = DataHelper.read(in, ip);
if (read != numBytes)
throw new IOException("Unable to verify the peer - invalid num bytes");
Date now = DataHelper.readDate(in);
Properties opts = DataHelper.readProperties(in);
return true;
}
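For reference, the other end of this exchange (the peer being tested) simply mirrors the framing described in the comments above: it reads the 0xFFFF flag, the offered protocol versions, and the properties, then answers with 0xFFFF + versionOk + #bytesIP + IP + currentTime + properties. A minimal hypothetical sketch of that responder, not the actual handler implemented elsewhere in this class; respondToTest and ourIp are illustrative names:

private static void respondToTest(InputStream in, OutputStream out, byte ourIp[]) throws IOException, DataFormatException {
// read: 0xFFFF + #versions + v1 [+ v2 [etc]] + properties
int flag = (int)DataHelper.readLong(in, 2);
if (flag != FLAG_TEST)
throw new IOException("Not a reachability test request");
int numVersions = in.read();
int versionOk = FLAG_PROTOCOL_NONE;
for (int i = 0; i < numVersions; i++) {
int v = in.read();
for (int j = 0; j < TCPTransport.SUPPORTED_PROTOCOLS.length; j++)
if (v == TCPTransport.SUPPORTED_PROTOCOLS[j])
versionOk = v;
}
DataHelper.readProperties(in);
// send: 0xFFFF + versionOk + #bytesIP + IP + currentTime + properties
DataHelper.writeLong(out, 2, FLAG_TEST);
out.write(versionOk);
out.write(ourIp.length);
out.write(ourIp);
DataHelper.writeDate(out, new Date(System.currentTimeMillis())); // the router would use its own clock here
DataHelper.writeProperties(out, null);
out.flush();
}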
/**
* The peer contacting us is just testing us. Verify that we are reachable
* by following the protocol, then close the socket. This is called only
@@ -817,7 +841,6 @@ public class ConnectionHandler {
//_connectionOut = _rawOut;
Hash peer = _actualPeer.getIdentity().getHash();
_context.netDb().store(peer, _actualPeer);
_transport.getTagManager().replaceTag(peer, _nextConnectionTag, _key);
}
@@ -851,4 +874,30 @@ public class ConnectionHandler {
if (_log.shouldLog(Log.WARN))
_log.warn(error, e);
}
/**
* Verify the reachability of a peer.
* Usage: <code>ConnectionHandler hostname portNum</code>
*/
public static void main(String args[]) {
if (false) args = new String[] { "dev.i2p.net", "4108" };
if ( (args == null) || (args.length != 2) ) {
System.out.println("Usage: ConnectionHandler hostname portNum");
System.exit(0);
}
try {
int port = Integer.parseInt(args[1]);
TCPAddress addr = new TCPAddress(args[0], port);
boolean ok = verifyReachability(addr);
if (ok)
System.out.println("Peer is reachable: " + addr.toString());
else
System.out.println("Peer is not reachable: " + addr.toString());
} catch (Exception e) {
System.out.println("Peer is not reachable: " + args[0] + ":" + args[1]);
e.printStackTrace();
}
}
}

View File

@@ -20,6 +20,7 @@ class ConnectionRunner implements Runnable {
private RouterContext _context;
private TCPConnection _con;
private boolean _keepRunning;
private byte _writeBuffer[];
public ConnectionRunner(RouterContext ctx, TCPConnection con) {
_context = ctx;
@@ -30,6 +31,7 @@ class ConnectionRunner implements Runnable {
public void startRunning() {
_keepRunning = true;
_writeBuffer = new byte[38*1024]; // expansion factor
String name = "TCP " + _context.routerHash().toBase64().substring(0,6)
+ " to "
@@ -47,6 +49,8 @@ class ConnectionRunner implements Runnable {
if (msg == null) {
if (_keepRunning)
_log.error("next message is null but we should keep running?");
_con.closeConnection();
return;
} else {
sendMessage(msg);
}
@@ -54,14 +58,32 @@ class ConnectionRunner implements Runnable {
}
private void sendMessage(OutNetMessage msg) {
byte buf[] = _writeBuffer;
int written = 0;
try {
written = msg.getMessageData(_writeBuffer);
} catch (ArrayIndexOutOfBoundsException aioobe) {
I2NPMessage m = msg.getMessage();
if (m != null) {
buf = m.toByteArray();
written = buf.length;
}
} catch (Exception e) {
_log.log(Log.CRIT, "getting the message data", e);
_con.closeConnection();
return;
}
if (written <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("message " + msg.getMessageType() + "/" + msg.getMessageId()
+ " expired before it could be sent");
msg.timestamp("ConnectionRunner.sendMessage noData");
_con.sent(msg, false, 0);
return;
}
msg.timestamp("ConnectionRunner.sendMessage data");
OutputStream out = _con.getOutputStream();
boolean ok = false;
@@ -70,7 +92,7 @@ class ConnectionRunner implements Runnable {
try {
synchronized (out) {
before = _context.clock().now();
out.write(buf, 0, written);
out.flush();
after = _context.clock().now();
}
@@ -84,6 +106,7 @@ class ConnectionRunner implements Runnable {
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error writing out the message", ioe);
_con.closeConnection();
}
_con.sent(msg, ok, after - before);
}

View File

@@ -29,12 +29,12 @@ public class MessageHandler implements I2NPMessageReader.I2NPMessageEventListene
_con.closeConnection();
}
public void messageReceived(I2NPMessageReader reader, I2NPMessage message, long msToRead, int size) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Just received message " + message.getUniqueId() + " from "
+ _identHash.toBase64().substring(0,6)
+ " readTime = " + msToRead + "ms type = " + message.getClass().getName());
_transport.messageReceived(message, _ident, _identHash, msToRead, size);
}
public void readError(I2NPMessageReader reader, Exception error) {

View File

@@ -112,13 +112,16 @@ public class TCPAddress {
public void setPort(int port) { _port = port; }
public boolean isPubliclyRoutable() {
if (_host == null) return false;
return isPubliclyRoutable(_host);
}
public static boolean isPubliclyRoutable(String host) {
if (host == null) return false;
try {
InetAddress addr = InetAddress.getByName(host);
byte quad[] = addr.getAddress();
if (quad.length != 4) {
if (_log.shouldLog(Log.ERROR))
_log.error("Refusing IPv6 address (" + _host + " / " + addr.getHostAddress() + ") "
_log.error("Refusing IPv6 address (" + host + " / " + addr.getHostAddress() + ") "
+ " since not all peers support it, and we don't support restricted routes");
return false;
}

View File

@@ -16,6 +16,8 @@ import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessageReader;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.stat.RateStat;
import net.i2p.stat.Rate;
import net.i2p.util.Log;
/**
@@ -35,6 +37,7 @@ public class TCPConnection {
private TCPTransport _transport;
private ConnectionRunner _runner;
private I2NPMessageReader _reader;
private RateStat _sendRate;
private long _started;
private boolean _closed;
@@ -51,6 +54,9 @@ public class TCPConnection {
_started = -1;
_closed = false;
_runner = new ConnectionRunner(_context, this);
_context.statManager().createRateStat("tcp.probabalisticDropQueueSize", "How many bytes were queued to be sent when a message as dropped probabalistically?", "TCP", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
_context.statManager().createRateStat("tcp.queueSize", "How many bytes were queued on a connection?", "TCP", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
_context.statManager().createRateStat("tcp.sendBps", "How fast are we sending data to a peer?", "TCP", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
}
/** Who are we talking with (or null if not identified) */
@@ -72,7 +78,11 @@ public class TCPConnection {
*
*/
public void runConnection() {
String name = "TCP Read [" + _ident.calculateHash().toBase64().substring(0,6) + "]";
String peer = _ident.calculateHash().toBase64().substring(0,6);
String name = "TCP Read [" + peer + "]";
_sendRate = new RateStat("tcp.sendRatePeer", "How many bytes are in the messages sent to " + peer, peer, new long[] { 60*1000, 5*60*1000, 60*60*1000 });
_reader = new I2NPMessageReader(_context, _in, new MessageHandler(_transport, this), name);
_reader.startReading();
_runner.startRunning();
@@ -110,7 +120,8 @@ public class TCPConnection {
if (_socket != null) try { _socket.close(); } catch (IOException ioe) {}
List msgs = clearPendingMessages();
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage msg = (OutNetMessage)msgs.get(i);
msg.timestamp("closeConnection");
_transport.afterSend(msg, false, true, -1);
}
_context.profileManager().commErrorOccurred(_ident.getHash());
@@ -139,10 +150,120 @@ public class TCPConnection {
*
*/
public void addMessage(OutNetMessage msg) {
msg.timestamp("TCPConnection.addMessage");
List expired = null;
int remaining = 0;
synchronized (_pendingMessages) {
_pendingMessages.add(msg);
expired = locked_expireOld();
locked_throttle();
remaining = _pendingMessages.size();
_pendingMessages.notifyAll();
}
if (expired != null) {
for (int i = 0; i < expired.size(); i++) {
OutNetMessage cur = (OutNetMessage)expired.get(i);
cur.timestamp("TCPConnection.addMessage expired");
if (_log.shouldLog(Log.WARN))
_log.warn("Message " + cur.getMessageId() + " expired on the queue to "
+ _ident.getHash().toBase64().substring(0,6)
+ " (queue size " + remaining + ") with lifetime "
+ cur.getLifetime());
sent(cur, false, 0);
}
}
}
/**
* Implement a probabilistic dropping of messages on the queue to the
* peer along the lines of RFC 2309.
*
*/
private void locked_throttle() {
int bytesQueued = 0;
long earliestExpiration = -1;
for (int i = 0; i < _pendingMessages.size(); i++) {
OutNetMessage msg = (OutNetMessage)_pendingMessages.get(i);
bytesQueued += (int)msg.getMessageSize();
if ( (earliestExpiration < 0) || (msg.getExpiration() < earliestExpiration) )
earliestExpiration = msg.getExpiration();
}
if (bytesQueued > 0)
_context.statManager().addRateData("tcp.queueSize", bytesQueued, _pendingMessages.size());
long sendRate = getSendRate();
long bytesSendableUntilFirstExpire = sendRate * (earliestExpiration - _context.clock().now()) / 1000;
// try to keep the queue less than half full
long excessQueued = bytesQueued - (bytesSendableUntilFirstExpire/2);
if ( (excessQueued > 0) && (_pendingMessages.size() > 1) && (_transport != null) )
locked_probabalisticDrop(excessQueued);
}
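To make the half-capacity target concrete, a worked example with assumed figures (not taken from the source):

// Illustrative numbers only:
long sendRate = 4 * 1024; // ~4 KBps measured to this peer
long msUntilFirstExpire = 10 * 1000; // earliest queued message expires in 10 seconds
long bytesSendableUntilFirstExpire = sendRate * msUntilFirstExpire / 1000; // ~40 KB deliverable in time
long bytesQueued = 32 * 1024; // 32 KB currently waiting
long excessQueued = bytesQueued - (bytesSendableUntilFirstExpire / 2); // ~12 KB over the 50% target
// locked_probabalisticDrop(excessQueued) then thins the queue by roughly that amount.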
/** how many Bps we are sending data to the peer (or 2KBps if we don't know) */
public long getSendRate() {
if (_sendRate == null) return 2*1024;
_sendRate.coallesceStats();
Rate r = _sendRate.getRate(60*1000);
if (r == null) {
return 2*1024;
} else if (r.getLastEventCount() <= 2) {
r = _sendRate.getRate(5*60*1000);
if (r.getLastEventCount() <= 2)
r = _sendRate.getRate(60*60*1000);
}
if (r.getLastEventCount() <= 2) {
return 2*1024;
} else {
long bps = (long)(r.getLastTotalValue() * 1000 / r.getLastTotalEventTime());
_context.statManager().addRateData("tcp.sendBps", bps, 0);
return bps;
}
}
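As a quick check on the units (illustrative values): getLastTotalValue() accumulates the byte counts reported by sent(), and getLastTotalEventTime() the milliseconds recorded with those events, so for example:

long bps = (long)(256 * 1024 * 1000L / (64 * 1000)); // 256 KB over 64s of recorded time = 4096 Bps (4 KBps)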
/**
* Probabilistically drop messages in proportion to their size relative to
* how far we've exceeded our target queue usage.
*/
private void locked_probabalisticDrop(long excessBytesQueued) {
for (int i = 0; i < _pendingMessages.size() && excessBytesQueued > 0; i++) {
OutNetMessage msg = (OutNetMessage)_pendingMessages.get(i);
int p = getDropProbability(msg.getMessageSize(), excessBytesQueued);
if (_context.random().nextInt(100) < p) {
_pendingMessages.remove(i);
i--;
msg.timestamp("Probabalistically dropped due to queue size " + excessBytesQueued);
sent(msg, false, -1);
_context.statManager().addRateData("tcp.probabalisticDropQueueSize", excessBytesQueued, msg.getLifetime());
// since we've already dropped this amount, reduce the excess so that
// fewer additional messages are dropped
excessBytesQueued -= msg.getMessageSize();
}
}
}
private int getDropProbability(long msgSize, long excessBytesQueued) {
if (msgSize > excessBytesQueued)
return 100;
return (int)(100.0 * msgSize / excessBytesQueued);
}
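With the ~12 KB of excess from the earlier illustration, the drop probability scales linearly with message size:

// getDropProbability(2*1024, 12288) -> 16 (a 2 KB message is dropped ~16% of the time)
// getDropProbability(6*1024, 12288) -> 50 (a 6 KB message is dropped half the time)
// getDropProbability(16*1024, 12288) -> 100 (anything larger than the excess is always dropped)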
private List locked_expireOld() {
long now = _context.clock().now();
List expired = null;
for (int i = 0; i < _pendingMessages.size(); i++) {
OutNetMessage cur = (OutNetMessage)_pendingMessages.get(i);
if (cur.getExpiration() < now) {
_pendingMessages.remove(i);
if (expired == null)
expired = new ArrayList(1);
expired.add(cur);
i--;
}
}
return expired;
}
/**
@@ -157,7 +278,9 @@ public class TCPConnection {
while ( (msg == null) && (!_closed) ) {
List expired = null;
long now = _context.clock().now();
int queueSize = 0;
synchronized (_pendingMessages) {
queueSize = _pendingMessages.size();
for (int i = 0; i < _pendingMessages.size(); i++) {
OutNetMessage cur = (OutNetMessage)_pendingMessages.get(i);
if (cur.getExpiration() < now) {
@@ -182,10 +305,19 @@ public class TCPConnection {
if (expired != null) {
for (int i = 0; i < expired.size(); i++) {
OutNetMessage cur = (OutNetMessage)expired.get(i);
cur.timestamp("TCPConnection.getNextMessage expired");
if (_log.shouldLog(Log.WARN))
_log.warn("Message " + cur.getMessageId() + " expired on the queue to "
+ _ident.getHash().toBase64().substring(0,6)
+ " (queue size " + queueSize + ") with lifetime "
+ cur.getLifetime());
sent(cur, false, 0);
}
}
}
if (msg != null)
msg.timestamp("TCPConnection.getNextMessage retrieved");
return msg;
}
@@ -231,5 +363,7 @@ public class TCPConnection {
*/
void sent(OutNetMessage msg, boolean ok, long time) {
_transport.afterSend(msg, ok, true, time);
if (ok)
_sendRate.addData(msg.getMessageSize(), msg.getLifetime());
}
}

View File

@@ -24,6 +24,10 @@ public class TCPConnectionEstablisher implements Runnable {
public void run() {
while (true) {
RouterInfo info = _transport.getNextPeer();
if (info == null) {
try { Thread.sleep(5*1000); } catch (InterruptedException ie) {}
continue;
}
ConnectionBuilder cb = new ConnectionBuilder(_context, _transport, info);
TCPConnection con = null;

View File

@@ -168,6 +168,11 @@ public class TCPTransport extends TransportImpl {
// _log.debug("Outbound message ready: " + msg);
if (msg != null) {
if (msg.getTarget() == null)
throw new IllegalStateException("Null target for a ready message?");
msg.timestamp("TCPTransport.outboundMessageReady");
TCPConnection con = null;
boolean newPeer = false;
synchronized (_connectionLock) {
@@ -269,7 +274,9 @@ public class TCPTransport extends TransportImpl {
_context.shitlist().shitlistRouter(con.getAttemptedPeer(), "Changed identities");
if (changedMsgs != null) {
for (int i = 0; i < changedMsgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)changedMsgs.get(i);
cur.timestamp("changedIdents");
afterSend(cur, false, false, 0);
}
}
}
@@ -280,6 +287,7 @@ public class TCPTransport extends TransportImpl {
con.setTransport(this);
con.closeConnection();
} else {
con.setTransport(this);
if (waitingMsgs != null) {
for (int i = 0; i < waitingMsgs.size(); i++) {
@@ -289,7 +297,6 @@ public class TCPTransport extends TransportImpl {
_context.shitlist().unshitlistRouter(ident.calculateHash());
con.setTransport(this);
con.runConnection();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connection set to run");
@@ -582,6 +589,7 @@ public class TCPTransport extends TransportImpl {
_context.netDb().fail(peer);
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)msgs.get(i);
cur.timestamp("no TCP addresses");
afterSend(cur, false, false, 0);
}
continue;
@@ -595,6 +603,7 @@ public class TCPTransport extends TransportImpl {
_context.netDb().fail(peer);
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)msgs.get(i);
cur.timestamp("invalid addresses");
afterSend(cur, false, false, 0);
}
continue; // invalid
@@ -608,6 +617,7 @@ public class TCPTransport extends TransportImpl {
_context.netDb().fail(peer);
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)msgs.get(i);
cur.timestamp("points at us");
afterSend(cur, false, false, 0);
}
continue;
@@ -619,18 +629,21 @@ public class TCPTransport extends TransportImpl {
_context.netDb().fail(peer);
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)msgs.get(i);
cur.timestamp("points at our ip");
afterSend(cur, false, false, 0);
}
continue;
}
if (!allowAddress(tcpAddr)) {
_log.error("Message points at illegal address! " + msg.getTarget());
_log.error("Message points at illegal address! "
+ msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6));
iter.remove();
_context.shitlist().shitlistRouter(peer, "Invalid addressaddress...");
_context.netDb().fail(peer);
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage cur = (OutNetMessage)msgs.get(i);
cur.timestamp("points at an illegal address");
afterSend(cur, false, false, 0);
}
continue;
@@ -669,6 +682,7 @@ public class TCPTransport extends TransportImpl {
// connectionEstablished clears them otherwise)
for (int i = 0; i < msgs.size(); i++) {
OutNetMessage msg = (OutNetMessage)msgs.get(i);
msg.timestamp("establishmentComplete(failed)");
afterSend(msg, false);
}
}
@@ -684,6 +698,12 @@ public class TCPTransport extends TransportImpl {
buf.append("<li>");
buf.append(con.getRemoteRouterIdentity().getHash().toBase64().substring(0,6));
buf.append(": up for ").append(DataHelper.formatDuration(con.getLifetime()));
buf.append(" transferring at ");
long bps = con.getSendRate();
if (bps < 1024)
buf.append(bps).append("Bps");
else
buf.append((int)(bps/1024)).append("KBps");
buf.append("</li>\n");
}
buf.append("</ul>\n");

View File

@@ -149,6 +149,7 @@ public class PoolingTunnelManagerFacade implements TunnelManagerFacade {
*
*/
public void peerFailed(Hash peer) {
if (_pool == null) return; // just initialized
int numFailed = 0;
boolean shouldKill = false;
for (Iterator iter = _pool.getManagedTunnelIds().iterator(); iter.hasNext(); ) {
@@ -233,5 +234,15 @@ public class PoolingTunnelManagerFacade implements TunnelManagerFacade {
if (_pool != null)
_pool.renderStatusHTML(out);
}
public long getLastParticipatingExpiration() {
long last = -1;
for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getParticipatingTunnel(id);
if ( (info != null) && (info.getSettings().getExpiration() > last) )
last = info.getSettings().getExpiration();
}
return last;
}
}

View File

@@ -69,7 +69,7 @@ class I2NPMessageReaderTest implements I2NPMessageReader.I2NPMessageEventListene
_log.debug("Disconnected");
}
public void messageReceived(I2NPMessageReader reader, I2NPMessage message, long msToRead, int size) {
_log.debug("Message received: " + message);
}