Compare commits


15 Commits

Author  SHA1  Date  Message

jrandom  870e94e184  2006-05-09 21:17:17 +00:00
    * 2006-05-09 0.6.1.18 released
    2006-05-09 jrandom
    * Further tunnel creation timeout revamp

complication  6b0d507644  2006-05-08 03:19:46 +00:00
    2006-05-07 Complication
    * Fix problem whereby repeated calls to allowed() would make
      the 1-tunnel exception permit more than one concurrent build

jrandom  70cf9e4ca7  2006-05-06 20:27:34 +00:00
    2006-05-06 jrandom
    * Readjust the tunnel creation timeouts to reject less but fail earlier,
      while tracking the extended timeout events.

jrandom  2a3974c71d  2006-05-04 23:08:48 +00:00
    2006-05-04 jrandom
    * Short circuit a highly congested part of the stat logging unless its
      required (may or may not help with a synchronization issue reported by
      andreas)

complication  46ac9292e8  2006-05-03 11:13:26 +00:00
    2006-05-03 Complication
    * Allow a single build attempt to proceed despite 1-minute overload
      only if the 1-second rate shows enough spare bandwidth
      (e.g. overload has already eased)

complication  4307097472  2006-05-03 04:30:26 +00:00
    2006-05-02 Complication
    * Correct a misnamed property in SummaryHelper.java
      to avoid confusion
    * Make the maximum allowance of our own concurrent
      tunnel builds slightly adaptive: one concurrent build per 6 KB/s
      within the fixed range 2..10
    * While overloaded, try to avoid completely choking our own build attempts,
      instead prefer limiting them to 1

complication  ed3fdaf4f1  2006-05-03 02:11:06 +00:00
    2006-05-02 Complication
    * Fixed URL in previous update, sorry

complication  378a9a8f5c  2006-05-03 02:03:01 +00:00
    2006-05-02 Complication
    * Weekly news.xml update

jrandom  4ef6180455  2006-05-01 22:40:21 +00:00
    2006-05-01 jrandom
    * Adjust the tunnel build timeouts to cut down on expirations, and
      increased the SSU connection establishment retransmission rate to
      something less glacial.
    * For the first 5 minutes of uptime, be less aggressive with tunnel
      exploration, opting for more reliable peers to start with.

jrandom  d4970e23c0  2006-05-01 19:09:02 +00:00
    2006-05-01 jrandom
    * Fix for a netDb lookup race (thanks cervantes!)

duck  0c9f165016  2006-05-01 15:39:37 +00:00
    fix typos

jrandom  be3a899ecb  2006-04-28 00:31:20 +00:00
    2006-04-27 jrandom
    * Avoid a race in the message reply registry (thanks cervantes!)

jrandom  7a6a749004  2006-04-28 00:08:40 +00:00
    2006-04-27 jrandom
    * Fixed the tunnel expiration desync code (thanks Complication!)

complication  17271ee3f0  2006-04-26 02:30:05 +00:00
    2006-04-25 Complication
    * weekly news.xml update

complication  99bcfa90df  2006-04-24 12:43:25 +00:00
    2006-04-24 Complication
    * Update news.xml to reflect 0.6.1.17
23 changed files with 204 additions and 79 deletions

View File

@@ -213,11 +213,11 @@ public class SummaryHelper {
}
/**
* How fast we have been receiving data over the last minute (pretty printed
* How fast we have been receiving data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getInboundMinuteKBps() {
public String getInboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getReceiveBps()/1024d;
@@ -225,11 +225,11 @@ public class SummaryHelper {
return fmt.format(kbps);
}
/**
* How fast we have been sending data over the last minute (pretty printed
* How fast we have been sending data over the last second (pretty printed
* string with 2 decimal places representing the KBps)
*
*/
public String getOutboundMinuteKBps() {
public String getOutboundSecondKBps() {
if (_context == null)
return "0.0";
double kbps = _context.bandwidthLimiter().getSendBps()/1024d;
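
The renamed getters above pretty-print the router's 1-second bandwidth reading. As a minimal standalone sketch of that conversion (the class name and DecimalFormat pattern here are illustrative assumptions, not the real SummaryHelper code), a raw bytes-per-second figure becomes the two-decimal KBps string shown on the summary bar:

    import java.text.DecimalFormat;

    // Illustrative sketch only: convert a raw bytes-per-second reading into
    // the "KBps with 2 decimal places" string used by the summary bar.
    public class KBpsFormatSketch {
        private static final DecimalFormat FMT = new DecimalFormat("###,##0.00");

        static String toKBps(double bytesPerSecond) {
            return FMT.format(bytesPerSecond / 1024d);
        }

        public static void main(String[] args) {
            System.out.println(toKBps(47623)); // ~46.51
            System.out.println(toKBps(0));     // 0.00
        }
    }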

View File

@@ -65,7 +65,7 @@
%><hr />
<u><b><a href="config.jsp" title="Configure the bandwidth limits">Bandwidth in/out</a></b></u><br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundMinuteKBps" />/<jsp:getProperty name="helper" property="outboundMinuteKBps" />KBps<br />
<b>1s:</b> <jsp:getProperty name="helper" property="inboundSecondKBps" />/<jsp:getProperty name="helper" property="outboundSecondKBps" />KBps<br />
<b>5m:</b> <jsp:getProperty name="helper" property="inboundFiveMinuteKBps" />/<jsp:getProperty name="helper" property="outboundFiveMinuteKBps" />KBps<br />
<b>Total:</b> <jsp:getProperty name="helper" property="inboundLifetimeKBps" />/<jsp:getProperty name="helper" property="outboundLifetimeKBps" />KBps<br />
<b>Used:</b> <jsp:getProperty name="helper" property="inboundTransferred" />/<jsp:getProperty name="helper" property="outboundTransferred" /><br />

View File

@@ -14,8 +14,8 @@ package net.i2p;
*
*/
public class CoreVersion {
public final static String ID = "$Revision: 1.59 $ $Date: 2006/04/15 02:58:13 $";
public final static String VERSION = "0.6.1.17";
public final static String ID = "$Revision: 1.60 $ $Date: 2006/04/23 16:06:13 $";
public final static String VERSION = "0.6.1.18";
public static void main(String args[]) {
System.out.println("I2P Core version: " + VERSION);

View File

@@ -29,6 +29,8 @@ public class BufferedStatLog implements StatLog {
private String _lastFilters;
private BufferedWriter _out;
private String _outFile;
/** short circuit for adding data, set to true if some filters are set, false if its empty (so we can skip the sync) */
private volatile boolean _filtersSpecified;
private static final int BUFFER_SIZE = 1024;
private static final boolean DISABLE_LOGGING = false;
@@ -44,6 +46,7 @@ public class BufferedStatLog implements StatLog {
_lastWrite = _events.length-1;
_statFilters = new ArrayList(10);
_flushFrequency = 500;
_filtersSpecified = false;
I2PThread writer = new I2PThread(new StatLogWriter(), "StatLogWriter");
writer.setDaemon(true);
writer.start();
@@ -51,6 +54,7 @@ public class BufferedStatLog implements StatLog {
public void addData(String scope, String stat, long value, long duration) {
if (DISABLE_LOGGING) return;
if (!shouldLog(stat)) return;
synchronized (_events) {
_events[_eventNext].init(scope, stat, value, duration);
_eventNext = (_eventNext + 1) % _events.length;
@@ -72,6 +76,7 @@ public class BufferedStatLog implements StatLog {
}
private boolean shouldLog(String stat) {
if (!_filtersSpecified) return false;
synchronized (_statFilters) {
return _statFilters.contains(stat) || _statFilters.contains("*");
}
@@ -88,11 +93,18 @@ public class BufferedStatLog implements StatLog {
_statFilters.clear();
while (tok.hasMoreTokens())
_statFilters.add(tok.nextToken().trim());
if (_statFilters.size() > 0)
_filtersSpecified = true;
else
_filtersSpecified = false;
}
}
_lastFilters = val;
} else {
synchronized (_statFilters) { _statFilters.clear(); }
synchronized (_statFilters) {
_statFilters.clear();
_filtersSpecified = false;
}
}
String filename = _context.getProperty(StatManager.PROP_STAT_FILE);
@@ -146,7 +158,7 @@ public class BufferedStatLog implements StatLog {
updateFilters();
int cur = start;
while (cur != end) {
if (shouldLog(_events[cur].getStat())) {
//if (shouldLog(_events[cur].getStat())) {
String when = null;
synchronized (_fmt) {
when = _fmt.format(new Date(_events[cur].getTime()));
@@ -164,7 +176,7 @@ public class BufferedStatLog implements StatLog {
_out.write(" ");
_out.write(Long.toString(_events[cur].getDuration()));
_out.write("\n");
}
//}
cur = (cur + 1) % _events.length;
}
_out.flush();
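
The additions above gate the synchronized stat buffer behind a volatile flag so addData() can return immediately when no filters are configured. A minimal sketch of that short-circuit pattern, with illustrative names rather than the real BufferedStatLog fields:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch: a volatile flag mirrors "are any filters set?" so the hot
    // addData() path can bail out without ever taking the lock when idle.
    public class FilteredLogSketch {
        private final List<String> filters = new ArrayList<String>();
        private volatile boolean filtersSpecified = false;

        public void setFilters(List<String> newFilters) {
            synchronized (filters) {
                filters.clear();
                filters.addAll(newFilters);
                filtersSpecified = !filters.isEmpty(); // keep the flag in sync under the lock
            }
        }

        public void addData(String stat, long value) {
            if (!shouldLog(stat)) return;              // cheap volatile read, no lock
            synchronized (filters) {
                // ... buffer the event for the writer thread ...
            }
        }

        private boolean shouldLog(String stat) {
            if (!filtersSpecified) return false;       // short circuit: no filters set
            synchronized (filters) {
                return filters.contains(stat) || filters.contains("*");
            }
        }
    }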

View File

@@ -1,4 +1,52 @@
$Id: history.txt,v 1.461 2006/04/19 12:47:02 jrandom Exp $
$Id: history.txt,v 1.471 2006/05/07 22:19:46 complication Exp $
* 2006-05-09 0.6.1.18 released
2006-05-09 jrandom
* Further tunnel creation timeout revamp
2006-05-07 Complication
* Fix problem whereby repeated calls to allowed() would make
the 1-tunnel exception permit more than one concurrent build
2006-05-06 jrandom
* Readjust the tunnel creation timeouts to reject less but fail earlier,
while tracking the extended timeout events.
2006-05-04 jrandom
* Short circuit a highly congested part of the stat logging unless its
required (may or may not help with a synchronization issue reported by
andreas)
2006-05-03 Complication
* Allow a single build attempt to proceed despite 1-minute overload
only if the 1-second rate shows enough spare bandwidth
(e.g. overload has already eased)
2006-05-02 Complication
* Correct a misnamed property in SummaryHelper.java
to avoid confusion
* Make the maximum allowance of our own concurrent
tunnel builds slightly adaptive: one concurrent build per 6 KB/s
within the fixed range 2..10
* While overloaded, try to avoid completely choking our own build attempts,
instead prefer limiting them to 1
2006-05-01 jrandom
* Adjust the tunnel build timeouts to cut down on expirations, and
increased the SSU connection establishment retransmission rate to
something less glacial.
* For the first 5 minutes of uptime, be less aggressive with tunnel
exploration, opting for more reliable peers to start with.
2006-05-01 jrandom
* Fix for a netDb lookup race (thanks cervantes!)
2006-04-27 jrandom
* Avoid a race in the message reply registry (thanks cervantes!)
2006-04-27 jrandom
* Fixed the tunnel expiration desync code (thanks Complication!)
* 2006-04-23 0.6.1.17 released

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/04/13 07:40:21 $">
<i2p.release version="0.6.1.16" date="2006/04/15" minVersion="0.6"
<i2p.news date="$Date: 2006/04/15 02:58:12 $">
<i2p.release version="0.6.1.18" date="2006/05/09" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"

View File

@@ -4,7 +4,7 @@
<info>
<appname>i2p</appname>
<appversion>0.6.1.17</appversion>
<appversion>0.6.1.18</appversion>
<authors>
<author name="I2P" email="support@i2p.net"/>
</authors>

View File

@@ -1,5 +1,5 @@
<i2p.news date="$Date: 2006/04/15 12:25:50 $">
<i2p.release version="0.6.1.17" date="2006/04/23" minVersion="0.6"
<i2p.news date="$Date: 2006/05/02 21:11:06 $">
<i2p.release version="0.6.1.18" date="2006/05/09" minVersion="0.6"
anonurl="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/i2p/i2pupdate.sud"
publicurl="http://dev.i2p.net/i2p/i2pupdate.sud"
anonannouncement="http://i2p/NF2RLVUxVulR3IqK0sGJR0dHQcGXAzwa6rEO4WAWYXOHw-DoZhKnlbf1nzHXwMEJoex5nFTyiNMqxJMWlY54cvU~UenZdkyQQeUSBZXyuSweflUXFqKN-y8xIoK2w9Ylq1k8IcrAFDsITyOzjUKoOPfVq34rKNDo7fYyis4kT5bAHy~2N1EVMs34pi2RFabATIOBk38Qhab57Umpa6yEoE~rbyR~suDRvD7gjBvBiIKFqhFueXsR2uSrPB-yzwAGofTXuklofK3DdKspciclTVzqbDjsk5UXfu2nTrC1agkhLyqlOfjhyqC~t1IXm-Vs2o7911k7KKLGjB4lmH508YJ7G9fLAUyjuB-wwwhejoWqvg7oWvqo4oIok8LG6ECR71C3dzCvIjY2QcrhoaazA9G4zcGMm6NKND-H4XY6tUWhpB~5GefB3YczOqMbHq4wi0O9MzBFrOJEOs3X4hwboKWANf7DT5PZKJZ5KorQPsYRSq0E3wSOsFCSsdVCKUGsAAAA/pipermail/i2p/2005-September/000878.html"
@@ -10,13 +10,13 @@
anonlogs="http://i2p/Nf3ab-ZFkmI-LyMt7GjgT-jfvZ3zKDl0L96pmGQXF1B82W2Bfjf0n7~288vafocjFLnQnVcmZd~-p0-Oolfo9aW2Rm-AhyqxnxyLlPBqGxsJBXjPhm1JBT4Ia8FB-VXt0BuY0fMKdAfWwN61-tj4zIcQWRxv3DFquwEf035K~Ra4SWOqiuJgTRJu7~o~DzHVljVgWIzwf8Z84cz0X33pv-mdG~~y0Bsc2qJVnYwjjR178YMcRSmNE0FVMcs6f17c6zqhMw-11qjKpY~EJfHYCx4lBWF37CD0obbWqTNUIbL~78vxqZRT3dgAgnLixog9nqTO-0Rh~NpVUZnoUi7fNR~awW5U3Cf7rU7nNEKKobLue78hjvRcWn7upHUF45QqTDuaM3yZa7OsjbcH-I909DOub2Q0Dno6vIwuA7yrysccN1sbnkwZbKlf4T6~iDdhaSLJd97QCyPOlbyUfYy9QLNExlRqKgNVJcMJRrIual~Lb1CLbnzt0uvobM57UpqSAAAA/meeting141"
publiclogs="http://www.i2p.net/meeting141" />
&#149;
2006-04-15: 0.6.1.16 <a href="http://dev.i2p/pipermail/i2p/2006-April/001280.html">released</a>
with a significant PRNG bugfix and other improvements.
2006-04-23: 0.6.1.17 <a href="http://dev.i2p/pipermail/i2p/2006-April/001282.html">released</a>
with multiple improvements. Upgrading should alleviate congestion and peer selection issues.
<br>
&#149;
2006-04-18:
<a href="http://dev.i2p/pipermail/i2p/2006-April/001281.html">status notes</a>
2006-05-02:
<a href="http://dev.i2p/pipermail/i2p/2006-May/001285.html">status notes</a>
and
<a href="http://www.i2p/meeting176">meeting log</a>
<a href="http://www.i2p/meeting178">meeting log</a>
<br>
</i2p.news>

View File

@@ -26,7 +26,7 @@ class JobQueueRunner implements Runnable {
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_state = 1;
}

View File

@@ -15,8 +15,8 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.401 $ $Date: 2006/04/19 12:46:53 $";
public final static String VERSION = "0.6.1.17";
public final static String ID = "$Revision: 1.411 $ $Date: 2006/05/07 22:19:47 $";
public final static String VERSION = "0.6.1.18";
public final static long BUILD = 0;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);

View File

@@ -113,7 +113,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look fora remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });

View File

@@ -166,21 +166,44 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
synchronized (_activeFloodQueries) { _activeFloodQueries.remove(key); }
Job find = null;
if ( (onFind != null) && (onFind.size() > 0) )
find = (Job)onFind.remove(0);
Job fail = null;
if ( (onFailed != null) && (onFailed.size() > 0) )
fail = (Job)onFailed.remove(0);
if (onFind != null) {
synchronized (onFind) {
if (onFind.size() > 0)
find = (Job)onFind.remove(0);
}
}
if (onFailed != null) {
synchronized (onFailed) {
if (onFailed.size() > 0)
fail = (Job)onFailed.remove(0);
}
}
SearchJob job = super.search(key, find, fail, timeoutMs, isLease);
if (job != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Floodfill search timed out for " + key.toBase64() + ", falling back on normal search (#"
+ job.getJobId() + ") with " + timeoutMs + " remaining");
long expiration = timeoutMs + _context.clock().now();
while ( (onFind != null) && (onFind.size() > 0) )
job.addDeferred((Job)onFind.remove(0), null, expiration, isLease);
while ( (onFailed != null) && (onFailed.size() > 0) )
job.addDeferred(null, (Job)onFailed.remove(0), expiration, isLease);
List removed = null;
if (onFind != null) {
synchronized (onFind) {
removed = new ArrayList(onFind);
onFind.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred((Job)removed.get(i), null, expiration, isLease);
removed = null;
}
if (onFailed != null) {
synchronized (onFailed) {
removed = new ArrayList(onFailed);
onFailed.clear();
}
for (int i = 0; i < removed.size(); i++)
job.addDeferred(null, (Job)removed.get(i), expiration, isLease);
removed = null;
}
}
}
void complete(Hash key) {
@@ -263,10 +286,13 @@ class FloodSearchJob extends JobImpl {
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
while (_onFailed.size() > 0) {
Job job = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(job);
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
@@ -304,10 +330,13 @@ class FloodSearchJob extends JobImpl {
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
for (int i = 0; i < _onFailed.size(); i++) {
Job j = (Job)_onFailed.remove(0);
getContext().jobQueue().addJob(j);
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
@@ -316,8 +345,13 @@ class FloodSearchJob extends JobImpl {
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
while (_onFind.size() > 0)
getContext().jobQueue().addJob((Job)_onFind.remove(0));
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
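
The netDb lookup race fix above stops pulling callbacks out of the shared onFind/onFailed lists without holding their locks. A reduced sketch of the drain pattern it uses (hypothetical class, not the actual FloodSearchJob): snapshot the list under its lock, clear it, then dispatch the copies afterwards, so a second thread finishing the same search finds an empty list instead of firing the jobs twice.

    import java.util.ArrayList;
    import java.util.List;

    // Reduced sketch of the drain-under-lock pattern (illustrative names):
    // copy the callbacks while holding the list's lock, clear the list, and
    // run the copies outside the lock so no callback can fire twice.
    public class CallbackDrainSketch {
        private final List<Runnable> onFind = new ArrayList<Runnable>();

        public void register(Runnable callback) {
            synchronized (onFind) {
                onFind.add(callback);
            }
        }

        public void success() {
            List<Runnable> removed;
            synchronized (onFind) {
                removed = new ArrayList<Runnable>(onFind); // snapshot under the lock
                onFind.clear();                            // later finishers see it empty
            }
            for (Runnable r : removed)                     // dispatch outside the lock
                r.run();
        }
    }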

View File

@@ -194,21 +194,20 @@ public class OutboundMessageRegistry {
public void renderStatusHTML(Writer out) throws IOException {}
private class CleanupTask implements SimpleTimer.TimedEvent {
private List _removing;
private long _nextExpire;
public CleanupTask() {
_removing = new ArrayList(4);
_nextExpire = -1;
}
public void timeReached() {
long now = _context.clock().now();
List removing = new ArrayList(1);
synchronized (_selectors) {
for (int i = 0; i < _selectors.size(); i++) {
MessageSelector sel = (MessageSelector)_selectors.get(i);
if (sel == null) continue;
long expiration = sel.getExpiration();
if (expiration <= now) {
_removing.add(sel);
removing.add(sel);
_selectors.remove(i);
i--;
} else if (expiration < _nextExpire || _nextExpire < now) {
@@ -216,9 +215,9 @@ public class OutboundMessageRegistry {
}
}
}
if (_removing.size() > 0) {
for (int i = 0; i < _removing.size(); i++) {
MessageSelector sel = (MessageSelector)_removing.get(i);
if (removing.size() > 0) {
for (int i = 0; i < removing.size(); i++) {
MessageSelector sel = (MessageSelector)removing.get(i);
OutNetMessage msg = null;
List msgs = null;
synchronized (_selectorToMessage) {
@@ -249,7 +248,6 @@ public class OutboundMessageRegistry {
}
}
}
_removing.clear();
}
if (_nextExpire <= now)
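
The message reply registry fix above drops the shared _removing field in favour of a list local to each timeReached() pass, so overlapping cleanup runs cannot step on each other's work. A compact sketch of that shape (names are illustrative, not the real OutboundMessageRegistry):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    // Illustrative sketch: expired entries are gathered into a list local to the
    // timer pass while scanning under the lock, then handled after it is released.
    public class CleanupTaskSketch {
        private final List<Long> selectorExpirations = new ArrayList<Long>();

        void timeReached(long now) {
            List<Long> removing = new ArrayList<Long>(1); // local, not a member field
            synchronized (selectorExpirations) {
                for (Iterator<Long> it = selectorExpirations.iterator(); it.hasNext(); ) {
                    long expiration = it.next();
                    if (expiration <= now) {
                        removing.add(expiration);
                        it.remove();
                    }
                }
            }
            for (Long expired : removing) {
                // ... fail the messages that were registered under this selector ...
            }
        }
    }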

View File

@@ -576,9 +576,9 @@ public class EstablishmentManager {
return;
}
_transport.send(_builder.buildSessionCreatedPacket(state, _transport.getExternalPort(), _transport.getIntroKey()));
// if they haven't advanced to sending us confirmed packets in 5s,
// if they haven't advanced to sending us confirmed packets in 1s,
// repeat
state.setNextSendTime(now + 5*1000);
state.setNextSendTime(now + 1000);
}
private void sendRequest(OutboundEstablishState state) {
@@ -988,15 +988,15 @@ public class EstablishmentManager {
long delay = nextSendTime - now;
if ( (nextSendTime == -1) || (delay > 0) ) {
if (delay > 5000)
delay = 5000;
if (delay > 1000)
delay = 1000;
boolean interrupted = false;
try {
synchronized (_activityLock) {
if (_activity > 0)
return;
if (nextSendTime == -1)
_activityLock.wait(5000);
_activityLock.wait(1000);
else
_activityLock.wait(delay);
}
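
The timing changes above cap the establisher's wait at one second so an unanswered session packet is retransmitted far sooner than the old five-second ceiling. A small sketch of the wait calculation under that cap (hypothetical helper, not the actual event loop):

    // Illustrative sketch: cap how long the establisher sleeps so retransmissions
    // of SessionRequest/SessionCreated happen within about a second.
    public class RetransmitWaitSketch {
        private static final long MAX_WAIT_MS = 1000; // was 5000 before this change

        static long computeWait(long nextSendTime, long now) {
            if (nextSendTime == -1)
                return MAX_WAIT_MS;           // nothing scheduled yet: poll at the cap
            long delay = nextSendTime - now;
            if (delay <= 0)
                return 0;                     // already due, send immediately
            return Math.min(delay, MAX_WAIT_MS);
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(computeWait(now + 4000, now)); // 1000: capped
            System.out.println(computeWait(now + 300, now));  // 300: due soon
            System.out.println(computeWait(-1, now));         // 1000: no schedule yet
        }
    }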

View File

@@ -360,9 +360,9 @@ public class OutboundEstablishState {
/** note that we just sent the SessionConfirmed packet */
public synchronized void confirmedPacketsSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send confirm packets, nextSend = 5s");
_log.debug("Send confirm packets, nextSend = 1s");
if ( (_currentState == STATE_UNKNOWN) ||
(_currentState == STATE_REQUEST_SENT) ||
(_currentState == STATE_CREATED_RECEIVED) )
@@ -371,15 +371,15 @@ public class OutboundEstablishState {
/** note that we just sent the SessionRequest packet */
public synchronized void requestSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send a request packet, nextSend = 5s");
_log.debug("Send a request packet, nextSend = 1s");
if (_currentState == STATE_UNKNOWN)
_currentState = STATE_REQUEST_SENT;
}
public synchronized void introSent() {
_lastSend = _context.clock().now();
_nextSend = _lastSend + 5*1000;
_nextSend = _lastSend + 1000;
if (_currentState == STATE_UNKNOWN)
_currentState = STATE_PENDING_INTRO;
}

View File

@@ -104,7 +104,7 @@ public class TunnelDispatcher implements Service {
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.participatingMessageCount",
"How many messages are sent through a participating tunnel?", "Tunnels",
new long[] { 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
new long[] { 60*1000l, 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.ownedMessageCount",
"How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",
new long[] { 60*1000l, 10*60*1000l, 60*60*1000l });

View File

@@ -46,13 +46,21 @@ class BuildExecutor implements Runnable {
_handler = new BuildHandler(ctx, this);
}
// Estimated cost of one tunnel build attempt, bytes
private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5*1024;
private int allowed() {
StringBuffer buf = null;
if (_log.shouldLog(Log.DEBUG)) {
buf = new StringBuffer(128);
buf.append("Allowed: ");
}
int allowed = 5;
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
int allowed = maxKBps / 6; // Max. 1 concurrent build per 6 KB/s outbound
if (allowed < 2) allowed = 2; // Never choke below 2 builds (but congestion may)
if (allowed > 10) allowed = 10; // Never go beyond 10, that is uncharted territory (old limit was 5)
String prop = _context.getProperty("router.tunnelConcurrentBuilds");
if (prop != null)
try { allowed = Integer.valueOf(prop).intValue(); } catch (NumberFormatException nfe) {}
@@ -110,8 +118,27 @@ class BuildExecutor implements Runnable {
return 0; // if we have a job heavily blocking our jobqueue, ssllloowww dddooowwwnnn
}
if (isOverloaded())
return 0;
if (isOverloaded()) {
int used1s = _context.router().get1sRate(true);
// If 1-second average indicates we could manage building one tunnel
if ((maxKBps*1024) - used1s > BUILD_BANDWIDTH_ESTIMATE_BYTES) {
// Check if we're already building some tunnels
if (concurrent > 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload and favourable 1s rate (" + used1s + ") but already building, so allowed 0.");
return 0;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload and favourable 1s rate(" + used1s + "), so allowed 1.");
return 1;
}
} else {
// Allow none
if (_log.shouldLog(Log.WARN))
_log.warn("We had serious overload, so allowed building 0.");
return 0;
}
}
return allowed;
}
@@ -124,7 +151,7 @@ class BuildExecutor implements Runnable {
// dont include the inbound rates when throttling tunnel building, since
// that'd expose a pretty trivial attack.
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
int used1s = _context.router().get1sRate(true); // dont throttle on the 1s rate, its too volatile
int used1s = 0; // dont throttle on the 1s rate, its too volatile
int used1m = _context.router().get1mRate(true);
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
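
The new allowed() logic above scales the concurrent-build ceiling with the configured outbound limit and carves out a single-build exception during mild overload. A worked sketch of both calculations (illustrative class using the same constants as the diff):

    // Worked sketch, not the real BuildExecutor: one concurrent build per 6 KB/s
    // of outbound limit, clamped to 2..10, plus at most one build during mild
    // overload when the 1-second rate still leaves ~5 KB of headroom.
    public class BuildAllowanceSketch {
        private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5 * 1024;

        static int allowed(int maxKBps) {
            int allowed = maxKBps / 6;
            if (allowed < 2) allowed = 2;
            if (allowed > 10) allowed = 10;
            return allowed;
        }

        static int allowedUnderOverload(int maxKBps, int used1sBps, int concurrent) {
            boolean headroom = (maxKBps * 1024) - used1sBps > BUILD_BANDWIDTH_ESTIMATE_BYTES;
            if (headroom && concurrent == 0)
                return 1;   // overload has eased enough for a single attempt
            return 0;       // still choked, or a build is already in flight
        }

        public static void main(String[] args) {
            System.out.println(allowed(16));   // 2  (16/6 = 2)
            System.out.println(allowed(48));   // 8
            System.out.println(allowed(128));  // 10 (clamped)
            System.out.println(allowedUnderOverload(32, 20000, 0)); // 1: headroom, idle
            System.out.println(allowedUnderOverload(32, 30000, 0)); // 0: no headroom
        }
    }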

View File

@@ -51,6 +51,7 @@ class BuildHandler {
_context.statManager().createRateStat("tunnel.dropLoadProactive", "What the estimated queue time was when we dropped an inbound request (period is num pending)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoadProactiveAbort", "How often we would have proactively dropped a request, but allowed it through?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildReplyTooSlow", "How often a tunnel build reply came back after we had given up waiting for it?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -85,7 +86,7 @@ class BuildHandler {
handled.add(_inboundBuildMessages.remove(_inboundBuildMessages.size()-1));
} else {
// drop any expired messages
long dropBefore = System.currentTimeMillis() - BuildRequestor.REQUEST_TIMEOUT;
long dropBefore = System.currentTimeMillis() - (BuildRequestor.REQUEST_TIMEOUT*3);
do {
BuildMessageState state = (BuildMessageState)_inboundBuildMessages.get(0);
if (state.recvTime <= dropBefore) {
@@ -193,6 +194,7 @@ class BuildHandler {
_log.warn("The reply " + replyMessageId + " did not match any pending tunnels");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Pending tunnels: " + buf.toString());
_context.statManager().addRateData("tunnel.buildReplyTooSlow", 1, 0);
} else {
handleReply(state.msg, cfg, System.currentTimeMillis()-state.recvTime);
}
@@ -276,7 +278,7 @@ class BuildHandler {
if (_log.shouldLog(Log.DEBUG))
_log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
if (timeSinceReceived > BuildRequestor.REQUEST_TIMEOUT) {
if (timeSinceReceived > (BuildRequestor.REQUEST_TIMEOUT*3)) {
// don't even bother, since we are so overloaded locally
if (_log.shouldLog(Log.WARN))
_log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
@@ -415,7 +417,7 @@ class BuildHandler {
int proactiveDrops = countProactiveDrops();
long recvDelay = System.currentTimeMillis()-state.recvTime;
if (response == 0) {
float pDrop = recvDelay / (BuildRequestor.REQUEST_TIMEOUT);
float pDrop = recvDelay / (BuildRequestor.REQUEST_TIMEOUT*3);
pDrop = (float)Math.pow(pDrop, 16);
if (_context.random().nextFloat() < pDrop) { // || (proactiveDrops > MAX_PROACTIVE_DROPS) ) ) {
_context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, proactiveDrops);
@@ -598,7 +600,7 @@ class BuildHandler {
for (int i = 0; i < _inboundBuildMessages.size(); i++) {
BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(i);
long age = System.currentTimeMillis() - cur.recvTime;
if (age >= BuildRequestor.REQUEST_TIMEOUT) {
if (age >= BuildRequestor.REQUEST_TIMEOUT*3) {
_inboundBuildMessages.remove(i);
i--;
dropped++;
@@ -610,7 +612,7 @@ class BuildHandler {
_context.statManager().addRateData("tunnel.dropLoadBacklog", _inboundBuildMessages.size(), _inboundBuildMessages.size());
} else {
int queueTime = estimateQueueTime(_inboundBuildMessages.size());
float pDrop = queueTime/((float)BuildRequestor.REQUEST_TIMEOUT);
float pDrop = queueTime/((float)BuildRequestor.REQUEST_TIMEOUT*3);
pDrop = (float)Math.pow(pDrop, 16); // steeeep
float f = _context.random().nextFloat();
if ( (pDrop > f) && (allowProactiveDrop()) ) {
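
The probabilistic drop above now measures the receive delay against three times the request timeout before raising it to the 16th power, so only requests that have sat close to the extended deadline get shed. A worked sketch of the resulting curve (illustrative only; REQUEST_TIMEOUT is taken from the BuildRequestor change further down):

    // Illustrative sketch: drop probability as a function of how long an inbound
    // build request has waited, using pDrop = (recvDelay / (REQUEST_TIMEOUT*3))^16.
    public class DropProbabilitySketch {
        static final int REQUEST_TIMEOUT = 10 * 1000; // per the BuildRequestor diff below

        static float pDrop(long recvDelayMs) {
            float p = recvDelayMs / (float) (REQUEST_TIMEOUT * 3);
            return (float) Math.pow(p, 16);
        }

        public static void main(String[] args) {
            System.out.printf("%.4f%n", pDrop(5000));  // ~0.0000 -- fresh request
            System.out.printf("%.4f%n", pDrop(20000)); // ~0.0015
            System.out.printf("%.4f%n", pDrop(28000)); // ~0.33  -- nearly timed out
        }
    }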

View File

@@ -22,7 +22,7 @@ class BuildRequestor {
ORDER.add(new Integer(i));
}
private static final int PRIORITY = 500;
static final int REQUEST_TIMEOUT = 20*1000;
static final int REQUEST_TIMEOUT = 10*1000;
private static boolean usePairedTunnels(RouterContext ctx) {
String val = ctx.getProperty("router.usePairedTunnels");

View File

@@ -9,13 +9,21 @@ class ExpireJob extends JobImpl {
private TunnelPool _pool;
private TunnelCreatorConfig _cfg;
private boolean _leaseUpdated;
private long _dropAfter;
public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
_pool = pool;
_cfg = cfg;
_leaseUpdated = false;
// give 'em some extra time before dropping 'em
getTiming().setStartAfter(cfg.getExpiration()); // + Router.CLOCK_FUDGE_FACTOR);
// we act as if this tunnel expires a random skew before it actually does
// so we rebuild out of sync. otoh, we will honor tunnel messages on it
// up through the full lifetime of the tunnel, plus a clock skew, since
// others may be sending to the published lease expirations
long expire = cfg.getExpiration();
_dropAfter = expire + Router.CLOCK_FUDGE_FACTOR;
expire -= ctx.random().nextLong(5*60*1000);
cfg.setExpiration(expire);
getTiming().setStartAfter(expire);
}
public String getName() {
if (_pool.getSettings().isExploratory()) {
@@ -42,7 +50,8 @@ class ExpireJob extends JobImpl {
_pool.removeTunnel(_cfg);
_leaseUpdated = true;
_pool.refreshLeaseSet();
requeue(Router.CLOCK_FUDGE_FACTOR);
long timeToDrop = _dropAfter - getContext().clock().now();
requeue(timeToDrop);
} else {
// already removed/refreshed, but now lets make it
// so we dont even honor the tunnel anymore
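
The reworked ExpireJob above splits a tunnel's lifetime into two deadlines: a randomly skewed early expiration that drives rebuilds out of sync, and a later drop point one clock-fudge-factor past the published expiration, up to which messages are still honored. A compact sketch of that bookkeeping (class name and fudge-factor value are assumptions for illustration):

    import java.util.Random;

    // Illustrative sketch of the two deadlines: rebuild up to 5 minutes early,
    // but keep honoring messages until the published expiration plus a fudge.
    public class ExpireTimingSketch {
        static final long CLOCK_FUDGE_FACTOR = 60 * 1000;  // assumed value for the sketch
        static final long MAX_SKEW = 5 * 60 * 1000;

        final long rebuildAt;   // when we stop using/advertising the tunnel
        final long dropAfter;   // when we stop honoring messages on it entirely

        ExpireTimingSketch(long publishedExpiration, Random rnd) {
            dropAfter = publishedExpiration + CLOCK_FUDGE_FACTOR;
            rebuildAt = publishedExpiration - (long) (rnd.nextDouble() * MAX_SKEW);
        }

        public static void main(String[] args) {
            long expire = System.currentTimeMillis() + 10 * 60 * 1000; // 10-minute tunnel
            ExpireTimingSketch t = new ExpireTimingSketch(expire, new Random());
            System.out.println("rebuild in ms: " + (t.rebuildAt - System.currentTimeMillis()));
            System.out.println("drop in ms:    " + (t.dropAfter - System.currentTimeMillis()));
        }
    }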

View File

@@ -55,7 +55,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
if (Boolean.valueOf(ctx.getProperty("router.exploreHighCapacity", "false")).booleanValue())
return true;
// no need to explore too wildly at first
if (ctx.router().getUptime() <= 10*1000)
if (ctx.router().getUptime() <= 5*60*1000)
return true;
// ok, if we aren't explicitly asking for it, we should try to pick peers
// randomly from the 'not failing' pool. However, if we are having a

View File

@@ -33,11 +33,6 @@ public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
if (_testJob != null)
_testJob.testSuccessful(ms);
super.testSuccessful(ms);
// once a tunnel has been built and we know it works, lets skew ourselves a bit so we
// aren't as cyclic
if ( (_context.router().getUptime() < 10*60*1000) && (!_live) )
setExpiration(getExpiration() - _context.random().nextInt(5*60*1000));
_live = true;
}

View File

@@ -191,7 +191,7 @@ class TestJob extends JobImpl {
/** randomized time we should wait before testing */
private int getDelay() { return TEST_DELAY + getContext().random().nextInt(TEST_DELAY); }
/** how long we allow tests to run for before failing them */
private int getTestPeriod() { return 20*1000; }
private int getTestPeriod() { return 15*1000; }
private void scheduleRetest() { scheduleRetest(false); }
private void scheduleRetest(boolean asap) {
if (asap) {