diff --git a/.mtn-ignore b/.mtn-ignore new file mode 100644 index 00000000..a36f097b --- /dev/null +++ b/.mtn-ignore @@ -0,0 +1,6 @@ +env +pots +\.pyc$ +\.pyo$ +\.mo$ +~$ diff --git a/.tx/config b/.tx/config index 1d67b233..0e74a466 100644 --- a/.tx/config +++ b/.tx/config @@ -1,12 +1,60 @@ -[I2P.website] -source_file = ./messages.pot -source_lang = en -trans.de = i2p2www/translations/de/LC_MESSAGES/messages.po -trans.es = i2p2www/translations/es/LC_MESSAGES/messages.po -trans.fr = i2p2www/translations/fr/LC_MESSAGES/messages.po -trans.sv_SE = i2p2www/translations/sv/LC_MESSAGES/messages.po -type = PO - [main] -host = http://www.transifex.net +host = https://www.transifex.com +lang_map = ru_RU: ru + +[I2P.website_about] +file_filter = i2p2www/translations//LC_MESSAGES/about.po +source_file = pots/about.pot +source_lang = en +type = PO +minimum_perc = 2 + +[I2P.website_blog] +file_filter = i2p2www/translations//LC_MESSAGES/blog.po +source_file = pots/blog.pot +source_lang = en +type = PO +minimum_perc = 10 + +[I2P.website_comparison] +file_filter = i2p2www/translations//LC_MESSAGES/comparison.po +source_file = pots/comparison.pot +source_lang = en +type = PO +minimum_perc = 10 + +[I2P.website_docs] +file_filter = i2p2www/translations//LC_MESSAGES/docs.po +source_file = pots/docs.pot +source_lang = en +type = PO +minimum_perc = 1 + +[I2P.website_get-involved] +file_filter = i2p2www/translations//LC_MESSAGES/get-involved.po +source_file = pots/get-involved.pot +source_lang = en +type = PO +minimum_perc = 2 + +[I2P.website_misc] +file_filter = i2p2www/translations//LC_MESSAGES/misc.po +source_file = pots/misc.pot +source_lang = en +type = PO +minimum_perc = 10 + +[I2P.website_priority] +file_filter = i2p2www/translations//LC_MESSAGES/priority.po +source_file = pots/priority.pot +source_lang = en +type = PO +minimum_perc = 10 + +[I2P.website_research] +file_filter = i2p2www/translations//LC_MESSAGES/research.po +source_file = pots/research.pot +source_lang = en +type = PO +minimum_perc = 50 diff --git a/README b/README new file mode 100644 index 00000000..e6eae615 --- /dev/null +++ b/README @@ -0,0 +1,15 @@ +To run locally (for testing purposes): + +- Install proxychains, configure it for Tor + +- Pull in the dependencies: +$ proxychains ./setup_venv.sh +(you can also pull them non-anon by leaving out proxychains) + +- Compile translations (if you want to see them): +$ ./compile-messages.sh + +- Start the webserver: +$ ./runserver.py + +- Open the site at http://localhost:5000/ diff --git a/compile-messages-i2hq.sh b/compile-messages-i2hq.sh new file mode 100755 index 00000000..ff20d7ac --- /dev/null +++ b/compile-messages-i2hq.sh @@ -0,0 +1,13 @@ +#!/bin/sh +. ./etc/translation.vars + +export TZ=UTC + +if [ $# -ge 1 ] +then + pybabel compile -D $1 -d $TRANSDIR +else + for domain in $(ls $BABELCFG); do + pybabel compile -D $domain -d $TRANSDIR + done +fi diff --git a/compile-messages.sh b/compile-messages.sh index 819e4e81..8dddd6e3 100755 --- a/compile-messages.sh +++ b/compile-messages.sh @@ -1,4 +1,13 @@ #!/bin/sh -. ./translation.vars +. 
./etc/translation.vars -TZ=UTC env/bin/pybabel compile -d $TRANSDIR +export TZ=UTC + +if [ $# -ge 1 ] +then + $PYBABEL compile -D $1 -d $TRANSDIR +else + for domain in $(ls $BABELCFG); do + $PYBABEL compile -D $domain -d $TRANSDIR + done +fi diff --git a/etc/babel.cfg/about b/etc/babel.cfg/about new file mode 100644 index 00000000..a8cc71cc --- /dev/null +++ b/etc/babel.cfg/about @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/about/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/blog b/etc/babel.cfg/blog new file mode 100644 index 00000000..58f52396 --- /dev/null +++ b/etc/babel.cfg/blog @@ -0,0 +1,2 @@ +[jinja2: **/blog/**.rst] +extensions=jinja2.ext.autoescape,jinja2.ext.with_ diff --git a/etc/babel.cfg/comparison b/etc/babel.cfg/comparison new file mode 100644 index 00000000..e113121d --- /dev/null +++ b/etc/babel.cfg/comparison @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/comparison/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/docs b/etc/babel.cfg/docs new file mode 100644 index 00000000..e37d7502 --- /dev/null +++ b/etc/babel.cfg/docs @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/docs/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/get-involved b/etc/babel.cfg/get-involved new file mode 100644 index 00000000..2b9e70eb --- /dev/null +++ b/etc/babel.cfg/get-involved @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/get-involved/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/misc b/etc/babel.cfg/misc new file mode 100644 index 00000000..ce64514f --- /dev/null +++ b/etc/babel.cfg/misc @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/misc/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/priority b/etc/babel.cfg/priority new file mode 100644 index 00000000..6fbd837b --- /dev/null +++ b/etc/babel.cfg/priority @@ -0,0 +1,14 @@ +[jinja2: **/pages/blog/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension +[jinja2: **/pages/downloads/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension +[jinja2: **/pages/global/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension +[jinja2: **/pages/global/macros] +extensions=jinja2.ext.autoescape,jinja2.ext.with_ +[jinja2: **/pages/meetings/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension +[jinja2: **/pages/papers/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension +[jinja2: **/pages/site/*.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/babel.cfg/research b/etc/babel.cfg/research new file mode 100644 index 00000000..59607bd9 --- /dev/null +++ b/etc/babel.cfg/research @@ -0,0 +1,2 @@ +[jinja2: **/pages/site/research/**.html] +extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension diff --git a/etc/multi-domain.patch b/etc/multi-domain.patch new file mode 100644 index 00000000..205f4275 --- /dev/null +++ b/etc/multi-domain.patch @@ -0,0 +1,115 @@ +--- env/lib/python2.7/site-packages/flaskext/babel.py 2013-07-13 00:00:00 +0000 ++++ env/lib/python2.7/site-packages/flaskext/babel.py 2013-07-13 00:00:00 
+0000 +@@ -19,6 +19,7 @@ + from datetime import datetime + from flask import _request_ctx_stack + from babel import dates, numbers, support, Locale ++from gettext import NullTranslations + from werkzeug import ImmutableDict + try: + from pytz.gae import pytz +@@ -55,9 +56,11 @@ + }) + + def __init__(self, app=None, default_locale='en', default_timezone='UTC', +- date_formats=None, configure_jinja=True): ++ date_formats=None, configure_jinja=True, ++ default_domain=support.Translations.DEFAULT_DOMAIN): + self._default_locale = default_locale + self._default_timezone = default_timezone ++ self._default_domain = default_domain + self._date_formats = date_formats + self._configure_jinja = configure_jinja + self.app = app +@@ -77,6 +80,7 @@ + + app.config.setdefault('BABEL_DEFAULT_LOCALE', self._default_locale) + app.config.setdefault('BABEL_DEFAULT_TIMEZONE', self._default_timezone) ++ app.config.setdefault('BABEL_DEFAULT_DOMAIN', self._default_domain) + if self._date_formats is None: + self._date_formats = self.default_date_formats.copy() + +@@ -95,6 +99,7 @@ + + self.locale_selector_func = None + self.timezone_selector_func = None ++ self.domain_selector_func = None + + if self._configure_jinja: + app.jinja_env.filters.update( +@@ -142,6 +147,19 @@ + self.timezone_selector_func = f + return f + ++ def domainselector(self, f): ++ """Registers a callback function for domain selection. The default ++ behaves as if a function was registered that returns `None` all the ++ time. If `None` is returned, the domain falls back to the one from ++ the configuration. ++ ++ This has to return the domain as a list of strings (eg: ``['messages']``) ++ """ ++ assert self.domain_selector_func is None, \ ++ 'a localeselector function is already registered' ++ self.domain_selector_func = f ++ return f ++ + + def list_translations(self): + """Returns a list of all the locales translations exist for. The +@@ -178,6 +196,13 @@ + """ + return timezone(self.app.config['BABEL_DEFAULT_TIMEZONE']) + ++ @property ++ def default_domain(self): ++ """The default domain from the configuration as instance of a ++ `string` object. ++ """ ++ return self.app.config['BABEL_DEFAULT_DOMAIN'] ++ + + def get_translations(): + """Returns the correct gettext translations that should be used for +@@ -191,7 +216,10 @@ + translations = getattr(ctx, 'babel_translations', None) + if translations is None: + dirname = os.path.join(ctx.app.root_path, 'translations') +- translations = support.Translations.load(dirname, [get_locale()]) ++ locale = get_locale() ++ for domain in get_domains(): ++ dt = support.Translations.load(dirname, [locale], domain) ++ translations = dt if translations is None or not hasattr(translations, 'merge') else translations.merge(dt) + ctx.babel_translations = translations + return translations + +@@ -243,6 +271,29 @@ + return tzinfo + + ++def get_domains(): ++ """Returns the domains that should be used for this request as ++ `list` object. This returns `None` if used outside of ++ a request. ++ """ ++ ctx = _request_ctx_stack.top ++ if ctx is None: ++ return None ++ domains = getattr(ctx, 'babel_domains', None) ++ if domains is None: ++ babel = ctx.app.extensions['babel'] ++ if babel.domain_selector_func is None: ++ domains = [babel.default_domain] ++ else: ++ rv = babel.domain_selector_func() ++ if rv is None: ++ domains = [babel.default_domain] ++ else: ++ domains = rv ++ ctx.babel_domains = domains ++ return domains ++ ++ + def refresh(): + """Refreshes the cached timezones and locale information. 
This can + be used to switch a translation between a request and if you want diff --git a/project.vars b/etc/project.vars similarity index 100% rename from project.vars rename to etc/project.vars diff --git a/reqs.txt b/etc/reqs.txt similarity index 90% rename from reqs.txt rename to etc/reqs.txt index df74394b..baa1939c 100644 --- a/reqs.txt +++ b/etc/reqs.txt @@ -1,3 +1,4 @@ +pytz>=2012 Flask==0.9 Flask-Babel==0.8 Flask-Cache==0.10.1 diff --git a/etc/translation.vars b/etc/translation.vars new file mode 100644 index 00000000..cc7998cc --- /dev/null +++ b/etc/translation.vars @@ -0,0 +1,15 @@ +PROJECT=I2P +VERSION=website +BABELCFG=etc/babel.cfg +POTDIR=pots +PROJDIR=i2p2www +TRANSDIR=$PROJDIR/translations + +if [ -x env/bin/pybabel ]; then + PYBABEL=env/bin/pybabel +elif [ $(which pybabel) ]; then + PYBABEL=$(which pybabel) +else + echo "ERROR: pybabel was not found. Please run setup_venv.sh" >&2 + exit 1 +fi diff --git a/etc/update.vars b/etc/update.vars new file mode 100644 index 00000000..1d9b0230 --- /dev/null +++ b/etc/update.vars @@ -0,0 +1,3 @@ +MTNURL=127.0.0.1:8998 +MTNBRANCH=i2p.www.revamp +TOUCHFILE=/tmp/2fcd2f17-c293-4f77-b4c9-9b266ba70daa diff --git a/extract-messages.sh b/extract-messages.sh index 3c98a2a3..bdb277be 100755 --- a/extract-messages.sh +++ b/extract-messages.sh @@ -1,8 +1,36 @@ #!/bin/sh -. ./translation.vars +. ./etc/translation.vars -TZ=UTC ./pybabel extract --msgid-bugs-address="http://trac.i2p2.de" \ +export TZ=UTC + +if [ ! -e $POTDIR ]; then + mkdir $POTDIR +fi + +# By setting the PYTHONPATH here we can help pybabel find 'our' highlighting +# extension and we can use any pybabel +export PYTHONPATH=i2p2www:$PYTHONPATH + +if [ $# -ge 1 ] +then + $PYBABEL extract --msgid-bugs-address="http://trac.i2p2.de" \ --project=$PROJECT \ --version=$VERSION \ - -F $BABELCFG \ - -o $POTFILE $PROJDIR + -F $BABELCFG/$1 \ + -o $POTDIR/$1.pot $PROJDIR +else + for domain in $(ls $BABELCFG); do + if [ -e $POTDIR/$domain.pot ]; then + mv $POTDIR/$domain.pot $POTDIR/$domain.pot.old + fi + $PYBABEL extract --msgid-bugs-address="http://trac.i2p2.de" \ + --project=$PROJECT \ + --version=$VERSION \ + -F $BABELCFG/$domain \ + -o $POTDIR/$domain.pot $PROJDIR + diff -u $POTDIR/$domain.pot.old $POTDIR/$domain.pot | grep '^+' | grep -v '^+++' | grep -v '+"POT-Creation-Date' >/dev/null + if [ $? 
-eq 1 ]; then + mv $POTDIR/$domain.pot.old $POTDIR/$domain.pot + fi + done +fi diff --git a/i2p2www/__init__.py b/i2p2www/__init__.py index 8b0235fb..48f1a071 100644 --- a/i2p2www/__init__.py +++ b/i2p2www/__init__.py @@ -1,17 +1,33 @@ +# -*- coding: utf-8 -*- from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory, safe_join -from flaskext.babel import Babel -from flask.ext.cache import Cache +try: + from flaskext.babel import Babel +except ImportError: + from flask_babel import Babel +try: + from flask.ext.cache import Cache +except ImportError: + from flask_cache import Cache from docutils.core import publish_parts import os.path import os +try: + from i2p2www import settings +except ImportError: + settings = None + ########### # Constants -CURRENT_I2P_VERSION = '0.9.6' +CURRENT_I2P_VERSION = '0.9.9' -CANONICAL_DOMAIN = 'www.i2p2.de' +CANONICAL_DOMAIN = 'new.i2p-projekt.de' + +CACHE_CONFIG = settings.CACHE_CONFIG if settings and hasattr(settings, 'CACHE_CONFIG') else { + 'CACHE_DEFAULT_TIMEOUT': 600, + } BLOG_POSTS_PER_FEED = 10 BLOG_POSTS_PER_PAGE = 10 @@ -20,24 +36,54 @@ MEETINGS_PER_PAGE = 20 SUPPORTED_LANGS = [ 'en', 'es', -# 'zh', + 'zh_CN', 'de', 'fr', -# 'it', -# 'nl', -# 'ru', - 'sv', -# 'cs', -# 'ar', -# 'el', + 'it', + 'ja', + 'pl', + 'pt', + 'pt_BR', + 'ro', + 'ru', + 'sv_SE', ] +SUPPORTED_LANG_NAMES = { + 'en': u'English', + 'es': u'Castellano', + 'zh_CN': u'Chinese', + 'de': u'Deutsch', + 'fr': u'Français', + 'it': u'Italiano', + 'ja': u'Japanese', + 'pl': u'Polish', + 'pt': u'Portugese', + 'pt_BR': u'Brazilian Portugese', + 'ro': u'Romanian', + 'ru': u'Russian', + 'sv_SE': u'Svenska', + } + +DEFAULT_GETTEXT_DOMAIN = 'priority' +GETTEXT_DOMAIN_MAPPING = { + 'about': ['about'], + 'blog': ['blog'], + 'comparison': ['comparison'], + 'docs': ['docs'], + 'get-involved': ['get-involved'], + 'misc': ['misc'], + 'research': ['research'], + } + TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'pages') STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static') BLOG_DIR = os.path.join(os.path.dirname(__file__), 'blog') MEETINGS_DIR = os.path.join(os.path.dirname(__file__), 'meetings/logs') SITE_DIR = os.path.join(TEMPLATE_DIR, 'site') MIRRORS_FILE = os.path.join(TEMPLATE_DIR, 'downloads/mirrors') +ANONBIB_CFG = os.path.join(TEMPLATE_DIR, 'papers/anonbib.cfg') +ANONBIB_FILE = os.path.join(TEMPLATE_DIR, 'papers/anonbib.bib') ################### @@ -50,11 +96,8 @@ class MyFlask(Flask): app = application = MyFlask('i2p2www', template_folder=TEMPLATE_DIR, static_url_path='/_static', static_folder=STATIC_DIR) app.debug = bool(os.environ.get('APP_DEBUG', 'False')) -babel = Babel(app) -cache = Cache(app, config={ - 'CACHE_DEFAULT_TIMEOUT': 600, - #'CACHE_TYPE': '', # See http://packages.python.org/Flask-Cache/#configuring-flask-cache - }) +babel = Babel(app, default_domain=DEFAULT_GETTEXT_DOMAIN) +cache = Cache(app, config=CACHE_CONFIG) ################# @@ -67,7 +110,22 @@ def get_locale(): return g.lang # otherwise try to guess the language from the user accept # header the browser transmits. The best match wins. 
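    # (For example, an Accept-Language header of "de-DE,de;q=0.8,en;q=0.5"
    # should resolve to 'de' here, since 'de' is listed in SUPPORTED_LANGS.)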
- return request.accept_languages.best_match(['en', 'es', 'zh', 'de', 'fr', 'it', 'nl', 'ru', 'sv', 'cs', 'ar']) + return request.accept_languages.best_match(SUPPORTED_LANGS) + +@babel.domainselector +def get_domains(): + domains = [] + frags = request.path.split('/', 2) + if len(frags) == 3: + path = frags[2] + for subpath in GETTEXT_DOMAIN_MAPPING: + if path.startswith(subpath): + domains.extend(GETTEXT_DOMAIN_MAPPING[subpath]) + # Always end with the priority domain, as it contains + # various template strings and is likely to be the most + # up-to-date (in case of any common translation strings). + domains.append(DEFAULT_GETTEXT_DOMAIN) + return domains ########################## @@ -161,7 +219,6 @@ def page_not_found(error): def server_error(error): return render_template('global/error_500.html'), 500 - # Import these to ensure they get loaded import templatevars import urls diff --git a/i2p2www/anonbib/BibTeX.py b/i2p2www/anonbib/BibTeX.py new file mode 100644 index 00000000..6d1df089 --- /dev/null +++ b/i2p2www/anonbib/BibTeX.py @@ -0,0 +1,1269 @@ +#!/usr/bin/python2 +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +"""BibTeX.py -- parse and manipulate BibTeX files and entries. + + Based on perl code by Eddie Kohler; heavily modified. +""" + +import cStringIO +import re +import sys +import os + +import config + +import rank + +__all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize', + 'ParsedAuthor', 'FileIter', 'Parser', 'parseFile', + 'splitBibTeXEntriesBy', 'sortBibTexEntriesBy', ] + +# List: must map from month number to month name. +MONTHS = [ None, + "January", "February", "March", "April", "May", "June", + "July", "August", "September", "October", "November", "December"] + +# Fields that we only care about for making web pages (BibTeX doesn't +# recognize them.) +WWW_FIELDS = [ 'www_section', 'www_important', 'www_remarks', + 'www_abstract_url', 'www_html_url', 'www_pdf_url', 'www_ps_url', + 'www_txt_url', 'www_ps_gz_url', 'www_amazon_url', + 'www_excerpt_url', 'www_publisher_url', + 'www_cache_section', 'www_tags' ] + +def url_untranslate(s): + """Change a BibTeX key into a string suitable for use in a URL.""" + s = re.sub(r'([%<>`#, &_\';])', + lambda m: "_%02x"%ord(m.group(1)), + s) + s = s.replace("/",":") + return s + +class ParseError(Exception): + """Raised on invalid BibTeX""" + pass + + +def smartJoin(*lst): + """Equivalent to os.path.join, but handle"." and ".." entries a bit better. + """ + lst = [ item for item in lst if item != "." ] + idx = 0 + while idx < len(lst): + if idx > 0 and lst[idx] == "..": + del lst[idx] + else: + idx += 1 + return os.path.join(*lst) + +class BibTeX: + """A parsed BibTeX file""" + def __init__(self): + self.entries = [] # List of BibTeXEntry + self.byKey = {} # Map from BibTeX key to BibTeX entry. 
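+    # Typical use (an illustrative sketch): parseFile() or parseString()
+    # below returns one of these; afterwards self.entries holds the
+    # BibTeXEntry objects and self.byKey maps lower-cased keys to entries,
+    # e.g.:
+    #     bib = parseFile("anonbib.bib")
+    #     ent = bib.byKey["examplekey2003"]   # hypothetical key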
+ def addEntry(self, ent): + """Add a BibTeX entry to this file.""" + k = ent.key + if self.byKey.get(ent.key.lower()): + print >> sys.stderr, "Already have an entry named %s"%k + return + self.entries.append(ent) + self.byKey[ent.key.lower()] = ent + def resolve(self): + """Validate all entries in this file, and resolve cross-references""" + seen = {} + for ent in self.entries: + seen.clear() + while ent.get('crossref'): + try: + cr = self.byKey[ent['crossref'].lower()] + except KeyError: + print "No such crossref: %s"% ent['crossref'] + break + if seen.get(cr.key): + raise ParseError("Circular crossref at %s" % ent.key) + seen[cr.key] = 1 + del ent.entries['crossref'] + + if cr.entryLine < ent.entryLine: + print "Warning: crossref %s used after declaration"%cr.key + + for k in cr.entries.keys(): + if ent.entries.has_key(k): + print "ERROR: %s defined both in %s and in %s"%( + k,ent.key,cr.key) + else: + ent.entries[k] = cr.entries[k] + + ent.resolve() + newEntries = [] + rk = config.REQUIRE_KEY + if rk is None: + # hack: if no key is required, require "title", since every + # entry will have a title. + rk = "title" + + for ent in self.entries: + if ent.type in config.OMIT_ENTRIES or not ent.has_key(rk): + ent.check() + del self.byKey[ent.key.lower()] + else: + newEntries.append(ent) + self.entries = newEntries + +def buildAuthorTable(entries): + """Given a list of BibTeXEntry, return a map from parsed author name to + parsed canonical name. + """ + authorsByLast = {} + for e in entries: + for a in e.parsedAuthor: + authorsByLast.setdefault(tuple(a.last), []).append(a) + # map from author to collapsed author. + result = {} + for k,v in config.COLLAPSE_AUTHORS.items(): + a = parseAuthor(k)[0] + c = parseAuthor(v)[0] + result[c] = c + result[a] = c + + for e in entries: + for author in e.parsedAuthor: + if result.has_key(author): + continue + + c = author + for a in authorsByLast[tuple(author.last)]: + if a is author: + continue + c = c.collapsesTo(a) + result[author] = c + + if 0: + for a,c in result.items(): + if a != c: + print "Collapsing authors: %s => %s" % (a,c) + if 0: + print parseAuthor("Franz Kaashoek")[0].collapsesTo( + parseAuthor("M. Franz Kaashoek")[0]) + print parseAuthor("Paul F. Syverson")[0].collapsesTo( + parseAuthor("Paul Syverson")[0]) + print parseAuthor("Paul Syverson")[0].collapsesTo( + parseAuthor("Paul F. Syverson")[0]) + + return result + +def splitEntriesBy(entries, field): + """Take a list of BibTeX entries and the name of a bibtex field; return + a map from vield value to list of entry.""" + result = {} + for ent in entries: + key = ent.get(field) + try: + result[key].append(ent) + except: + result[key] = [ent] + return result + +def splitSortedEntriesBy(entries, field): + """Take inputs as in splitEntriesBy, where 'entries' is sorted by 'field'. + Return a list of (field-value, entry-list) tuples, in the order + given in 'entries'.""" + result = [] + curVal = "alskjdsakldj" + curList = [] + for ent in entries: + key = ent.get(field) + if key == curVal: + curList.append(ent) + else: + curVal = key + curList = [ent] + result.append((curVal, curList)) + return result + +def sortEntriesBy(entries, field, default): + """Take inputs as in splitEntriesBy, and return a list of entries sorted + by the value of 'field'. Entries without 'field' are sorted as if their + value were 'default'. 
+ """ + tmp = [] + i = 0 + for ent in entries: + i += 1 + v = ent.get(field, default) + if v.startswith(""): + v = default + tmp.append((txtize(v), i, ent)) + tmp.sort() + return [ t[2] for t in tmp ] + +def splitEntriesByAuthor(entries): + """Take a list of entries, sort them by author names, and return: + a sorted list of (authorname-in-html, bibtex-entry-list) tuples, + a map from authorname-in-html to name-for-url. + Entries with multiple authors appear once per author. + """ + collapsedAuthors = buildAuthorTable(entries) + entries = sortEntriesByDate(entries) + result = {} # Name in sorting order -> entries + htmlResult = {} # name in sorting order -> Full name + url_map = {} # Full name -> Url + for ent in entries: + for a in ent.parsedAuthor: + canonical = collapsedAuthors[a] + url = canonical.getHomepage() + sortkey = canonical.getSortingName() + secname = canonical.getSectionName() + if url: + url_map[secname] = url + + htmlResult[sortkey] = secname + result.setdefault(sortkey, []).append(ent) + sortnames = result.keys() + sortnames.sort() + sections = [ (htmlResult[n], result[n]) for n in sortnames ] + return sections, url_map + +## def sortEntriesByAuthor(entries): +## tmp = [] +## i = 0 +## for ent in entries: +## i += 1 +## authors = [ txtize(" ".join(a.von+a.last+a.first+a.jr)) +## for a in ent.parsedAuthor ] +## tmp.append((tuple(authors), i, ent)) +## tmp.sort() +## return [ t[2] for t in tmp ] + +def sortEntriesByDate(entries): + """Sort a list of entries by their publication date.""" + tmp = [] + i = 0 + for ent in entries: + i += 1 + if (ent.get('month') == "forthcoming" or + ent.get('year') == "forthcoming"): + tmp.append((20000*13, i, ent)) + continue + try: + monthname = ent.get("month") + if monthname is not None: + match = re.match(r"(\w+)--\w+", monthname) + if match: + monthname = match.group(1) + mon = MONTHS.index(monthname) + except ValueError: + print "Unknown month %r in %s"%(ent.get("month"), ent.key) + mon = 0 + + try: + date = int(ent['year'])*13 + mon + except KeyError: + print "ERROR: No year field in %s"%ent.key + date = 10000*13 + except ValueError: + date = 10000*13 + tmp.append((date, i, ent)) + tmp.sort() + return [ t[2] for t in tmp ] + + +# List of fields that appear when we display the entries as BibTeX. +DISPLAYED_FIELDS = [ 'title', 'author', 'journal', 'booktitle', +'school', 'institution', 'organization', 'volume', 'number', 'year', +'month', 'address', 'location', 'chapter', 'edition', 'pages', 'editor', +'howpublished', 'key', 'publisher', 'type', 'note', 'series' ] + +class BibTeXEntry: + """A single BibTeX entry.""" + def __init__(self, type, key, entries): + self.type = type # What kind of entry is it? (@book,@injournal,etc) + self.key = key # What key does it have? + self.entries = entries # Map from key to value. 
+ self.entryLine = 0 # Defined on this line number + def get(self, k, v=None): + return self.entries.get(k,v) + def has_key(self, k): + return self.entries.has_key(k) + def __getitem__(self, k): + return self.entries[k] + def __setitem__(self, k, v): + self.entries[k] = v + def __str__(self): + return self.format(70,1) + def getURL(self): + """Return the best URL to use for this paper, or None.""" + best = None + for field in ['www_pdf_url', 'www_ps_gz_url', 'www_ps_url', + 'www_html_url', 'www_txt_url', ]: + u = self.get(field) + if u: + if not best: + best = u + elif (best.startswith("http://citeseer.nj.nec.com/") + and not u.startswith("http://citeseer.nj.nec.com/")): + best = u + return best + + def format(self, width=70, indent=8, v=0, invStrings={}): + """Format this entry as BibTeX.""" + d = ["@%s{%s,\n" % (self.type, self.key)] + if v: + df = DISPLAYED_FIELDS[:] + for k in self.entries.keys(): + if k not in df: + df.append(k) + else: + df = DISPLAYED_FIELDS + for f in df: + if not self.entries.has_key(f): + continue + v = self.entries[f] + if v.startswith(""): + d.append("%%%%% ERROR: Missing field\n") + d.append("%% %s = {?????},\n"%f) + continue + np = v.translate(ALLCHARS, PRINTINGCHARS) + if np: + d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np)) + d.append(" ") + v = v.replace("&", "&") + if invStrings.has_key(v): + s = "%s = %s,\n" %(f, invStrings[v]) + else: + s = "%s = {%s},\n" % (f, v) + d.append(_split(s,width,indent)) + d.append("}\n") + return "".join(d) + def resolve(self): + """Handle post-processing for this entry""" + a = self.get('author') + if a: + self.parsedAuthor = parseAuthor(a) + #print a + #print " => ",repr(self.parsedAuthor) + else: + self.parsedAuthor = None + + def isImportant(self): + """Return 1 iff this entry is marked as important""" + imp = self.get("www_important") + if imp and imp.strip().lower() not in ("no", "false", "0"): + return 1 + return 0 + + def check(self): + """Print any errors for this entry, and return true if there were + none.""" + errs = self._check() + for e in errs: + print e + return not errs + + def _check(self): + errs = [] + if self.type == 'inproceedings': + fields = 'booktitle', 'year' + elif self.type == 'incollection': + fields = 'booktitle', 'year' + elif self.type == 'proceedings': + fields = 'booktitle', 'editor' + elif self.type == 'article': + fields = 'journal', 'year' + elif self.type == 'techreport': + fields = 'institution', + elif self.type == 'misc': + fields = 'howpublished', + elif self.type in ('mastersthesis', 'phdthesis'): + fields = () + else: + fields = () + errs.append("ERROR: odd type %s"%self.type) + if self.type != 'proceedings': + fields += 'title', 'author', 'www_section', 'year' + + for field in fields: + if self.get(field) is None or \ + self.get(field).startswith(""): + errs.append("ERROR: %s has no %s field" % (self.key, field)) + self.entries[field] = "%s:??"%field + + if self.type == 'inproceedings': + if self.get("booktitle"): + if not self['booktitle'].startswith("Proceedings of") and \ + not self['booktitle'].startswith("{Proceedings of"): + errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle'])) + + if self.has_key("pages") and not re.search(r'\d+--\d+', self['pages']): + errs.append("ERROR: Misformed pages in %s"%self.key) + + if self.type == 'proceedings': + if self.get('title'): + errs.append("ERROR: %s is a proceedings: it should have a booktitle, not a title." 
% self.key) + + for field, value in self.entries.items(): + if value.translate(ALLCHARS, PRINTINGCHARS): + errs.append("ERROR: %s.%s has non-ASCII characters"%( + self.key, field)) + if field.startswith("www_") and field not in WWW_FIELDS: + errs.append("ERROR: unknown www field %s"% field) + if value.strip()[-1:] == '.' and \ + field not in ("notes", "www_remarks", "author"): + errs.append("ERROR: %s.%s has an extraneous period"%(self.key, + field)) + return errs + + def biblio_to_html(self): + """Return the HTML for the citation portion of entry.""" + if self.type in ('inproceedings', 'incollection'): + booktitle = self['booktitle'] + bookurl = self.get('bookurl') + if bookurl: + m = PROCEEDINGS_RE.match(booktitle) + if m: + res = ["In the ", m.group(1), + ''%bookurl, m.group(2), ""] + else: + res = ['In the %s' % (bookurl,booktitle)] + else: + res = ["In the ", booktitle ] + + if self.get("edition"): + res.append(",") + res.append(self['edition']) + if self.get("location"): + res.append(", ") + res.append(self['location']) + elif self.get("address"): + res.append(", ") + res.append(self['address']) + res.append(", %s %s" % (self.get('month',""), self['year'])) + if not self.get('pages'): + pass + elif "-" in self['pages']: + res.append(", pages %s"%self['pages']) + else: + res.append(", page %s"%self['pages']) + elif self.type == 'article': + res = ["In "] + if self.get('journalurl'): + res.append('%s'% + (self['journalurl'],self['journal'])) + else: + res.append(self['journal']) + if self.get('volume'): + res.append(" %s"%self['volume']) + if self.get('number'): + res.append("(%s)"%self['number']) + res.append(", %s %s" % (self.get('month',""), self['year'])) + if not self.get('pages'): + pass + elif "-" in self['pages']: + res.append(", pages %s"%self['pages']) + else: + res.append(", page %s"%self['pages']) + elif self.type == 'techreport': + res = [ "%s %s %s" % (self['institution'], + self.get('type', 'technical report'), + self.get('number', "")) ] + if self.get('month') or self.get('year'): + res.append(", %s %s" % (self.get('month', ''), + self.get('year', ''))) + elif self.type == 'mastersthesis' or self.type == 'phdthesis': + if self.get('type'): + res = [self['type']] + elif self.type == 'mastersthesis': + res = ["Masters's thesis"] + else: + res = ["Ph.D. thesis"] + if self.get('school'): + res.append(", %s"%(self['school'])) + if self.get('month') or self.get('year'): + res.append(", %s %s" % (self.get('month', ''), + self.get('year', ''))) + elif self.type == 'book': + res = [self['publisher']] + if self.get('year'): + res.append(" "); + res.append(self.get('year')); + # res.append(", %s"%(self.get('year'))) + if self.get('series'): + res.append(","); + res.append(self['series']); + elif self.type == 'misc': + res = [self['howpublished']] + if self.get('month') or self.get('year'): + res.append(", %s %s" % (self.get('month', ''), + self.get('year', ''))) + if not self.get('pages'): + pass + elif "-" in self['pages']: + res.append(", pages %s"%self['pages']) + else: + res.append(", page %s"%self['pages']) + else: + res = ["<Odd type %s>"%self.type] + + res[0:0] = [""] + res.append(".") + + bibtexurl = "./bibtex#%s"%url_untranslate(self.key) + res.append((" " + "(BibTeX entry)" + "") %bibtexurl) + return htmlize("".join(res)) + + def to_html(self, cache_path="./cache", base_url="."): + """Return the HTML for this entry.""" + imp = self.isImportant() + draft = self.get('year') == 'forthcoming' + if imp: + res = ["
  • " ] + elif draft: + res = ["

  • " ] + else: + res = ["

  • "] + + # TODO: CITE_CACHE_DIR has permission problems on eche's server. + if False: #imp or not draft: + # Add a picture of the rank + # Only if year is known or paper important! + r = rank.get_rank_html(self['title'], self.get('year'), + update=False, base_url=base_url) + if r is not None: + res.append(r) + + res.append("%s"%( + url_untranslate(self.key),htmlize(self['title']))) + + for cached in 0,1: + availability = [] + if not cached: + for which in [ "amazon", "excerpt", "publisher" ]: + key = "www_%s_url"%which + if self.get(key): + url=self[key] + url = unTeXescapeURL(url) + availability.append('%s' %(url,which)) + + cache_section = self.get('www_cache_section', ".") + if cache_section not in config.CACHE_SECTIONS: + if cache_section != ".": + print >>sys.stderr, "Unrecognized cache section %s"%( + cache_section) + cache_section="." + + for key, name, ext in (('www_abstract_url', 'abstract','abstract'), + ('www_html_url', 'HTML', 'html'), + ('www_pdf_url', 'PDF', 'pdf'), + ('www_ps_url', 'PS', 'ps'), + ('www_txt_url', 'TXT', 'txt'), + ('www_ps_gz_url', 'gzipped PS','ps.gz') + ): + if cached: + #XXXX the URL needs to be relative to the absolute + #XXXX cache path. + url = smartJoin(cache_path,cache_section, + "%s.%s"%(self.key,ext)) + fname = smartJoin(config.OUTPUT_DIR, config.CACHE_DIR, + cache_section, + "%s.%s"%(self.key,ext)) + if not os.path.exists(fname): continue + else: + url = self.get(key) + if not url: continue + url = unTeXescapeURL(url) + url = url.replace('&', '&') + availability.append('%s' %(url,name)) + + if availability: + res.append([" ", " "][cached]) + res.append("(") + if cached: res.append("Cached: ") + res.append(", ".join(availability)) + res.append(")") + + res.append("
    by ") + + #res.append("\n\n" % self.parsedAuthor) + htmlAuthors = [ a.htmlizeWithLink() for a in self.parsedAuthor ] + + if len(htmlAuthors) == 1: + res.append(htmlAuthors[0]) + elif len(htmlAuthors) == 2: + res.append(" and ".join(htmlAuthors)) + else: + res.append(", ".join(htmlAuthors[:-1])) + res.append(", and ") + res.append(htmlAuthors[-1]) + + if res[-1][-1] != '.': + res.append(".") + res.append("
    \n") + res.append(self.biblio_to_html()) + res.append("·"%url_untranslate(self.key)) + res.append("

    ") + + if self.get('www_remarks'): + res.append("

    %s

    "%htmlize( + self['www_remarks'])) + + if imp or draft: + res.append("") + res.append("
  • \n\n") + + return "".join(res) + +def unTeXescapeURL(s): + """Turn a URL as formatted in TeX into a real URL.""" + s = s.replace("\\_", "_") + s = s.replace("\\-", "") + s = s.replace("\{}", "") + s = s.replace("{}", "") + return s + +def TeXescapeURL(s): + """Escape a URL for use in TeX""" + s = s.replace("_", "\\_") + s = s.replace("~", "\{}~") + return s + +RE_LONE_AMP = re.compile(r'&([^a-z0-9])') +RE_LONE_I = re.compile(r'\\i([^a-z0-9])') +RE_ACCENT = re.compile(r'\\([\'`~^"c])([^{]|{.})') +RE_LIGATURE = re.compile(r'\\(AE|ae|OE|oe|AA|aa|O|o|ss)([^a-z0-9])') +ACCENT_MAP = { "'" : 'acute', + "`" : 'grave', + "~" : 'tilde', + "^" : 'circ', + '"' : 'uml', + "c" : 'cedil', + } +UNICODE_MAP = { 'ń' : 'ń', } +HTML_LIGATURE_MAP = { + 'AE' : 'Æ', + 'ae' : 'æ', + 'OE' : 'Œ', + 'oe' : 'œ', + 'AA' : 'Å', + 'aa' : 'å', + 'O' : 'Ø', + 'o' : 'ø', + 'ss' : 'ß', + } +RE_TEX_CMD = re.compile(r"(?:\\[a-zA-Z@]+|\\.)") +RE_PAGE_SPAN = re.compile(r"(\d)--(\d)") +def _unaccent(m): + accent,char = m.groups() + if char[0] == '{': + char = char[1] + accented = "&%s%s;" % (char, ACCENT_MAP[accent]) + return UNICODE_MAP.get(accented, accented) +def _unlig_html(m): + return "%s%s"%(HTML_LIGATURE_MAP[m.group(1)],m.group(2)) +def htmlize(s): + """Turn a TeX string into good-looking HTML.""" + s = RE_LONE_AMP.sub(lambda m: "&%s" % m.group(1), s) + s = RE_LONE_I.sub(lambda m: "i%s" % m.group(1), s) + s = RE_ACCENT.sub(_unaccent, s) + s = unTeXescapeURL(s) + s = RE_LIGATURE.sub(_unlig_html, s); + s = RE_TEX_CMD.sub("", s) + s = s.translate(ALLCHARS, "{}") + s = RE_PAGE_SPAN.sub(lambda m: "%s-%s"%(m.groups()), s) + s = s.replace("---", "—"); + s = s.replace("--", "–"); + return s + +def author_url(author): + """Given an author's name, return a URL for his/her homepage.""" + for pat, url in config.AUTHOR_RE_LIST: + if pat.search(author): + return url + return None + +def txtize(s): + """Turn a TeX string into decnent plaintext.""" + s = RE_LONE_I.sub(lambda m: "i%s" % m.group(1), s) + s = RE_ACCENT.sub(lambda m: "%s" % m.group(2), s) + s = RE_LIGATURE.sub(lambda m: "%s%s"%m.groups(), s) + s = RE_TEX_CMD.sub("", s) + s = s.translate(ALLCHARS, "{}") + return s + +PROCEEDINGS_RE = re.compile( + r'((?:proceedings|workshop record) of(?: the)? )(.*)', + re.I) + +class ParsedAuthor: + """The parsed name of an author. + + Eddie deserves credit for this incredibly hairy business. + """ + def __init__(self, first, von, last, jr): + self.first = first + self.von = von + self.last = last + self.jr = jr + self.collapsable = 1 + + self.html = htmlize(str(self)) + self.txt = txtize(str(self)) + + s = self.html + for pat in config.NO_COLLAPSE_AUTHORS_RE_LIST: + if pat.search(s): + self.collapsable = 0 + break + + def __eq__(self, o): + return ((self.first == o.first) and + (self.last == o.last) and + (self.von == o.von) and + (self.jr == o.jr)) + + def __hash__(self): + return hash(repr(self)) + + def collapsesTo(self, o): + """Return true iff 'o' could be a more canonical version of this author + """ + if not self.collapsable or not o.collapsable: + return self + + if self.last != o.last or self.von != o.von or self.jr != o.jr: + return self + if not self.first: + return o + + if len(self.first) == len(o.first): + n = [] + for a,b in zip(self.first, o.first): + if a == b: + n.append(a) + elif len(a) == 2 and a[1] == '.' and a[0] == b[0]: + n.append(b) + elif len(b) == 2 and b[1] == '.' 
and a[0] == b[0]: + n.append(a) + else: + return self + if n == self.first: + return self + elif n == o.first: + return o + else: + return self + else: + realname = max([len(n) for n in self.first+o.first])>2 + if not realname: + return self + + if len(self.first) < len(o.first): + short = self.first; long = o.first + else: + short = o.first; long = self.first + + initials_s = "".join([n[0] for n in short]) + initials_l = "".join([n[0] for n in long]) + idx = initials_l.find(initials_s) + if idx < 0: + return self + n = long[:idx] + for i in range(idx, idx+len(short)): + a = long[i]; b = short[i-idx] + if a == b: + n.append(a) + elif len(a) == 2 and a[1] == '.' and a[0] == b[0]: + n.append(b) + elif len(b) == 2 and b[1] == '.' and a[0] == b[0]: + n.append(a) + else: + return self + n += long[idx+len(short):] + + if n == self.first: + return self + elif n == o.first: + return o + else: + return self + + def __repr__(self): + return "ParsedAuthor(%r,%r,%r,%r)"%(self.first,self.von, + self.last,self.jr) + def __str__(self): + a = " ".join(self.first+self.von+self.last) + if self.jr: + return "%s, %s" % (a,self.jr) + return a + + def getHomepage(self): + s = self.html + for pat, url in config.AUTHOR_RE_LIST: + if pat.search(s): + return url + return None + + def getSortingName(self): + """Return a representation of this author's name in von-last-first-jr + order, unless overridden by ALPH """ + s = self.html + for pat,v in config.ALPHABETIZE_AUTHOR_AS_RE_LIST: + if pat.search(s): + return v + + return txtize(" ".join(self.von+self.last+self.first+self.jr)) + + def getSectionName(self): + """Return a HTML representation of this author's name in + last, first von, jr order""" + secname = " ".join(self.last) + more = self.first+self.von + if more: + secname += ", "+" ".join(more) + if self.jr: + secname += ", "+" ".join(self.jr) + secname = htmlize(secname) + return secname + + def htmlizeWithLink(self): + a = self.html + u = self.getHomepage() + if u: + return "%s"%(u,a) + else: + return a + +def _split(s,w=79,indent=8): + r = [] + s = re.sub(r"\s+", " ", s) + first = 1 + indentation = "" + while len(s) > w: + for i in xrange(w-1, 20, -1): + if s[i] == ' ': + r.append(indentation+s[:i]) + s = s[i+1:] + break + else: + r.append(indentation+s.strip()) + s = "" + if first: + first = 0 + w -= indent + indentation = " "*indent + if (s): + r.append(indentation+s) + r.append("") + return "\n".join(r) + +class FileIter: + def __init__(self, fname=None, file=None, it=None, string=None): + if fname: + file = open(fname, 'r') + if string: + file = cStringIO.StringIO(string) + if file: + it = iter(file.xreadlines()) + self.iter = it + assert self.iter + self.lineno = 0 + self._next = it.next + def next(self): + self.lineno += 1 + return self._next() + + +def parseAuthor(s): + try: + return _parseAuthor(s) + except: + print >>sys.stderr, "Internal error while parsing author %r"%s + raise + +def _parseAuthor(s): + """Take an author string and return a list of ParsedAuthor.""" + items = [] + + s = s.strip() + while s: + s = s.strip() + bracelevel = 0 + for i in xrange(len(s)): + if s[i] == '{': + bracelevel += 1 + elif s[i] == '}': + bracelevel -= 1 + elif bracelevel <= 0 and s[i] in " \t\n,": + break + if i+1 == len(s): + items.append(s) + else: + items.append(s[0:i]) + if (s[i] == ','): + items.append(',') + s = s[i+1:] + + authors = [[]] + for item in items: + if item == 'and': + authors.append([]) + else: + authors[-1].append(item) + + parsedAuthors = [] + # Split into first, von, last, jr + for author in 
authors: + commas = 0 + fvl = [] + vl = [] + f = [] + v = [] + l = [] + j = [] + cur = fvl + for item in author: + if item == ',': + if commas == 0: + vl = fvl + fvl = [] + cur = f + else: + j.extend(f) + cur = f = [] + commas += 1 + else: + cur.append(item) + + if commas == 0: + split_von(f,v,l,fvl) + else: + f_tmp = [] + split_von(f_tmp,v,l,vl) + + parsedAuthors.append(ParsedAuthor(f,v,l,j)) + + return parsedAuthors + +ALLCHARS = "".join(map(chr,range(256))) +PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127))) +LC_CHARS = "abcdefghijklmnopqrstuvwxyz" +SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "@") +RE_ESCAPED = re.compile(r'\\.') +def split_von(f,v,l,x): + in_von = 0 + while x: + tt = t = x[0] + del x[0] + if tt[:2] == '{\\': + tt = tt.translate(ALLCHARS, SV_DELCHARS) + tt = RE_ESCAPED.sub("", tt) + tt = tt.translate(ALLCHARS, "{}") + if tt.translate(ALLCHARS, LC_CHARS) == "": + v.append(t) + in_von = 1 + elif in_von and f is not None: + l.append(t) + l.extend(x) + return + else: + f.append(t) + if not in_von: + l.append(f[-1]) + del f[-1] + + +class Parser: + """Parser class: reads BibTeX from a file and returns a BibTeX object.""" + ## Fields + # strings: maps entry string keys to their values. + # newStrings: all string definitions not in config.INITIAL_STRINGS + # invStrings: map from string values to their keys. + # fileiter: the line iterator we're parsing from. + # result: the BibTeX object that we're parsing into + # litStringLine: the line on which we started parsing a literal string; + # 0 for none. + # entryLine: the line on which the current entry started; 0 for none. + # + # curEntType: the type of the entry we're parsing now. (paper,article,etc) + def __init__(self, fileiter, initial_strings, result=None): + self.strings = config.INITIAL_STRINGS.copy() + self.strings.update(initial_strings) + self.newStrings = {} + self.invStrings = {} + for k,v in config.INITIAL_STRINGS.items(): + self.invStrings[v]=k + self.fileiter = fileiter + if result is None: + result = BibTeX() + self.result = result + self.litStringLine = 0 + self.entryLine = 0 + + def _parseKey(self, line): + it = self.fileiter + line = _advance(it,line) + m = KEY_RE.match(line) + if not m: + raise ParseError("Expected key at line %s"%self.fileiter.lineno) + key, line = m.groups() + return key, line + + def _parseValue(self, line): + it = self.fileiter + bracelevel = 0 + data = [] + while 1: + line = _advance(it,line) + line = line.strip() + assert line + + # Literal string? 
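+            # (A value can be a "quoted string" as handled here, a
+            # {brace-delimited block}, a bare macro name or number matched by
+            # RAW_DATA_RE, or several pieces joined with '#'; each branch
+            # below consumes one piece while tracking brace depth.)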
+ if line[0] == '"': + line=line[1:] + self.litStringLine = it.lineno + while 1: + if bracelevel: + m = BRACE_CLOSE_RE.match(line) + if m: + data.append(m.group(1)) + data.append('}') + line = m.group(2) + bracelevel -= 1 + continue + else: + m = STRING_CLOSE_RE.match(line) + if m: + data.append(m.group(1)) + line = m.group(2) + break + m = BRACE_OPEN_RE.match(line) + if m: + data.append(m.group(1)) + line = m.group(2) + bracelevel += 1 + continue + data.append(line) + data.append(" ") + line = it.next() + self.litStringLine = 0 + elif line[0] == '{': + bracelevel += 1 + line = line[1:] + while bracelevel: + m = BRACE_CLOSE_RE.match(line) + if m: + #print bracelevel, "A", repr(m.group(1)) + data.append(m.group(1)) + bracelevel -= 1 + if bracelevel > 0: + #print bracelevel, "- '}'" + data.append('}') + line = m.group(2) + continue + m = BRACE_OPEN_RE.match(line) + if m: + bracelevel += 1 + #print bracelevel, "B", repr(m.group(1)) + data.append(m.group(1)) + line = m.group(2) + continue + else: + #print bracelevel, "C", repr(line) + data.append(line) + data.append(" ") + line = it.next() + elif line[0] == '#': + print >>sys.stderr, "Weird concat on line %s"%it.lineno + elif line[0] in "},": + if not data: + print >>sys.stderr, "No data after field on line %s"%( + it.lineno) + else: + m = RAW_DATA_RE.match(line) + if m: + s = self.strings.get(m.group(1).lower()) + if s is not None: + data.append(s) + else: + data.append(m.group(1)) + line = m.group(2) + else: + raise ParseError("Questionable line at line %s"%it.lineno) + + # Got a string, check for concatenation. + if line.isspace() or not line: + data.append(" ") + line = _advance(it,line) + line = line.strip() + assert line + if line[0] == '#': + line = line[1:] + else: + data = "".join(data) + data = re.sub(r'\s+', ' ', data) + data = re.sub(r'^\s+', '', data) + data = re.sub(r'\s+$', '', data) + return data, line + + def _parseEntry(self, line): #name, strings, entries + it = self.fileiter + self.entryLine = it.lineno + line = _advance(it,line) + + m = BRACE_BEGIN_RE.match(line) + if not m: + raise ParseError("Expected an opening brace at line %s"%it.lineno) + line = m.group(1) + + proto = { 'string' : 'p', + 'preamble' : 'v', + }.get(self.curEntType, 'kp*') + + v = [] + while 1: + line = _advance(it,line) + + m = BRACE_END_RE.match(line) + if m: + line = m.group(1) + break + if not proto: + raise ParseError("Overlong entry starting on line %s" + % self.entryLine) + elif proto[0] == 'k': + key, line = self._parseKey(line) + v.append(key) + elif proto[0] == 'v': + value, line = self._parseValue(line) + v.append(value) + elif proto[0] == 'p': + key, line = self._parseKey(line) + v.append(key) + line = _advance(it,line) + line = line.lstrip() + if line[0] == '=': + line = line[1:] + value, line = self._parseValue(line) + v.append(value) + else: + assert 0 + line = line.strip() + if line and line[0] == ',': + line = line[1:] + if proto and proto[1:] != '*': + proto = proto[1:] + if proto and proto[1:] != '*': + raise ParseError("Missing arguments to %s on line %s" % ( + self.curEntType, self.entryLine)) + + if self.curEntType == 'string': + self.strings[v[0]] = v[1] + self.newStrings[v[0]] = v[1] + self.invStrings[v[1]] = v[0] + elif self.curEntType == 'preamble': + pass + else: + key = v[0] + d = {} + for i in xrange(1,len(v),2): + d[v[i].lower()] = v[i+1] + ent = BibTeXEntry(self.curEntType, key, d) + ent.entryLine = self.entryLine + self.result.addEntry(ent) + + return line + + def parse(self): + try: + self._parse() + except 
StopIteration: + if self.litStringLine: + raise ParseError("Unexpected EOF in string (started on %s)" % + self.litStringLine) + elif self.entryLine: + raise ParseError("Unexpected EOF at line %s (entry started " + "on %s)" % (self.fileiter.lineno, + self.entryLine)) + + self.result.invStrings = self.invStrings + self.result.newStrings = self.newStrings + + return self.result + + def _parse(self): + it = self.fileiter + line = it.next() + while 1: + # Skip blank lines. + while not line or line.isspace() or OUTER_COMMENT_RE.match(line): + line = it.next() + # Get the first line of an entry. + m = ENTRY_BEGIN_RE.match(line) + if m: + self.curEntType = m.group(1).lower() + line = m.group(2) + line = self._parseEntry(line) + self.entryLine = 0 + else: + raise ParseError("Bad input at line %s (expected a new entry.)" + % it.lineno) + +def _advance(it,line): + while not line or line.isspace() or COMMENT_RE.match(line): + line = it.next() + return line + +# Matches a comment line outside of an entry. +OUTER_COMMENT_RE = re.compile(r'^\s*[\#\%]') +# Matches a comment line inside of an entry. +COMMENT_RE = re.compile(r'^\s*\%') +# Matches the start of an entry. group 1 is the type of the entry. +# group 2 is the rest of the line. +ENTRY_BEGIN_RE = re.compile(r'''^\s*\@([^\s\"\%\'\(\)\,\=\{\}]+)(.*)''') +# Start of an entry. group 1 is the keyword naming the entry. +BRACE_BEGIN_RE = re.compile(r'\s*\{(.*)') +BRACE_END_RE = re.compile(r'\s*\}(.*)') +KEY_RE = re.compile(r'''\s*([^\"\#\%\'\(\)\,\=\{\}\s]+)(.*)''') + +STRING_CLOSE_RE = re.compile(r'^([^\{\}\"]*)\"(.*)') +BRACE_CLOSE_RE = re.compile(r'^([^\{\}]*)\}(.*)') +BRACE_OPEN_RE = re.compile(r'^([^\{\}]*\{)(.*)') +RAW_DATA_RE = re.compile(r'^([^\s\},]+)(.*)') + +def parseFile(filename, result=None): + """Helper function: parse a single BibTeX file""" + f = FileIter(fname=filename) + p = Parser(f, {}, result) + r = p.parse() + r.resolve() + for e in r.entries: + e.check() + return r + +def parseString(string, result=None): + """Helper function: parse BibTeX from a string""" + f = FileIter(string=string) + p = Parser(f, {}, result) + r = p.parse() + r.resolve() + for e in r.entries: + e.check() + return r + +if __name__ == '__main__': + if len(sys.argv)>1: + fname=sys.argv[1] + else: + fname="testbib/pdos.bib" + + r = parseFile(fname) + + for e in r.entries: + if e.type in ("proceedings", "journal"): continue + print e.to_html() + diff --git a/i2p2www/anonbib/Makefile b/i2p2www/anonbib/Makefile new file mode 100644 index 00000000..90cb8e16 --- /dev/null +++ b/i2p2www/anonbib/Makefile @@ -0,0 +1,39 @@ +PYTHON=python +VERSION=0.3-dev + +all: + $(PYTHON) writeHTML.py anonbib.cfg + +clean: + rm -f *~ */*~ *.pyc *.pyo + +update: + $(PYTHON) updateCache.py anonbib.cfg + $(PYTHON) rank.py anonbib.cfg + +suggest: + $(PYTHON) rank.py suggest anonbib.cfg + +test: + $(PYTHON) test.py + +veryclean: clean + rm -f author.html date.html topic.html bibtex.html tmp.bib + +TEMPLATES=_template_.html _template_bibtex.html +CSS=css/main.css css/pubs.css +BIBTEX=anonbib.bib +SOURCE=BibTeX.py config.py metaphone.py reconcile.py updateCache.py \ + writeHTML.py rank.py tests.py +EXTRAS=TODO README Makefile ChangeLog anonbib.cfg gold.gif silver.gif \ + upb.gif ups.gif + +DISTFILES=$(TEMPLATES) $(CSS) $(BIBTEX) $(SOURCE) $(EXTRAS) + +dist: clean + rm -rf anonbib-$(VERSION) + mkdir anonbib-$(VERSION) + tar cf - $(DISTFILES) | (cd anonbib-$(VERSION); tar xf -) + mkdir anonbib-$(VERSION)/cache + tar czf anonbib-$(VERSION).tar.gz anonbib-$(VERSION) + rm -rf anonbib-$(VERSION) diff 
--git a/i2p2www/anonbib/README b/i2p2www/anonbib/README new file mode 100644 index 00000000..b15ea993 --- /dev/null +++ b/i2p2www/anonbib/README @@ -0,0 +1,52 @@ +anonbib 0.3 -- Code to generate the anonymity bibliography + +Copyright (c) 2003-2008 Nick Mathewson +Based on 'PDOSBib' perl code by Eddie Kohler + +This software is licensed under the GNU GPL, version 2 or later. + +To use this software, you need to understand BibTeX and Python a +little. If it breaks, you get to keep both pieces. You will need +Python 2.2 or later. + +To use this package: + - Get a good BibTeX file. You may want to mark it up with some of the + extra keys used in our "anonbib.bib" file. All of the additional + Bibtex keys we use have the prefix "www_"; check out anonbib.bib + for their usage. + + - Edit anonbib.cfg and _template_.html and _template_bibtex.html so they + refer to your files, authors, topics, and so on. + + - Run 'python updateCache.py anonbib.cfg' to create a local cache of the + papers in your bibliography based on their www_*_url entries. (By + default, the script will ignore any entries you have already cached. To + force a fresh download of a cached file, delete it.) + + - Run 'python rank.py anonbib.cfg' to download Google Scholar rankings of + all the papers. + + - Run 'python writeHTML.py anonbib.cfg'. Fix any errors you care about. + + - Re-run these scripts when you change the bibliography. + + - If you want to merge in big BibTeX files, try using the reconcile.py + script. See the comment at the start of the file for usage info. + + +New in 0.3: + - Support for Google Scholar rankings to denote hot/rising papers. + Implemented by George Danezis. + - Make reconcile script generate more useful output. + - Add support for multiple bibliographies generated from a single bibtex + source. This is done via 'tags' on bibtex entries. If an entry is + tagged, it appears in the corresponding bibliographies. This is good + for generating a master bibliography and one or more selected readings + lists from the same source. + - Handle more errors when downloading files. + - When fetching a paper with a .ps url, generate the .ps.gz file + automatically. + - Note an error when a crossref overrides an existing field in an entry. + - Handle the Proceedings type correctly. + - Enforce proper encoding on pages: it must be number--number. + - diff --git a/i2p2www/anonbib/TODO b/i2p2www/anonbib/TODO new file mode 100644 index 00000000..c208fc2e --- /dev/null +++ b/i2p2www/anonbib/TODO @@ -0,0 +1,33 @@ + + +- More general tasks + . Know about @book + . Write unit tests for everything + . Make name parsing vaguely sane + - Maybe uncrossref in tmp.bib + - Maybe pull important papers to the start of their sections? + . Clean \{}~ when going from note to url; add \{}~ when making + note from url. + . Also clean \_ to _ and back + - Look for urls in wherepublished. + . Forgive newlines in wherepublished, note. + - When sorting by date, entries with unknown months go into a magic + "month zero" before January. Is this right? + - Strip unused features. + o Take a configuration file on the command line instead of just + importing config.py. + +- Cache tasks + - Generate a list of broken links + - Re-download all cached items if requested + - Clear dead items from cache + - Use HTTP HEAD requests to decide whetherto update stale + elements in cache. + - Add ability to honor a "www_no_cache={1}" option for entries + if the authors ask us not to cache them. + - Maybe, add ability to cache images from an HTML page. 
+ +- Reconcile tasks + - Document it. + - Notice when there is new or different information of certain kinds + (pages, dates, etc) in the new information. diff --git a/i2p2www/anonbib/__init__.py b/i2p2www/anonbib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/i2p2www/anonbib/config.py b/i2p2www/anonbib/config.py new file mode 100644 index 00000000..c1b1b6ec --- /dev/null +++ b/i2p2www/anonbib/config.py @@ -0,0 +1,56 @@ +# Copyright 2003-2006, Nick Mathewson. See LICENSE for licensing info. + +import re + +_KEYS = [ "ALL_TAGS", + "ALPHABETIZE_AUTHOR_AS","AUTHOR_URLS","CACHE_DIR","CACHE_SECTIONS", + "CACHE_UMASK", + "CITE_CACHE_DIR", + "COLLAPSE_AUTHORS", + "DOWNLOAD_CONNECT_TIMEOUT","INITIAL_STRINGS", + "MASTER_BIB", "NO_COLLAPSE_AUTHORS", "OMIT_ENTRIES", + "OUTPUT_DIR", "TEMPLATE_FILE", "BIBTEX_TEMPLATE_FILE", + "REQUIRE_KEY", "TAG_TITLES", "TAG_DIRECTORIES", "TAG_SHORT_TITLES", + ] + +for _k in _KEYS: + globals()[_k]=None +del _k + +def load(cfgFile): + mod = {} + execfile(cfgFile, mod) + for _k in _KEYS: + try: + globals()[_k]=mod[_k] + except KeyError: + raise KeyError("Configuration option %s is missing"%_k) + + INITIAL_STRINGS.update(_EXTRA_INITIAL_STRINGS) + AUTHOR_RE_LIST[:] = [ + (re.compile(k, re.I), v,) for k, v in AUTHOR_URLS.items() + ] + + NO_COLLAPSE_AUTHORS_RE_LIST[:] = [ + re.compile(pat, re.I) for pat in NO_COLLAPSE_AUTHORS + ] + + ALPHABETIZE_AUTHOR_AS_RE_LIST[:] = [ + (re.compile(k, re.I), v,) for k,v in ALPHABETIZE_AUTHOR_AS.items() + ] + +_EXTRA_INITIAL_STRINGS = { + # MONTHS + 'jan' : 'January', 'feb' : 'February', + 'mar' : 'March', 'apr' : 'April', + 'may' : 'May', 'jun' : 'June', + 'jul' : 'July', 'aug' : 'August', + 'sep' : 'September', 'oct' : 'October', + 'nov' : 'November', 'dec' : 'December', +} + +AUTHOR_RE_LIST = [] + +NO_COLLAPSE_AUTHORS_RE_LIST = [] + +ALPHABETIZE_AUTHOR_AS_RE_LIST = [] diff --git a/i2p2www/anonbib/css/main.css b/i2p2www/anonbib/css/main.css new file mode 100644 index 00000000..ce589cc9 --- /dev/null +++ b/i2p2www/anonbib/css/main.css @@ -0,0 +1,111 @@ +img { + border: 0px; +} + +BODY { + background-color: #FFF; + color: #000; + margin: 0px; +} + +FORM { + margin-top: 0.5em; + margin-bottom: 0.5em; +} + +P, TD { + font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif; +} + +P.contact { + text-align: center; +} + +P.contact A { + font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif; + font-weight: normal; +} + +SPAN.email { + font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace; + font-weight: bold; +} + +P IMG { + vertical-align: text-bottom; +} + +P.crumbbreadth { + margin-top: 0.25em; +} + +.compact { + margin-top: -0.5em; + text-indent: 0em; +} + +SPAN.biblio { + font-style: italic; +} + +SPAN.biblio A { + font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif; + font-weight: normal; + text-decoration: underline; +} + +SPAN.availability { + font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace; + font-weight: normal; +} + +UL { + list-style: outside; +} + +UL.expand { + margin-bottom: 1em; +} + +UL.sections { + list-style: none; +} + +/* Font-level properties */ + +PRE { + font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace; +} + +STRONG, A { + font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Rockwell", "Lucida Sans Unicode", monospace; + font-weight: bold; +} + +A:link { + color: #B00; +} + +A:visited { + color: #903; +} + +H1, H2, H3, H4, H5, H6 { + 
font-family: lucidatypewriter, "Lucida Typewriter", "Lucida Console", Monaco, monospace; +} + +H1 A, H2 A, H3 A, H4 A, H5 A, H6 A { + font-family: lucidatypewriter, "Lucida Typewriter", "Lucida Console", Monaco, monospace; +} + +H1 { + color: #00B; +} + +H2 { + color: #006; +} + +H3 { + color: #006; +} diff --git a/i2p2www/anonbib/gold.gif b/i2p2www/anonbib/gold.gif new file mode 100644 index 00000000..44505dba Binary files /dev/null and b/i2p2www/anonbib/gold.gif differ diff --git a/i2p2www/anonbib/metaphone.py b/i2p2www/anonbib/metaphone.py new file mode 100644 index 00000000..f57135d7 --- /dev/null +++ b/i2p2www/anonbib/metaphone.py @@ -0,0 +1,193 @@ +#!/usr/bin/python2 +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +"""metaphone.py -- Pure-python metaphone implementation. + + (This is not guaranteed to match the real metaphone algorithm; I + haven't tested it thorougly enough. Let me know if you find bugs. + + Based on the original C++ metaphone implementation.) +""" + +TRIPLES = { + 'dge': 'j', + 'dgi': 'j', + 'dgy': 'j', + 'sia': '+x', + 'sio': '+x', + 'tia': '+x', + 'tio': '+x', + 'tch': '', + 'tha': '0', + 'the': '0', + 'thi': '0', + 'tho': '0', + 'thu': '0', + } + +DOUBLES = { + 'ph' : 'f', + 'sh' : 'x' + } + +SINGLETONS = { + 'd': 't', + 'f': 'f', + 'j': 'j', + 'l': 'l', + 'm': 'm', + 'n': 'n', + 'r': 'r', + 'p': 'p', + 'q': 'k', + 'v': 'f', + 'x': 'ks', + 'z': 's', +} + +ALLCHARS = "".join(map(chr, range(256))) +NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()]) +def metaphone(s): + """Return the metaphone equivalent of a provided string""" + s = s.lower() + s = s.translate(ALLCHARS, NONLCCHARS) + + if not s: return "" + + # If ae, gn, kn, pn, wr then drop the first letter. + if s[:2] in ("ae", "gn", "kn", "pn", "wr"): + s = s[1:] + + # Change "x" to "s" + if s[0] == 'x': + s = "s%s" % s[1:] + + # Get rid of "h" in "wh". + if s[:2] == 'wh': + s = "w%s" % s[1:] + + # Get rid of s from end. + if s[-1] == 's': + s = s[:-1] + + result = [] + prevLtr = ' ' + vowelBefore = 0 + lastChar = len(s)-1 + for idx in range(len(s)): + curLtr = s[idx] + # If first char is a vowel, keep it. + if curLtr in "aeiou": + if idx == 0: + result.append(curLtr) + continue + + # Skip double letters. + if idx < lastChar: + if curLtr == s[idx+1]: + continue + + try: + r = TRIPLES[s[idx:idx+3]] + if r == "+x": + if idx > 1: + result.append("x") + continue + else: + result.append(r) + continue + except KeyError: + pass + try: + r = DOUBLES[s[idx:idx+2]] + result.append(r) + continue + except KeyError: + pass + try: + r = SINGLETONS[s[idx]] + result.append(r) + continue + except KeyError: + pass + + if idx > 0: + prevLtr = s[idx-1] + vowelBefore = prevLtr in "aeiou" + curLtr = s[idx] + + nextLtr2 = ' ' + if idx < lastChar: + nextLtr = s[idx+1] + vowelAfter = nextLtr in "aeiou" + frontvAfter = nextLtr in "eiy" + if idx+1 < lastChar: + nextLtr2 = s[idx+2] + else: + nextLtr = ' ' + vowelAfter = frontvAfter = 0 + + + if curLtr == 'b': + if idx == lastChar and prevLtr == 'm': + pass + else: + result.append(curLtr) + elif curLtr == 'c': + # silent 'sci', 'sce, 'scy', 'sci', etc OK. 
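+            # In full: 'c' is dropped after 's' before e/i/y (the silent case
+            # noted above); otherwise it maps to "x" before 'i'/'a' and for
+            # most "ch" pairs, to "s" before 'e'/'y' (soft c), to "c" right
+            # after another 'c', and to "k" everywhere else ("sch", initial
+            # "ch" + vowel, hard c).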
+ if not (prevLtr == 's' and frontvAfter): + if nextLtr in 'ia': + result.append("x") + elif frontvAfter: + result.append("s") + elif prevLtr == 's' and nextLtr == 'h': + result.append('k') + elif nextLtr == 'h': + if idx == 0 and nextLtr2 in "aeiou": + result.append('k') + else: + result.append('x') + elif prevLtr == 'c': + result.append('c') + else: + result.append('k') + elif curLtr == 'g': + if (idx < lastChar-1) and nextLtr == 'h': + pass + elif s[idx:] == 'gned': + pass + elif s[idx:] == 'gn': + pass + elif prevLtr == 'd' and frontvAfter: + pass + else: + hard = (prevLtr == 'g') + if frontvAfter and not hard: + result.append('j') + else: + result.append('k') + elif curLtr == 'h': + if prevLtr in 'csptg': + pass + elif vowelBefore and not vowelAfter: + pass + else: + result.append('h') + elif curLtr == 'k': + if prevLtr != 'c': result.append('k') + elif curLtr in 'wy': + if vowelAfter: + result.append(curLtr) + + return "".join(result) + +def demo(a): + print a, "=>", metaphone(a) + +if __name__ == '__main__': + demo("Nick. Mathewson") + + demo("joe schmidt") + demo("Beethoven") + + demo("Because the world is round") diff --git a/i2p2www/anonbib/rank.py b/i2p2www/anonbib/rank.py new file mode 100644 index 00000000..175a10d6 --- /dev/null +++ b/i2p2www/anonbib/rank.py @@ -0,0 +1,202 @@ +# Make rankings of papers and authors for automatic classification of content hotness + +# Google Scholar address +# http://scholar.google.com/scholar?as_epq= + +# Take care of the caching setup +cache_expire = 60*60*24*30 # 30 days + +# Checks +import config +import os +import sys +from os.path import exists, isdir, join, getmtime +from os import listdir, remove + +def remove_old(): + # Remove all old cached files + filenames = listdir(cache_folder()) + from time import time + now = time() + for f in filenames: + pf = join(cache_folder(), f) + time_mt = getmtime(pf) + if now - time_mt > cache_expire: # 30 days + remove(pf) + +def cache_folder(): + r = join(config.OUTPUT_DIR, config.CITE_CACHE_DIR) + if not exists(r): + os.makedirs(r) + assert isdir(r) + return r + +import re +from urllib2 import urlopen, build_opener +from urllib import quote +from datetime import date +import hashlib + +# A more handy hash +def md5h(s): + m = hashlib.md5() + m.update(s) + return m.hexdigest() + +format_tested = 0 + +def getPageForTitle(title, cache=True, update=True, save=True): + #Returns (citation-count, scholar url) tuple, or (None,None) + global format_tested + if not format_tested and update: + format_tested = 1 + TestScholarFormat() + + # Do not assume that the title is clean + title = re.sub("\s+", " ", title) + title = re.sub("[^'a-zA-Z0-9\. \-\/:]", "", title) + title = re.sub("'\/", " ", title) + + # We rely on google scholar to return the article with this exact title + gurl = "http://scholar.google.com/scholar?as_q=&as_epq=%s&as_occt=title" + + url = gurl % quote(title) + + # Access cache or network + if exists(join(cache_folder(), md5h(url))) and cache: + return url, file(join(cache_folder(), md5h(url)),'r').read() + elif update: + print "Downloading rank for %r."%title + + # Make a custom user agent (so that we are not filtered by Google)! + opener = build_opener() + opener.addheaders = [('User-agent', 'Anon.Bib.0.1')] + + print "connecting..." 
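+        # The page fetched below is stored under md5h(url) in cache_folder()
+        # when save is True, so the cache branch above answers later calls
+        # until remove_old() expires the file (cache_expire, 30 days).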
+ connection = opener.open(url) + print "reading" + page = connection.read() + print "done" + if save: + file(join(cache_folder(), md5h(url)),'w').write(page) + return url, page + else: + return url, None + +def getCite(title, cache=True, update=True, save=True): + url, page = getPageForTitle(title, cache=cache, update=update, save=save) + if not page: + return None,None + + # Check if it finds any articles + if len(re.findall("did not match any articles", page)) > 0: + return (None, None) + + # Kill all tags! + cpage = re.sub("<[^>]*>", "", page) + + # Add up all citations + s = sum([int(x) for x in re.findall("Cited by ([0-9]*)", cpage)]) + return (s, url) + +def getPaperURLs(title, cache=True, update=True, save=True): + url, page = getPageForTitle(title, cache=cache, update=update, save=save) + if not page: + return [] + pages = re.findall(r'\&\#x25ba\;.*class=fl href="([^"]*)"', page) + return pages + +def get_rank_html(title, years=None, base_url=".", update=True, + velocity=False): + s,url = getCite(title, update=update) + + # Paper cannot be found + if s is None: + return '' + + html = '' + + url = url.replace("&","&") + + # Hotness + H,h = 50,5 + if s >= H: + html += 'More than %s citations on Google Scholar' % (url,base_url,H,H) + elif s >= h: + html += 'More than %s citations on Google Scholar' % (url,base_url,h,h) + + # Only include the velocity if asked. + if velocity: + # Velocity + d = date.today().year - int(years) + if d >= 0: + if 2 < s / (d +1) < 10: + html += '' % base_url + if 10 <= s / (d +1): + html += '' % base_url + + return html + +def TestScholarFormat(): + # We need to ensure that Google Scholar does not change its page format under our feet + # Use some cases to check if all is good + print "Checking google scholar formats..." + stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0] + dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0] + + if stopAndGoCites in (0, None): + print """OOPS.\n +It looks like Google Scholar changed their URL format or their output format. +I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""" + sys.exit(1) + + if dragonCites != None: + print """OOPS.\n +It looks like Google Scholar changed their URL format or their output format. +I went to count the cites for a fictitious paper, and found some.""" + sys.exit(1) + +def urlIsUseless(u): + if u.find("freehaven.net/anonbib/") >= 0: + # Our own cache is not the primary citation for anything. + return True + elif u.find("owens.mit.edu") >= 0: + # These citations only work for 'members of the MIT community'. + return True + else: + return False + +URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ] + +if __name__ == '__main__': + # First download the bibliography file. + import BibTeX + suggest = False + if sys.argv[1] == 'suggest': + suggest = True + del sys.argv[1] + + config.load(sys.argv[1]) + if config.CACHE_UMASK != None: + os.umask(config.CACHE_UMASK) + bib = BibTeX.parseFile(config.MASTER_BIB) + remove_old() + + print "Downloading missing ranks." + for ent in bib.entries: + getCite(ent['title'], cache=True, update=True) + + if suggest: + for ent in bib.entries: + haveOne = False + for utype in URLTYPES: + if ent.has_key("www_%s_url"%utype): + haveOne = True + break + if haveOne: + continue + print ent.key, "has no URLs given." 
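+            # Suggest mode: for entries with no www_<type>_url at all, list
+            # candidate links scraped from the (possibly cached) Google
+            # Scholar result page, skipping ones urlIsUseless() rejects
+            # (the anonbib cache itself and MIT-community-only copies).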
+ urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ] + for u in urls: + print "\t", u + diff --git a/i2p2www/anonbib/reconcile.py b/i2p2www/anonbib/reconcile.py new file mode 100644 index 00000000..e601af48 --- /dev/null +++ b/i2p2www/anonbib/reconcile.py @@ -0,0 +1,292 @@ +#!/usr/bin/python2 +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +"""Code to determine which entries are new and which are old. + + To scan a new file, run "python reconcile.py anonbib.cfg new-file.bib". This + will generate a new bibtex file called 'tmp.bib', with all the new entries + cleaned up a little, and all the duplicate entries commented out. +""" + +import sys +import re + +assert sys.version_info[:3] >= (2,2,0) + +import BibTeX +import config +import metaphone + +_MPCACHE = {} +def soundsLike(s1, s2): + c = _MPCACHE + s1 = clean(s1) + s2 = clean(s2) + try: + m1 = c[s1] + except KeyError: + m1 = c[s1] = metaphone.metaphone(s1) + try: + m2 = c[s2] + except KeyError: + m2 = c[s2] = metaphone.metaphone(s2) + + return m1 == m2 + +def mphone(s): + c = _MPCACHE + s = clean(s) + try: + return c[s] + except: + m = c[s] = metaphone.metaphone(s) + return m + +def clean(s): + s = re.sub(r'\s+', ' ', s) + s = s.strip() + return s + +class MasterBibTeX(BibTeX.BibTeX): + def __init__(self): + BibTeX.BibTeX.__init__(self) + + def buildIndex(self): + self.byTitle = {} + for ent in self.entries: + for t in self._titleForms(ent['title']): + self.byTitle.setdefault(t, []).append(ent) + + def _titleForms(self, title): + title = title.lower() + title = re.sub(r'\b(an|a|the|of)\b', "", title) + title = clean(title) + res = [ mphone(title) ] + if ':' in title: + for t in title.split(":"): + res.append(mphone(t.strip())) + #print "%r\n => %s" % (title,res) + return res + + def _titlesAlike(self, t1, t2): + t1 = clean(t1) + t2 = clean(t2) + if t1 == t2: + return 2 + tf1 = self._titleForms(t1) + tf2 = self._titleForms(t2) + for t in tf1: + if t in tf2: return 1 + return 0 + + def _authorsAlike(self, a1, a2): + if not soundsLike(" ".join(a1.last)," ".join(a2.last)): + return 0 + + if (a1.first == a2.first and a1.von == a2.von + and a1.jr == a2.jr): + return 2 + + + if soundsLike(" ".join(a1.first), " ".join(a2.first)): + return 1 + + if not a1.first or not a2.first: + return 1 + + if self._initialize(a1.first) == self._initialize(a2.first): + return 1 + + return 0 + + def _initialize(self, name): + name = " ".join(name).lower() + name = re.sub(r'([a-z])[a-z\.]*', r'\1', name) + name = clean(name) + return name + + def _authorListsAlike(self, a1, a2): + if len(a1) != len(a2): + return 0 + a1 = [ (a.last, a) for a in a1 ] + a2 = [ (a.last, a) for a in a2 ] + a1.sort() + a2.sort() + if len(a1) != len(a2): + return 0 + r = 2 + for (_, a1), (_, a2) in zip(a1,a2): + x = self._authorsAlike(a1,a2) + if not x: + return 0 + elif x == 1: + r = 1 + return r + + def _entryDatesAlike(self, e1, e2): + try: + if clean(e1['year']) == clean(e2['year']): + return 2 + else: + return 0 + except KeyError: + return 1 + + def includes(self, ent, all=0): + title = ent['title'] + candidates = [] + for form in self._titleForms(title): + try: + candidates.extend(self.byTitle[form]) + except KeyError: + pass + goodness = [] + for knownEnt in candidates: + match = (self._entryDatesAlike(ent, knownEnt) * + self._titlesAlike(ent['title'], knownEnt['title']) * + self._authorListsAlike(ent.parsedAuthor, + knownEnt.parsedAuthor) ) + if match: + goodness.append((match, knownEnt)) + goodness.sort() + if all: + return 
goodness + if goodness: + return goodness[-1] + else: + return None, None + + def demo(self): + for e in self.entries: + matches = self.includes(e, 1) + m2 = [] + mids = [] + for g,m in matches: + if id(m) not in mids: + mids.append(id(m)) + m2.append((g,m)) + matches = m2 + + if not matches: + print "No match for %s"%e.key + if matches[-1][1] is e: + print "%s matches for %s: OK."%(len(matches), e.key) + else: + print "%s matches for %s: %s is best!" %(len(matches), e.key, + matches[-1][1].key) + if len(matches) > 1: + for g, m in matches: + print "%%%% goodness", g + print m + + +def noteToURL(note): + " returns tp, url " + note = note.replace("\n", " ") + m = re.match(r'\s*(?:\\newline\s*)*\s*\\url{(.*)}\s*(?:\\newline\s*)*', + note) + if not m: + return None + url = m.group(1) + for suffix, tp in ((".html", "html"), + (".ps", "ps"), + (".ps.gz", "ps_gz"), + (".pdf", "pdf"), + (".txt", "txt")): + if url.endswith(suffix): + return tp,url + return "???", url + +all_ok = 1 +def emit(f,ent): + global all_ok + + errs = ent._check() + if master.byKey.has_key(ent.key.strip().lower()): + errs.append("ERROR: Key collision with master file") + + if errs: + all_ok = 0 + + note = ent.get("note") + if ent.getURL() and not note: + ent['note'] = "\url{%s}"%ent.getURL() + elif note: + m = re.match(r'\\url{(.*)}', note) + if m: + url = m.group(0) + tp = None + if url.endswith(".txt"): + tp = "txt" + elif url.endswith(".ps.gz"): + tp = "ps_gz" + elif url.endswith(".ps"): + tp = "ps_gz" + elif url.endswith(".pdf"): + tp = "pdf" + elif url.endswith(".html"): + tp = "html" + if tp: + ent['www_%s_url'%tp] = url + + if errs: + all_ok = 0 + for e in errs: + print >>f, "%%%%", e + + print >>f, ent.format(77, 4, v=1, invStrings=invStrings) + +def emitKnown(f, ent, matches): + print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches]) + print >>f, "%%" + print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")) + +if __name__ == '__main__': + if len(sys.argv) != 3: + print "reconcile.py expects 2 arguments" + sys.exit(1) + + config.load(sys.argv[1]) + + print "========= Scanning master ==========" + master = MasterBibTeX() + master = BibTeX.parseFile(config.MASTER_BIB, result=master) + master.buildIndex() + + print "========= Scanning new file ========" + try: + fn = sys.argv[2] + input = BibTeX.parseFile(fn) + except BibTeX.ParseError, e: + print "Error parsing %s: %s"%(fn,e) + sys.exit(1) + + f = open('tmp.bib', 'w') + keys = input.newStrings.keys() + keys.sort() + for k in keys: + v = input.newStrings[k] + print >>f, "@string{%s = {%s}}"%(k,v) + + invStrings = input.invStrings + + for e in input.entries: + if not (e.get('title') and e.get('author')): + print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%" + emit(f, e) + continue + + matches = master.includes(e, all=1) + if not matches: + print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%" + emit(f, e) + else: + print >>f, "%%" + print >>f, "%%%% Possible match found for this entry; max goodness",\ + matches[-1][0], "\n%%" + emitKnown(f, e, matches) + + if not all_ok: + print >>f, "\n\n\nErrors remain; not finished.\n" + + f.close() diff --git a/i2p2www/anonbib/silver.gif b/i2p2www/anonbib/silver.gif new file mode 100644 index 00000000..8a4ff291 Binary files /dev/null and b/i2p2www/anonbib/silver.gif differ diff --git a/i2p2www/anonbib/testbib/pdos.bib b/i2p2www/anonbib/testbib/pdos.bib new file mode 100644 index 00000000..65b24b72 --- /dev/null +++ 
b/i2p2www/anonbib/testbib/pdos.bib @@ -0,0 +1,1742 @@ +%% *** +%% *** ASK YOURSELF: +%% *** +%% *** Did I put it in the right section? +%% *** Did I include a `www_section' tag? +%% *** Did I include the page numbers? +%% *** Did I include the location of the conference (in the `address' tag)? +%% *** +%% *** When you are done editing this file, run this command: +%% *** ./mkpdospubs.pl pdos.bib ../pubs.html +%% *** + +@string{MIT = "Massachusetts Institute of Technology"} +@string{MIT-LCS = "{MIT} Laboratory for Computer Science"} +@string{ACMabbr = "{ACM}"} +@string{SOSP = ACMabbr # " {S}ymposium on {O}perating {S}ystems {P}rinciples"} +@string{IEEEabbr = "{IEEE}"} +@string{IEEECompSoc = IEEEabbr # " {C}omputer {S}ociety"} +@string{OSDI = "{USENIX} {S}ymposium on {O}perating {S}ystems {D}esign and {I}mplementation"} + +@string{PDOSWWW = "http://www.pdos.lcs.mit.edu"} + +%% P2P PAPERS + +@string{p2p = "Peer-to-peer Computing"} + +@inproceedings{ivy:osdi02, + title = "Ivy: A Read/Write Peer-to-peer File System", + author = "Athicha Muthitacharoen and Robert Morris and Thomer Gil and Benjie Chen", + crossref = osdi5, + www_section = p2p, + www_abstract_url = PDOSWWW # "/ivy/osdi02.html", + www_ps_url = PDOSWWW # "/ivy/osdi02.ps", + www_ps_gz_url = PDOSWWW # "/ivy/osdi02.ps.gz", + www_pdf_url = PDOSWWW # "/ivy/osdi02.pdf" +} + +@inproceedings{trie:iptps02, + title = "Efficient Peer-To-Peer Lookup Based on a Distributed Trie", + author = "Michael J. Freedman and Radek Vingralek", + crossref = "iptps02", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/trie:iptps02/index.html", + www_ps_url = PDOSWWW # "/papers/trie:iptps02/trie:iptps02.ps", + www_ps_gz_url = PDOSWWW # "/papers/trie:iptps02/trie:iptps02.ps.gz", + www_pdf_url = PDOSWWW # "/papers/trie:iptps02/trie:iptps02.pdf" +} + +@inproceedings{chord:dns02, + title = "Serving DNS using Chord", + author = "Russ Cox and Athicha Muthitacharoen and Robert Morris", + crossref = "iptps02", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/chord:dns02/index.html", + www_ps_url = PDOSWWW # "/papers/chord:dns02/chord:dns02.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:dns02/chord:dns02.ps.gz", + www_pdf_url = PDOSWWW # "/papers/chord:dns02/chord:dns02.pdf" +} + +@inproceedings{chord:security02, + title = "Security Considerations for Peer-to-Peer Distributed Hash Tables", + author = "Emil Sit and Robert Morris", + crossref = "iptps02", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/chord:security02/index.html", + www_ps_url = PDOSWWW # "/papers/chord:security02/chord:security02.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:security02/chord:security02.ps.gz", + www_pdf_url = PDOSWWW # "/papers/chord:security02/chord:security02.pdf" +} + +@inproceedings{cfs:sosp01, + title = "Wide-area cooperative storage with {CFS}", + author = "Frank Dabek and M. Frans Kaashoek and David Karger and Robert Morris and Ion Stoica", + crossref = "sosp18", + pages = "", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/cfs:sosp01/", + www_ps_url = PDOSWWW # "/papers/cfs:sosp01/cfs_sosp.ps", + www_ps_gz_url = PDOSWWW # "/papers/cfs:sosp01/cfs_sosp.ps.gz", + www_pdf_url = PDOSWWW # "/papers/cfs:sosp01/cfs_sosp.pdf", +} + +@inproceedings{chord:sigcomm01, + title = "Chord: A Scalable Peer-to-peer Lookup Service for Internet Applications", + author = "Ion Stoica and Robert Morris and David Karger and M. 
Frans Kaashoek and Hari Balakrishnan", + crossref = "sigcomm01", + pages = "", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/chord:sigcomm01/", + www_ps_url = PDOSWWW # "/papers/chord:sigcomm01/chord_sigcomm.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:sigcomm01/chord_sigcomm.ps.gz", + www_pdf_url = PDOSWWW # "/papers/chord:sigcomm01/chord_sigcomm.pdf", +} + +@inproceedings{chord:hotos, + title = "Building Peer-to-Peer Systems With Chord, a Distributed Lookup Service", + author = "Frank Dabek and Emma Brunskill and M. Frans Kaashoek and David Karger and Robert Morris and Ion Stoica and Hari Balakrishnan", + crossref = "hotos8", + pages = "", + www_section = p2p, + www_abstract_url = PDOSWWW # "/papers/chord:hotos01/", + www_ps_url = PDOSWWW # "/papers/chord:hotos01/hotos8.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:hotos01/hotos8.ps.gz", + www_pdf_url = PDOSWWW # "/papers/chord:hotos01/hotos8.pdf", +} + + +%% NETWORKING PAPERS + +@string{networking = "Networking and Communication"} + +@inproceedings{click:asplos02, + title = "Programming Language Optimizations for Modular Router Configurations", + author = "Eddie Kohler and Robert Morris and Benjie Chen", + booktitle = "Proceedings of the 10th Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS)", + location = "San Jose, CA", + month = oct, + year = 2002, + www_section = networking, + www_pdf_url = PDOSWWW # "/papers/click:asplos02.pdf" +} + +@inproceedings{grid:hotnets02, + title = "Performance of Multihop Wireless Networks: Shortest Path is Not Enough", + author = "Douglas S. J. {De Couto} and Daniel Aguayo and Benjamin A. Chambers and Robert Morris", + crossref = "hotnets1", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:hotnets02/", + www_ps_url = PDOSWWW # "/papers/grid:hotnets02/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:hotnets02/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/grid:hotnets02/paper.pdf" +} + +@techreport{grid:losstr01, + title = "Effects of Loss Rate on Ad Hoc Wireless Routing", + author = "Douglas S. J. {De Couto} and Daniel Aguayo and Benjamin A. 
Chambers and Robert Morris", + institution = MIT-LCS, + year = 2002, month = mar, + number = "MIT-LCS-TR-836", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:losstr02/", + www_ps_url = PDOSWWW # "/papers/grid:losstr02/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:losstr02/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/grid:losstr02/paper.pdf" +} + + +@article{span:wireless01, + title = "Span: An Energy-Efficient Coordination Algorithm for Topology Maintenance in Ad Hoc Wireless Networks", + author = "Benjie Chen and Kyle Jamieson and Hari Balakrishnan and Robert Morris", + crossref = "journal:winet", + volume = 8, + number = "5", + year = 2002, + month = sep, + pages = "", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/span:wireless01/", + www_ps_url = PDOSWWW # "/papers/span:wireless01/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/span:wireless01/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/span:wireless01/paper.pdf" +} + +@inproceedings{dnscache:sigcommimw01, + title = "DNS Performance and the Effectiveness of Caching", + author = "Jaeyeon Jung, Emil Sit, Hari Balakrishnan and Robert Morris", + crossref = "sigcommimw01", + www_section = networking, + www_abstract_url = "http://nms.lcs.mit.edu/papers/dns-imw2001.html", + www_ps_url = "http://nms.lcs.mit.edu/papers/dns-imw2001.ps", + www_ps_gz_url = "http://nms.lcs.mit.edu/papers/dns-imw2001.ps.gz", + www_pdf_url = "http://nms.lcs.mit.edu/papers/dns-imw2001.pdf" +} + +@inproceedings{ron:sosp01, + title = "Resilient Overlay Networks", + author = "David Andersen and Hari Balakrishnan and M. Frans Kaashoek and Robert Morris", + crossref = "sosp18", + pages = "", + www_section = networking, + www_abstract_url = "http://nms.lcs.mit.edu/papers/ron-sosp2001.html", + www_ps_url = "http://nms.lcs.mit.edu/papers/ron-sosp2001.ps", + www_ps_gz_url = "http://nms.lcs.mit.edu/papers/ron-sosp2001.ps.gz", + www_pdf_url = "http://nms.lcs.mit.edu/papers/ron-sosp2001.pdf", +} + +@techreport{grid:proxytr01, + title = "Location Proxies and Intermediate Node Forwarding for Practical Geographic Forwarding", + author = "Douglas S. J. {De Couto} and Robert Morris", + institution = MIT-LCS, + year = 2001, month = jun, + number = "MIT-LCS-TR-824", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:proxytr01/", + www_ps_url = PDOSWWW # "/papers/grid:proxytr01/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:proxytr01/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/grid:proxytr01/paper.pdf", +} + +@inproceedings{span:mobicom01, + title = "Span: An Energy-Efficient Coordination Algorithm for Topology Maintenance in Ad Hoc Wireless Networks", + author = "Benjie Chen and Kyle Jamieson and Hari Balakrishnan and Robert Morris", + crossref = "mobicom01", + pages = "85--96", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/span:mobicom01/", + www_ps_url = PDOSWWW # "/papers/span:mobicom01/span.ps", + www_ps_gz_url = PDOSWWW # "/papers/span:mobicom01/span.ps.gz", + www_pdf_url = PDOSWWW # "/papers/span:mobicom01/span.pdf" +} + +@inproceedings{grid:mobicom01, + title = "Capacity of Ad Hoc Wireless Networks", + author = "Jinyang Li and Charles Blake and Douglas S. J. 
{De Couto} and Hu Imm Lee and Robert Morris", + crossref = "mobicom01", + pages = "61--69", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:mobicom01/", + www_ps_url = PDOSWWW # "/papers/grid:mobicom01/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:mobicom01/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/grid:mobicom01/paper.pdf" +} + +@inproceedings{click:usenix01, + title = "Flexible Control of Parallelism in a Multiprocessor PC Router", + author = "Benjie Chen and Robert Morris", + crossref = "usenix01", + pages = "333--346", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/click:usenix01/", + www_ps_url = PDOSWWW # "/papers/click:usenix01/usenix01.ps", + www_ps_gz_url = PDOSWWW # "/papers/click:usenix01/usenix01.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click:usenix01/usenix01.pdf", +} + +@inproceedings{ron:hotos8, + title = "Resilient Overlay Networks", + author = "David Andersen and Hari Balakrishnan and M. Frans Kaashoek and Robert Morris", + crossref = "hotos8", + www_section = networking, + www_abstract_url = "http://nms.lcs.mit.edu/papers/ron-hotos2001.html", + www_ps_url = "http://nms.lcs.mit.edu/papers/ron-hotos2001.ps", + www_ps_gz_url = "http://nms.lcs.mit.edu/papers/ron-hotos2001.ps.gz", + www_pdf_url = "http://nms.lcs.mit.edu/papers/ron-hotos2001.pdf", +} + +@techreport{click:rewritertr, + title = "Modular components for network address translation", + author = "Eddie Kohler and Robert Morris and Massimiliano Poletto", + institution = "MIT LCS Click Project", + year = 2000, month = dec, + note = "http://www.pdos.lcs.mit.edu/papers/click-rewriter/", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/click-rewriter/", + www_ps_url = PDOSWWW # "/papers/click-rewriter/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/click-rewriter/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click-rewriter/paper.pdf", +} + +@inproceedings{grid:sigops-euro9, + title = "{C}ar{N}et: A Scalable Ad Hoc Wireless Network System", + author = "Robert Morris and John Jannotti and Frans Kaashoek and Jinyang Li and Douglas S. J. {De Couto}", + crossref = "sigops-euro9", + pages = "", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:sigops-euro9/", + www_ps_gz_url = PDOSWWW # "/papers/grid:sigops-euro9/paper.ps.gz", + www_ps_url = PDOSWWW # "/papers/grid:sigops-euro9/paper.ps", + www_pdf_url = PDOSWWW # "/papers/grid:sigops-euro9/paper.pdf", + note = "The published version incorrectly lists Douglas De Couto's name" +} + +@inproceedings{grid:mobicom00, + title = "A Scalable Location Service for Geographic Ad Hoc Routing", + author = "Jinyang Li and John Jannotti and Douglas S. J. {De Couto} and David R. Karger and Robert Morris", + crossref = "mobicom00", + pages = "120--130", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/grid:mobicom00/", + www_ps_url = PDOSWWW # "/papers/grid:mobicom00/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:mobicom00/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/grid:mobicom00/paper.pdf" +} + +@techreport{click:lcstr00, + title = "Programming language techniques for modular router configurations", + author = "Eddie Kohler and Benjie Chen and M. 
Frans Kaashoek and Robert Morris and Massimiliano Poletto", + institution = MIT-LCS, + year = 2000, month = aug, + number = "MIT-LCS-TR-812", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/click:lcstr00/", + www_ps_url = PDOSWWW # "/papers/click:lcstr00/tr.ps", + www_ps_gz_url = PDOSWWW # "/papers/click:lcstr00/tr.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click:lcstr00/tr.pdf", +} + +@article{click:tocs00, + title = "The Click modular router", + author = "Eddie Kohler and Robert Morris and Benjie Chen and John Jannotti and M. Frans Kaashoek", + crossref = "journal:tocs", + volume = 18, number = 3, + year = 2000, month = aug, + pages = "263--297", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/click:tocs00/", + www_ps_url = PDOSWWW # "/papers/click:tocs00/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/click:tocs00/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click:tocs00/paper.pdf" +} + +@inproceedings{click:sosp99, + title = "The {C}lick modular router", + author = "Robert Morris and Eddie Kohler and John Jannotti and M. Frans Kaashoek", + crossref = "sosp17", + pages = "217--231", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/click:sosp99/", + www_ps_url = PDOSWWW # "/papers/click:sosp99/paper.ps", + www_ps_gz_url = PDOSWWW # "/papers/click:sosp99/paper.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click:sosp99/paper.pdf" +} + +@inproceedings{prolac:sigcomm99, + title = "A readable {TCP} in the {Prolac} protocol language", + author = "Eddie Kohler and M. Frans Kaashoek and David R. Montgomery", + crossref = "sigcomm99", + pages = "3--13", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/prolac:sigcomm99/", + www_ps_url = PDOSWWW # "/papers/prolac:sigcomm99/paper.ps", + www_pdf_url = PDOSWWW # "/papers/prolac:sigcomm99/paper.pdf" +} + +@inproceedings{ash:sigcomm96, + title = "{ASHs}: application-specific handlers for high-performance messaging", + author = "Deborah A. Wallach and Dawson R. Engler and M. Frans Kaashoek", + crossref = "sigcomm96", + pages = "40--52", + www_section = networking, + www_ps_url = PDOSWWW # "/papers/ash-sigcomm96.ps" +} + +@inproceedings{dpf:sigcomm96, + title = "{DPF}: fast, flexible message demultiplexing using dynamic code generation", + author = "Dawson R. Engler and M. Frans Kaashoek", + crossref = "sigcomm96", + pages = "53--59", + www_section = networking, + www_ps_url = PDOSWWW # "/papers/dpf.ps" +} + +@inproceedings{oam:ppopp95, + title = "Optimistic active messages: a mechanism for scheduling communication with computation", + author = "Deborah A. Wallach and Wilson C. Hsieh and Kirk Johnson and M. Frans Kaashoek and William E. Weihl", + crossref = "ppopp95", + pages = "217--226", + www_section = networking, + www_ps_url = PDOSWWW # "/papers/oam.ps" +} + +@techreport{user-level-comm:tr, + title = "Efficient implementation of high-level languages on user-level communication architectures", + author = "Wilson C. Hsieh and Kirk L. Johnson and M. Frans Kaashoek and Deborah A. Wallach and William E. Weihl", + institution = MIT-LCS, + year = 1994, month = may, + number = "MIT-LCS-TR-616", + www_section = networking, + www_ps_url = PDOSWWW # "/papers/UserLevelCommunication.ps", +} + +@inproceedings{ipc-persistent-relevance:wwos4, + title = "The persistent relevance of IPC performance", + author = "Wilson C. Hsieh and M. Frans Kaashoek and William E. 
Weihl", + crossref = "wwos4", + pages = "186--190", + www_section = networking, + www_ps_url = PDOSWWW # "/papers/RelevanceOfIPC.ps", +} + +@inproceedings{pan:openarch99, + title = "{PAN}: a high-performance active network node supporting multiple mobile code systems", + author = "Erik L. Nygren and Stephen J. Garland and M. Frans Kaashoek", + crossref = "openarch99", + pages = "78--89", + www_section = networking, + www_abstract_url = PDOSWWW # "/papers/pan-openarch99/", + www_ps_url = PDOSWWW # "/papers/pan-openarch99/pan-openarch99.ps", + www_ps_gz_url = PDOSWWW # "/papers/pan-openarch99/pan-openarch99.ps.gz", + www_pdf_url = PDOSWWW # "/papers/pan-openarch99/pan-openarch99.pdf", +} + +%% DISTRIBUTED COMPUTING + +@string{distribcomp = "Distributed Computing"} + +@inproceedings{lbfs:sosp01, + title = "A Low-bandwidth Network File System", + author = "Athicha Muthitacharoen and Benjie Chen and David Mazi{\`e}res", + crossref = "sosp18", + pages = "174--187", + www_section = distribcomp, + www_abstract_url = PDOSWWW # "/papers/lbfs:sosp01/", + www_ps_url = PDOSWWW # "/papers/lbfs:sosp01/lbfs.ps", + www_ps_gz_url = PDOSWWW # "/papers/lbfs:sosp01/lbfs.ps.gz", + www_pdf_url = PDOSWWW # "/papers/lbfs:sosp01/lbfs.pdf", +} + +@inproceedings{server-os:sigops-euro, + title = "Server operating systems", + author = "M. Frans Kaashoek and Dawson R. Engler and Gregory R. Ganger and Deborah A. Wallach", + crossref = "sigops-euro7", + pages = "141--148", + www_section = distribcomp, + www_html_url = PDOSWWW # "/papers/serverOS.html" +} + +@inproceedings{amoeba-eval:dcs16, + title = "An evaluation of the {Amoeba} group communication system", + author = "M. Frans Kaashoek and Andrew S. Tanenbaum", + crossref = "dcs16", + pages = "436--448", + www_section = distribcomp, + www_ps_url = PDOSWWW # "/papers/group-dcs16.ps" +} + + +%% SECURITY AND PRIVACY + +@string{security = "Security and Privacy"} + +@techreport{sfs:rex, + title = "REX: Secure, modular remote execution through file descriptor passing", + author = "Michael Kaminsky and Eric Peterson and Kevin Fu and David Mazi{\`e}res and M. Frans Kaashoek", + institution = "MIT-LCS", + year = 2003, month = jan, + number = "MIT-LCS-TR-884", + note = "http://www.pdos.lcs.mit.edu/papers/sfs:rex/", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/sfs:rex/", + www_ps_url = PDOSWWW # "/papers/sfs:rex/MIT-LCS-TR-884.ps", + www_ps_gz_url = PDOSWWW # "/papers/sfs:rex/MIT-LCS-TR-884.ps.gz", + www_pdf_url = PDOSWWW # "/papers/sfs:rex/MIT-LCS-TR-884.pdf", +} + +@inproceedings{tarzan:ccs9, + title = "Tarzan: A Peer-to-Peer Anonymizing Network Layer", + author = "Michael J. Freedman and Robert Morris", + crossref = "ccs9", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/tarzan:ccs9/index.html", + www_ps_url = PDOSWWW # "/papers/tarzan:ccs9/tarzan:ccs9.ps", + www_ps_gz_url = PDOSWWW # "/papers/tarzan:ccs9/tarzan:ccs9.ps.gz", + www_pdf_url = PDOSWWW # "/papers/tarzan:ccs9/tarzan:ccs9.pdf" +} + +@inproceedings{tarzan:iptps02, + title = "Introducing Tarzan, a Peer-to-Peer Anonymizing Network Layer", + author = "Michael J. 
Freedman and Emil Sit and Josh Cates and Robert Morris", + crossref = "iptps02", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/tarzan:iptps02/index.html", + www_ps_url = PDOSWWW # "/papers/tarzan:iptps02/tarzan:iptps02.ps", + www_ps_gz_url = PDOSWWW # "/papers/tarzan:iptps02/tarzan:iptps02.ps.gz", + www_pdf_url = PDOSWWW # "/papers/tarzan:iptps02/tarzan:iptps02.pdf" +} + +@article{sfsro:tocs2002, + title = "{F}ast and secure distributed read-only file system", + author = "Kevin Fu and M. Frans Kaashoek and David Mazi{\`e}res", + crossref = "journal:tocs", + volume = 20, number = 1, + year = 2002, month = feb, + pages = "1--24", + www_section = security, + www_abstract_url = "http://portal.acm.org/citation.cfm?doid=505452.505453" +} + + +@inproceedings{webauth:sec10, + title = "{D}os and Don'ts of Client Authentication on the Web", + author = "Kevin Fu and Emil Sit and Kendra Smith and Nick Feamster", + crossref = "sec10", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/webauth.html", + www_ps_url = PDOSWWW # "/papers/webauth:sec10.ps", + www_pdf_url = PDOSWWW # "/papers/webauth:sec10.pdf", + www_ps_gz_url = PDOSWWW # "/papers/webauth:sec10.ps.gz", + note = "An extended version is available as MIT-LCS-TR-818", +} + +@techreport{webauth:tr, + title = "{D}os and Don'ts of Client Authentication on the Web", + author = "Kevin Fu and Emil Sit and Kendra Smith and Nick Feamster", + institution = MIT-LCS, + year = 2001, month = may, + number = "MIT-LCS-TR-818", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/webauth.html", + www_ps_url = PDOSWWW # "/papers/webauth:tr.ps", + www_pdf_url = PDOSWWW # "/papers/webauth:tr.pdf", + www_ps_gz_url = PDOSWWW # "/papers/webauth:tr.ps.gz", +} + +@inproceedings{sfsro:osdi2000, + title = "{F}ast and secure distributed read-only file system", + author = "Kevin Fu and M. Frans Kaashoek and David Mazi{\`e}res", + crossref = "osdi4", + pages = "181-196", + www_section = security, + www_abstract_url = PDOSWWW # "/papers/sfsro.html", + www_ps_url = PDOSWWW # "/papers/sfsro:osdi2000.ps", + www_pdf_url = PDOSWWW # "/papers/sfsro:osdi2000.pdf", + www_ps_gz_url = PDOSWWW # "/papers/sfsro:osdi2000.ps.gz", +} + +@inproceedings{sfs:sosp99, + title = "{S}eparating key management from file system security", + author = "David Mazi{\`e}res and Michael Kaminsky and M. Frans Kaashoek and Emmett Witchel", + crossref = "sosp17", + pages = "", + www_section = security, + www_ps_gz_url = PDOSWWW # "/papers/sfs:sosp99.ps.gz", + www_pdf_url = PDOSWWW # "/papers/sfs:sosp99.pdf", +} + +@inproceedings{nymserver:ccs5, + title = "The design, implementation and operation of an email pseudonym server", + author = "David Mazi{\`e}res and M. Frans Kaashoek", + crossref = "ccs5", + pages = "27--36", + www_section = security, + www_ps_gz_url = PDOSWWW # "/papers/nymserver:ccs5.ps.gz", + www_pdf_url = PDOSWWW # "/papers/nymserver:ccs5.pdf", +} + +@inproceedings{sfs:sigops-euro8, + title = "Escaping the evils of centralized control with self-certifying pathnames", + author = "David Mazi{\`e}res and M. Frans Kaashoek", + crossref = "sigops-euro8", + pages = "", + www_section = security, + www_ps_gz_url = PDOSWWW # "/papers/sfs:sigops-euro8.ps.gz" +} + +@inproceedings{secure-apps:hotos6, + title = "Secure applications need flexible operating systems", + author = "David Mazi{\`e}res and M. 
Frans Kaashoek", + pages = "56--61", + crossref = "hotos6", + www_section = security, + www_ps_gz_url = PDOSWWW # "/papers/mazieres:hotos6.ps.gz", +} + + +%% MOBILE COMPUTING + +@string{mobilecomp = "Mobile Computing"} + +@inproceedings{migrate:hotos8, + title = "Reconsidering Internet Mobility", + author = "Alex C. Snoeren and Hari Balakrishnan and M. Frans Kaashoek", + crossref = "hotos8", + www_section = mobilecomp, + www_abstract_url = "http://nms.lcs.mit.edu/papers/migrate-hotOS.html", + www_ps_url = "http://nms.lcs.mit.edu/papers/migrate-hotOS.ps", + www_pdf_url = "http://nms.lcs.mit.edu/papers/migrate-hotOS.pdf", +} + +@misc{rover:rfs-wip, + title = "{RFS}: a mobile-transparent file system for the {Rover} toolkit", + author = "Anthony D. Joseph and George M. Candea and M. Frans Kaashoek", + howpublished = "Works-in-progress poster, the 16th " # SOSP, + crossref = "sosp16", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/RFS_SOSP_WIP.ps" + www_show = no, +} + +@article{rover:winet, + title = "Building reliable mobile-aware applications using the {Rover} toolkit", + author = "Anthony D. Joseph and M. Frans Kaashoek", + crossref = "journal:winet", + volume = 3, number = 5, + year = 1997, + pages = "405--419", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/winet.ps" + www_ps_gz_url = PDOSWWW # "/papers/winet.ps.gz" +} + +@article{rover:ieee-toc, + title = "Mobile computing with the {Rover} toolkit", + author = "Anthony D. Joseph and Joshua A. Tauber and M. Frans Kaashoek", + crossref = "journal:ieee-toc", + volume = 46, number = 3, + year = 1997, month = mar, + pages = "337--352", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/toc.ps" + www_ps_gz_url = PDOSWWW # "/papers/toc.ps.gz" +} + +@inproceedings{rover:mobicom, + title = "Building reliable mobile-aware applications using the {Rover} toolkit", + author = "Anthony D. Joseph and Joshua A. Tauber and M. Frans Kaashoek", + crossref = "mobicom96", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/mobicom96.ps", + www_ps_gz_url = PDOSWWW # "/papers/mobicom96.ps.gz", +} + +@inproceedings{rover:sosp95, + title = "{Rover}: a toolkit for mobile information access", + author = "Anthony D. Joseph and {deLespinasse}, Alan F. and Joshua A. Tauber and David K. Gifford and M. Frans Kaashoek", + crossref = "sosp15", + pages = "156--171", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/rover-sosp95.ps", + www_ps_gz_url = PDOSWWW # "/papers/rover-sosp95.ps.gz", +} + +@inproceedings{dynamic-documents:wmcsa, + title = "Dynamic documents: mobile wireless access to the {WWW}", + author = "M. Frans Kaashoek and Tom Pinckney and Joshua A. Tauber", + crossref = "wmcsa94", + pages = "179--184", + www_section = mobilecomp, + www_abstract_url = PDOSWWW # "/papers/wmcsa94.abstract.html", + www_ps_url = PDOSWWW # "/papers/wmcsa94.ps", + www_ps_gz_url = PDOSWWW # "/papers/wmcsa94.ps.gz", +} + +@inproceedings{mobile-storage-alt:osdi1, + title = "Storage alternatives for mobile computers", + author = "Fred Douglis and Ramón Cáceres and M. Frans Kaashoek and Kai Li and Brian Marsh and Joshua A. Tauber", + crossref = "osdi1", + pages = "25--37", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/storage-alternatives.ps" +} + +@inproceedings{dynamic-documents:www94, + title = "Dynamic documents: extensibility and adaptability in the {WWW}", + author = "M. Frans Kaashoek and Tom Pinckney and Joshua A. 
Tauber", + crossref = "www94", + edition = "developers' day track", + www_section = mobilecomp, + www_ps_url = PDOSWWW # "/papers/www94.ps", + www_ps_gz_url = PDOSWWW # "/papers/www94.ps.gz", + www_html_url = PDOSWWW # "/papers/www94.html", +} + + +%% STORAGE MANAGEMENT + +@string{storage = "Storage Management"} + +@inproceedings{cffs:usenix97, + title = "Embedded inodes and explicit grouping: exploiting disk bandwidth for small files", + author = "Gregory R. Ganger and M. Frans Kaashoek", + crossref = "usenix97", + pages = "1--17", + www_section = storage, + www_abstract_url = PDOSWWW # "/papers/cffs.html", + www_ps_url = PDOSWWW # "/papers/cffs-usenix97.ps", + www_ps_gz_url = PDOSWWW # "/papers/cffs-usenix97.ps.gz", +} + +@inproceedings{arus:dcs16, + title = "Atomic recovery units: failure atomicity for logical disks", + author = "Robert Grimm and Wilson C. Hsieh and Wiebren de Jonge and M. Frans Kaashoek", + crossref = "dcs16", + pages = "26--37", + www_section = storage, + www_ps_url = PDOSWWW # "/papers/arus.ps", +} + +@inproceedings{logicaldisk:sosp14, + title = "The logical disk: a new approach to improving file systems", + author = "Wiebren de Jonge and M. Frans Kaashoek and Wilson C. Hsieh", + crossref = "sosp14", + pages = "15--28", + www_section = storage, + www_ps_url = PDOSWWW # "/papers/LogicalDisk.ps" +} + + +%% EXOKERNEL PAPERS + +@string{exokernels = "Exokernels"} + +@article{exo:tocs2002, + title = "{F}ast and flexible Application-Level Networking on Exokernel Systems", + author = "Gregory R. Ganger and Dawson R. Engler and M. Frans Kaashoek and Hector M. Briceno and Russell Hunt and Thomas Pinckney", + crossref = "journal:tocs", + volume = 20, + number = 1, + year = 2002, + month = feb, + pages = "49--83", + www_section = exokernels, + www_abstract_url = PDOSWWW # "papers/exo:tocs.html", + www_pdf_url = PDOSWWW # "papers/exo:tocs.pdf", + www_ps_url = PDOSWWW # "papers/exo:tocs.ps", + www_ps_gz_url = PDOSWWW # "papers/exo:tocs.ps.gz" +} + +@inproceedings{exo:sosp97, + title = "Application performance and flexibility on exokernel systems", + author = "M. Frans Kaashoek and Dawson R. Engler and Gregory R. Ganger and H{\'e}ctor M. Brice{\~n}o and Russell Hunt and David Mazi{\`e}res and Thomas Pinckney and Robert Grimm and John Jannotti and Kenneth Mackenzie", + pages = "52--65", + crossref = "sosp16", + www_section = exokernels, + www_abstract_url = PDOSWWW # "/papers/exo-sosp97.html", + www_html_url = PDOSWWW # "/papers/exo-sosp97/exo-sosp97.html", + www_ps_url = PDOSWWW # "/papers/exo-sosp97/exo-sosp97.ps", + www_ps_gz_url = PDOSWWW # "/papers/exo-sosp97/exo-sosp97.ps.gz", +} + +@inproceedings{exo:sosp95, + title = "{E}xokernel: an operating system architecture for application-level resource management", + author = "Dawson R. Engler and M. Frans Kaashoek and James {O'Toole Jr.}", + pages = "251--266", + crossref = "sosp15", + www_section = exokernels, + www_ps_url = PDOSWWW # "/papers/exokernel-sosp95.ps", +} + +@inproceedings{exo:osdi1, + title = "The exokernel approach to extensibility (panel statement)", + author = "Dawson R. Engler and M. Frans Kaashoek and {O'Toole Jr.}, James W.", + pages = "198", + crossref = "osdi1", + www_section = exokernels, + www_ps_url = PDOSWWW # "/papers/exo-abstract.ps", +} + +@inproceedings{exo:hotos5, + title = "Exterminate all operating system abstractions", + author = "Dawson R. Engler and M. 
Frans Kaashoek", + pages = "78--83", + crossref = "hotos5", + www_section = exokernels, + www_ps_url = PDOSWWW # "/papers/hotos-jeremiad.ps", +} + +@article{exo:osr, + title = "The operating system kernel as a secure programmable machine", + author = "Dawson R. Engler and M. Frans Kaashoek and {O'Toole Jr.}, James W.", + crossref = "journal:osr", + year = 1995, month = jan, + volume = 29, number = 1, + pages = "78--82", + www_section = exokernels, + www_ps_url = PDOSWWW # "/papers/osr-exo.ps", +} + +@inproceedings{exo:sigops-euro, + title = "The operating system kernel as a secure programmable machine", + author = "Dawson R. Engler and M. Frans Kaashoek and {O'Toole Jr.}, James W.", + crossref = "sigops-euro6", + pages = "62--67", + www_section = exokernels, + www_ps_url = PDOSWWW # "/papers/xsigops.ps", +} + + +%% DYNAMIC CODE GENERATION + +@string{dcg = "Dynamic Code Generation"} + +@article{tickc:toplas, + title = "{`C} and {tcc}: A language and compiler for dynamic code generation" + author = "Massimiliano Poletto and Wilson C. Hsieh and Dawson R. Engler and M. Frans Kaashoek", + crossref = "journal:toplas", + volume = 21, number = 2, + year = 1999, month = mar, + pages = "324--369", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/tickc-toplas.ps", +} + +@article{linearscan, + title = "Linear scan register allocation", + author = "Massimiliano Poletto and Vivek Sarkar", + crossref = "journal:toplas", + volume = 21, number = 5, + year = 1999, month = sep, + pages = "895--913", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/toplas-linearscan.ps", +} + +@inproceedings{tickc:pldi97, + title = "tcc: a system for fast, flexible, and high-level dynamic code generation", + author = "Massimiliano Poletto and Dawson R. Engler and M. Frans Kaashoek", + crossref = "pldi97", + pages = "109--121", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/tcc-pldi97.ps", +} + +@inproceedings{tickc:popl96, + title = "{`C}: A language for efficient, machine-independent dynamic code generation", + author = "Dawson R. Engler and Wilson C. Hsieh and M. Frans Kaashoek", + crossref = "popl96", + pages = "131--144", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/popl96.ps", + note = "An earlier version is available as MIT-LCS-TM-526", +} + +@inproceedings{tickc:wcsss96, + title = "tcc: a template-based compiler for {`C}", + author = "Massimiliano Poletto and Dawson R. Engler and M. Frans Kaashoek", + crossref = "wcsss96", + pages = "1--7", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/tcc-wcsss96.ps" +} + +@inproceedings{vcode:pldi96, + title = "{VCODE}: a retargetable, extensible, very fast dynamic code generation system", + author = "Dawson R. Engler", + crossref = "pldi96", + pages = "160--170", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/vcode-pldi96.ps", +} + +@inproceedings{dcg:asplos6, + title = "{DCG}: an efficient, retargetable dynamic code generation system", + author = "Dawson R. Engler and Todd A. 
Proebsting", + crossref = "asplos6", + pages = "263--272", + www_section = dcg, + www_ps_url = PDOSWWW # "/papers/dcg.ps", +} + + +%% PROGRAMMING LANGUAGES + +@string{proglang = "Programming Languages"} + +@inproceedings{pct:usenix02, + title = "Simple and General Statistical Profiling with PCT", + author = "Charles Blake and Steve Bauer", + crossref = "usenix02", + pages = "333--346" + www_section = proglang, + www_abstract_url = PDOSWWW # "/papers/pct:usenix02/", + www_ps_url = PDOSWWW # "/papers/pct:usenix02/blake02:pct.ps", + www_ps_gz_url = PDOSWWW # "/papers/pct:usenix02/blake02:pct.ps.gz", + www_pdf_url = PDOSWWW # "/papers/pct:usenix02/blake02:pct.pdf", +} + +@inproceedings{evolving-software:wcsss99, + title = "Evolving software with an application-specific language", + author = "Eddie Kohler and Massimiliano Poletto and David R. Montgomery", + crossref = "wcsss99", + pages = "94--102", + www_section = proglang, + www_abstract_url = PDOSWWW # "/papers/evolving-software:wcsss99/", + www_ps_url = PDOSWWW # "/papers/evolving-software:wcsss99/paper.ps", + www_pdf_url = PDOSWWW # "/papers/evolving-software:wcsss99/paper.pdf", +} + + +%% STORAGE MANAGEMENT + +@string{storage = "Storage Management"} + +@inproceedings{cffs:usenix97, + title = "Embedded inodes and explicit grouping: exploiting disk bandwidth for small files", + author = "Gregory R. Ganger and M. Frans Kaashoek", + crossref = "usenix97", + pages = "1--17", + www_section = storage, + www_abstract_url = PDOSWWW # "/papers/cffs.html", + www_ps_url = PDOSWWW # "/papers/cffs-usenix97.ps", + www_ps_gz_url = PDOSWWW # "/papers/cffs-usenix97.ps.gz", +} + +@inproceedings{arus:dcs16, + title = "Atomic recovery units: failure atomicity for logical disks", + author = "Robert Grimm and Wilson C. Hsieh and Wiebren de Jonge and M. Frans Kaashoek", + crossref = "dcs16", + pages = "26--37", + www_section = storage, + www_ps_url = PDOSWWW # "/papers/arus.ps", +} + +@inproceedings{logicaldisk:sosp14, + title = "The logical disk: a new approach to improving file systems", + author = "Wiebren de Jonge and M. Frans Kaashoek and Wilson C. Hsieh", + crossref = "sosp14", + pages = "15--28", + www_section = storage, + www_ps_url = PDOSWWW # "/papers/LogicalDisk.ps" +} + + +%% VIRTUAL MEMORY + +@string{vm = "Virtual Memory"} + +@inproceedings{avm:hotos5, + title = "{AVM}: application-level virtual memory", + author = "Dawson R. Engler and Sandeep K. Gupta and M. Frans Kaashoek", + crossref = "hotos5", + pages = "72--77", + www_section = vm, + www_ps_url = PDOSWWW # "/papers/hotos-uvm.ps", +} + +@inproceedings{software-tlb-prefetch:osdi1, + title = "Software prefetching and caching for translation lookaside buffers", + author = "Kavita Bala and M. Frans Kaashoek and William Weihl", + crossref = "osdi1", + pages = "243--253", + www_section = vm, + www_ps_url = PDOSWWW # "/papers/tlb.ps", +} + + + + +%% DISTRIBUTED SHARED MEMORY AND PARALLEL COMPUTING + +@string{dsm/parallel = "Distributed Shared Memory and Parallel Computing"} + +@inproceedings{dynamic-migration:supercomp96, + title = "Dynamic computation migration in distributed +shared memory systems", + author = "Wilson C. Hsieh and M. Frans Kaashoek and William E. Weihl", + crossref = "supercomp96", + www_section = dsm/parallel, + www_ps_url = PDOSWWW # "/papers/mcrl.ps" +} + +@inproceedings{crl:sosp95, + title = "{CRL}: high-performance all-software distributed shared memory", + author = "Kirk L. Johnson and M. Frans Kaashoek and Deborah A. 
Wallach", + crossref = "sosp15", + pages = "213--226", + www_section = dsm/parallel, + www_ps_url = PDOSWWW # "/papers/crl-sosp95.ps" + note = "An earlier version of this work appeared as Technical Report MIT-LCS-TM-517, MIT Laboratory for Computer Science, March 1995", +} + +@inproceedings{formal-sequential-consistent:dcs15, + title = "Implementing sequentially consistent shared objects using broadcast and point-to-point communication", + author = "Alan Fekete and M. Frans Kaashoek and Nancy Lynch", + crossref = "dcs15", + pages = "439--449", + www_section = dsm/parallel, + www_ps_url = PDOSWWW # "/papers/formal.ps" +} + +@techreport{formal-sequential-consistent:tr, + title = "Implementing sequentially consistent shared objects using broadcast and point-to-point communication", + author = "Alan Fekete and M. Frans Kaashoek and Nancy Lynch", + institution = MIT-LCS, + year = 1995, month = jun, + number = "MIT-LCS-TR-518", + www_section = dsm/parallel, + www_ps_url = PDOSWWW # "/papers/formaltr.ps" +} + +%@inproceedings{triangle-puzzle:dimacs94, +% title = "A case study of shared-memory and +%message-passing implementations of parallel breadth-first search: The +%triangle puzzle", +% author = "Kevin Lew and Kirk Johnson and M. Frans Kaashoek", +% crossref = "dimacs94", +% www_section = dsm/parallel, +% www_ps_url = PDOSWWW # "/papers/dimacs94.ps" +%} + + +%% PHD THESES + +@string{phdtheses = "Ph.D. Theses"} + +@phdthesis{snoeren-phd, + title = "A Session-Based Architecture for Internet Mobility", + author = "Alex C. Snoeren", + school = MIT, + year = 2002, month = dec, + www_section = phdtheses, + www_ps_url = "http://nms.lcs.mit.edu/~snoeren/papers/thesis/thesis.ps", + www_ps_gz_url = "http://nms.lcs.mit.edu/~snoeren/papers/thesis/thesis.ps.gz" + www_pdf_url = "http://nms.lcs.mit.edu/~snoeren/papers/thesis/thesis.pdf", +} + +@phdthesis{jannotti-phd, + title = "Network Layer Support for Overlay Networks", + author = "John Jannotti", + school = MIT, + year = 2002, month = aug, + www_section = phdtheses, + www_ps_url = PDOSWWW # "/papers/jannotti-phd.ps", + www_ps_gz_url = PDOSWWW # "/papers/jannotti-phd.ps.gz", + www_pdf_url = PDOSWWW # "/papers/jannotti-phd.pdf" +} + +@phdthesis{click:kohler-phd, + title = "The Click modular router", + author = "Eddie Kohler", + school = MIT, + year = 2000, month = nov, + www_section = phdtheses, + www_ps_gz_url = PDOSWWW # "/papers/click:kohler-phd/thesis.ps.gz", + www_pdf_url = PDOSWWW # "/papers/click:kohler-phd/thesis.pdf" +} + +@phdthesis{sfs:mazieres-phd, + title = "Self-certifying File System", + author = "David Mazieres", + school = MIT, + year = 2000, month = may, + www_section = phdtheses, + www_ps_gz_url = PDOSWWW # "/papers/sfs:mazieres-phd.ps.gz" +} + +@phdthesis{tickc:poletto-phd, + title = "Language and compiler support for dynamic code generation", + author = "Massimiliano Poletto", + school = MIT, + year = 1999, month = jun, + www_section = phdtheses, + www_ps_url = PDOSWWW # "/papers/tickc-poletto-phd.ps", + www_ps_gz_url = PDOSWWW # "/papers/tickc-poletto-phd.ps.gz", + www_pdf_url = PDOSWWW # "/papers/tickc-poletto-phd.pdf" +} + +@phdthesis{exo:engler-phd, + title = "The exokernel operating system architecture", + author = "Dawson R. 
Engler", + school = MIT, + year = 1998, month = oct, + www_section = phdtheses, + www_ps_url = PDOSWWW # "/exo/theses/engler/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/engler/thesis.ps.gz", +} + +@phdthesis{fugu:mackenzie-phd, + title = "An efficient virtual network interface in the {FUGU} scalable workstation", + author = "Kenneth M. Mackenzie", + school = MIT, + year = 1998, month = feb, + www_section = phdtheses, + www_ps_gz_url = PDOSWWW # "/exo/theses/kenmac/thesis.ps.gz", +} + +@phdthesis{app-specific-networking:wallach-phd, + title = "High-performance application-specific networking", + author = "Deborah Anne Wallach", + school = MIT, + year = 1997, month = jan, + www_section = phdtheses, + www_ps_url = PDOSWWW # "/exo/theses/kerr/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/kerr/thesis.ps.gz", +} + +@phdthesis{crl:johnson-phd, + title = "High-performance all-software distributed shared memory", + author = "Kirk L. Johnson", + school = MIT, + year = 1995, month = dec, + www_section = phdtheses, + www_ps_gz_url = PDOSWWW # "/papers/crl:johnson-phd.ps.gz", + www_abstract_url = "http://www.cag.lcs.mit.edu/~tuna/papers/thesis.html", +} + +@phdthesis{dyn-comp-migration:hsieh-phd, + title = "Dynamic computation migration in distributed shared memory systems", + author = "Wilson C. Hsieh", + school = MIT, + year = 1995, month = sep, + www_section = phdtheses, + www_pdf_url = PDOSWWW # "/papers/dyn-comp-migration:hsieh-phd.pdf", + note = "Also available as MIT LCS tech report MIT-LCS-TR-665." +} + + +%% MASTERS THESES + +@string{masterstheses = "Master's Theses"} + +@mastersthesis{sfs:savvides-meng, + title = "Access Control Lists for the Self-Certifying Filesystem" + author = "George Savvides", + school = MIT, + year = 2002, month = Aug, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/sfs:savvides-meng.pdf", + www_ps_url = PDOSWWW # "/papers/sfs:savvides-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/sfs:savvides-meng.ps.gz", +} + +@mastersthesis{sfs:euresti-meng, + title = "Self-Certifying Filesystem Implementation for Windows" + author = "David Euresti", + school = MIT, + year = 2002, month = Aug, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/sfs:euresti-meng.pdf", + www_ps_url = PDOSWWW # "/papers/sfs:euresti-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/sfs:euresti-meng.ps.gz", +} + +@mastersthesis{sfs:zeldovich-meng, + title = "Concurrency Control for Multi-Processor Event-Driven Systems" + author = "Nickolai Zeldovich", + school = MIT, + year = 2002, month = May, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/sfs:zeldovich-meng.pdf", + www_ps_url = PDOSWWW # "/papers/sfs:zeldovich-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/sfs:zeldovich-meng.ps.gz", +} + +@mastersthesis{chord:om_p-meng, + title = "A Keyword Set Search System for Peer-to-Peer Networks" + author = "Omprakash D Gnawali", + school = MIT, + year = 2002, month = Jun, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/chord:om_p-meng.pdf", + www_ps_url = PDOSWWW # "/papers/chord:om_p-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:om_p-meng.ps.gz", +} + +@mastersthesis{grid:bac-meng, + title = "The Grid Roofnet: a Rooftop Ad Hoc Wireless Network" + author = "Benjamin A. 
Chambers", + school = MIT, + year = 2002, month = May, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/grid:bac-meng.pdf", + www_ps_url = PDOSWWW # "/papers/grid:bac-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/grid:bac-meng.ps.gz", +} + +@mastersthesis{tarzan:freedman-meng, + title = "A Peer-to-Peer Anonymizing Network Layer" + author = "Michael J. Freedman", + school = MIT, + year = 2002, month = May, + www_section = masterstheses, + www_abstract_url = PDOSWWW # "/papers/tarzan:freedman-meng/index.html", + www_pdf_url = PDOSWWW # "/papers/tarzan:freedman-meng/tarzan:freedman-meng.pdf", + www_ps_url = PDOSWWW # "/papers/tarzan:freedman-meng/tarzan:freedman-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/tarzan:freedman-meng/tarzan:freedman-meng.ps.gz", +} + + +@mastersthesis{chord:tburkard-meng, + title = "Herodotus: A Peer-to-Peer Web Archival System" + author = "Timo Burkard", + school = MIT, + year = 2002, month = May, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/chord:tburkard-meng.pdf", + www_ps_url = PDOSWWW # "/papers/chord:tburkard-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:tburkard-meng.ps.gz", +} + +@mastersthesis{cfs:dabek-meng, + title = "A Cooperative File System" + author = "Frank Dabek", + school = MIT, + year = 2001, month = September, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/chord:dabek_thesis/dabek.pdf", + www_ps_url = PDOSWWW # "/papers/chord:dabek_thesis/dabek.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:dabek_thesis/tyan-meng.ps.gz", +} + +@mastersthesis{chord:tyan-meng, + title = "A Case Study of Server Selection", + author = "Tina Tyan", + school = MIT, + year = 2001, month = September, + www_section = masterstheses, + www_pdf_url = PDOSWWW # "/papers/chord:tyan-meng.pdf", + www_ps_url = PDOSWWW # "/papers/chord:tyan-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/chord:tyan-meng.ps.gz", +} + +@mastersthesis{click:gil-ms, + title = "MULTOPS: a data structure for denial-of-service attack detection", + author = "Thomer M. 
Gil", + school = "Vrije Universiteit", + year = 2000, month = August, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/click:gil-ms.ps.gz", +} + +@mastersthesis{click:sit-ms, + title = "A Study of Caching in the Internet Domain Name System", + author = "Emil Sit", + school = MIT, + year = 2000, month = may, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/click:sit-ms.ps.gz", +} + + +@mastersthesis{sfs:kaminsky-ms, + title = "Flexible Key Management with SFS Agents", + author = "Michael Kaminsky", + school = MIT, + year = 2000, month = may, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/sfs:kaminsky-ms.ps.gz", + www_pdf_url = PDOSWWW # "/papers/sfs:kaminsky-ms.pdf", +} + +@mastersthesis{sfs:almeida-ms, + title = "Framework for Implementing File Systems in Windows NT", + author = "Danilo Almeida", + school = MIT, + year = 1998, month = may, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/sfs:almeida-ms.ps.gz", + www_pdf_url = PDOSWWW # "/papers/sfs:almeida-ms.pdf", +} + +@mastersthesis{sfs:rimer-ms, + title = "The Secure File System under Windows NT", + author = "Matthew Rimer", + school = MIT, + year = 1999, month = June, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/sfs:rimer-ms.ps.gz", + www_pdf_url = PDOSWWW # "/papers/sfs:rimer-ms.pdf", +} + +@mastersthesis{prolac:montgomery-meng, + title = "A fast {Prolac} {TCP} for the real world", + author = "Montgomery, Jr., David Rogers", + school = MIT, + year = 1999, month = may, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/prolac:montgomery-meng.ps.gz", +} + +@mastersthesis{exo:coffing-meng, + title = "An x86 Protected Mode Virtual Machine Monitor for the MIT Exokernel", + author = "Charles L. Coffing", + school = MIT, + year = 1999, month = May, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/exo:coffing-meng.ps", + www_ps_gz_url = PDOSWWW # "/papers/exo:coffing-meng.ps.gz", +} + +@mastersthesis{exo:chen-meng, + title = "Multiprocessing with the Exokernel Operating System", + author = "Benjie Chen", + school = MIT, + year = 2000, month = February, + www_section = masterstheses, + www_html_url = PDOSWWW # "/papers/exo:chen-meng.html", + www_ps_url = PDOSWWW # "/papers/exo:chen-meng.ps", + www_pdf_url = PDOSWWW # "/papers/exo:chen-meng.pdf", + www_ps_gz_url = PDOSWWW # "/papers/exo:chen-meng.ps.gz", +} + +@mastersthesis{exo:candea-meng, + title = "Flexible and efficient sharing of protected abstractions", + author = "George M. Candea", + school = MIT, + year = 1998, month = may, + www_section = masterstheses, + www_abstract_url = PDOSWWW # "/papers/candea-meng.html", + www_ps_url = PDOSWWW # "/papers/ProtAbs.ps", + www_ps_gz_url = PDOSWWW # "/papers/ProtAbs.ps.gz", + www_pdf_url = PDOSWWW # "/papers/ProtAbs.pdf", +} + +@mastersthesis{exo-os:jj-meng, + title = "Applying Exokernel Principles to Conventional Operating Systems", + author = "John Jannotti", + school = MIT, + year = 1998, month = feb, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/jj-meng-exo-feb98.ps", + www_ps_gz_url = PDOSWWW # "/papers/jj-meng-exo-feb98.ps.gz", + www_pdf_url = PDOSWWW # "/papers/jj-meng-exo-feb98.pdf", +} + +@mastersthesis{pan:nygren-meng, + title = "The design and implementation of a high-performance active network node", + author = "Erik L. 
Nygren", + school = MIT, + year = 1998, month = feb, + www_section = masterstheses, + www_abstract_url = PDOSWWW # "/papers/nygren-mengthesis-pan-feb98.html", + www_ps_url = PDOSWWW # "/papers/nygren-mengthesis-pan-feb98.ps", + www_ps_gz_url = PDOSWWW # "/papers/nygren-mengthesis-pan-feb98.ps.gz", + www_pdf_url = PDOSWWW # "/papers/nygren-mengthesis-pan-feb98.pdf", +} + +@mastersthesis{exo:wyatt-meng, + title = "Shared libraries in an exokernel operating system", + author = "Douglas Karl Wyatt", + school = MIT, + year = 1997, month = sep, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/exo/theses/dwyatt/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/dwyatt/thesis.ps.gz", +} + +@mastersthesis{prolac:kohler-ms, + title = "Prolac: a language for protocol compilation", + author = "Eddie Kohler", + school = MIT, + year = 1997, month = sep, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/prolac:kohler-ms.ps.gz", + www_pdf_url = PDOSWWW # "/papers/prolac:kohler-ms.pdf", +} + +@mastersthesis{sfs:mazieres-ms, + title = "Security and decentralized control in the {SFS} global file system", + author = "David Mazi{\`e}res", + school = MIT, + year = 1997, month = aug, + www_section = masterstheses, + www_ps_gz_url = PDOSWWW # "/papers/sfs:mazieres-ms.ps.gz" +} + +@mastersthesis{exo:pinckney-meng, + title = "Operating system extensibility through event capture", + author = "Thomas {Pinckney III}", + school = MIT, + year = 1997, month = feb, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/exo/theses/pinckney/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/pinckney/thesis.ps.gz", +} + +@mastersthesis{exo:briceno-meng, + title = "Decentralizing {UNIX} abstractions in the exokernel architecture", + author = "H{\'e}ctor Manuel {Brice{\~n}o Pulido}", + school = MIT, + year = 1997, month = feb, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/exo/theses/hbriceno/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/hbriceno/thesis.ps.gz", +} + +@mastersthesis{rover:nntp, + title = "The {Rover} {NNTP} proxy", + author = "Constantine Cristakos", + school = MIT, + year = 1996, month = jun, + type = "Advanced Undergraduate Project", + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/DeanAUP.ps", + www_ps_gz_url = PDOSWWW # "/papers/DeanAUP.ps.gz", +} + +@mastersthesis{exo:grimm-ms, + title = "Exodisk: maximizing application control over storage management", + author = "Robert Grimm", + school = MIT, + year = 1996, month = may, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/exo/theses/rgrimm/thesis.ps", + www_ps_gz_url = PDOSWWW # "/exo/theses/rgrimm/thesis.ps.gz", +} + +@mastersthesis{rover:tauber-ms, + title = "Issues in building mobile-aware applications with the {Rover} toolkit", + author = "Joshua A. Tauber", + school = MIT, + year = 1996, month = may, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/JoshThesis.ps", + www_ps_gz_url = PDOSWWW # "/papers/JoshThesis.ps.gz", +} + +@mastersthesis{rover-mosaic:delespinasse-thesis, + title = "{Rover} {Mosaic}: e-mail communication for a full-function {Web} browser", + author = "Alan F. {deLespinasse}", + school = MIT, + year = 1995, month = jun, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/RoverMosaicThesis.ps", + www_ps_gz_url = PDOSWWW # "/papers/RoverMosaicThesis.ps.gz", +} + +@mastersthesis{r2over-mosaic:delespinasse-thesis, + title = {{Rover} {Mosaic}: e-mail communication for a full-function {Web} browser}, + author = "Alan F. 
{deLespinasse}", + school = MIT, + year = 1995, month = jun, + www_section = masterstheses, + www_ps_url = PDOSWWW # "/papers/RoverMosaicThesis.ps", + www_ps_gz_url = PDOSWWW # "/papers/RoverMosaicThesis.ps.gz", +} + + +%% PROCEEDINGS + +@proceedings{asplos6, + booktitle = "Proceedings of the 6th International Conference on Architectural Support for Programming Languages and Operating Systems ({ASPLOS-VI})", + year = 1994, month = oct, + address = "San Jose, California" +} + +@proceedings{ccs5, + booktitle = "Proceedings of the 5th {ACM} Conference on Computer and Communications Security ({CCS-5})", + year = 1998, month = nov, + address = "San Francisco, California", + bookurl = "http://www.bell-labs.com/user/reiter/ccs5/" +} + +@proceedings{ccs9, + booktitle = "Proceedings of the 9th {ACM} Conference on Computer and Communications Security ({CCS-9})", + year = 2002, month = nov, + address = "Washington, D.C.", + bookurl = "http://www.acm.org/sigs/sigsac/ccs/" +} + +@proceedings{dcs16, + booktitle = "Proceedings of the 16th International Conference on Distributed Computing Systems", + organization = IEEECompSoc, + year = 1996, month = may, + address = "Hong Kong", +} + +@proceedings{dcs15, + booktitle = "Proceedings of the 15th International Conference on Distributed Computing Systems", + organization = IEEECompSoc, + year = 1995, month = jun, + address = "Vancouver, British Columbia", +} + +@proceedings{hotnets1, + booktitle = "Proceedings of the First {W}orkshop on {H}ot {T}opics in {N}etworks ({HotNets-I})", + year = 2002, month = oct, + organization = "{ACM SIGCOMM}", + address = "Princeton, New Jersey", + bookurl = "http://www.cs.washington.edu/hotnets/", +} + +@proceedings{hotos8, + booktitle = "Proceedings of the 8th {W}orkshop on {H}ot {T}opics in {O}perating {S}ystems ({HotOS-VIII})", + year = 2001, month = may, + organization = IEEECompSoc, + address = "Schloss Elmau, Germany", + bookurl = "http://i30www.ira.uka.de/conferences/HotOS/", +} + +@proceedings{hotos6, + booktitle = "Proceedings of the 6th {W}orkshop on {H}ot {T}opics in {O}perating {S}ystems ({HotOS-VI})", + year = 1997, month = may, + organization = IEEECompSoc, + address = "Chatham, Cape Cod, Massachusetts", + bookurl = "http://www.eecs.harvard.edu/hotos", +} + +@proceedings{hotos5, + booktitle = "Proceedings of the 5th {W}orkshop on {H}ot {T}opics in {O}perating {S}ystems ({HotOS-V})", + year = 1995, month = may, + organization = IEEECompSoc, + address = "Orcas Island, Washington", + bookurl = "http://www.research.microsoft.com/research/os/HotOs/", +} + +@proceedings{mobicom96, + booktitle = "Proceedings of the 2nd {ACM} International Conference on Mobile Computing and Networking ({MobiCom} '96)", + year = 1996, month = nov, + address = "Rye, New York", + bookurl = "http://www.acm.org/sigmobile/conf/mobicom96/", +} + +@proceedings{usenix02, + booktitle = "Proceedings of the 2002 USENIX Annual Technical Conference (USENIX '02)", + year = 2002, month = jun, + address = "Monterey, California", + bookurl = "http://www.usenix.org/events/usenix02/", +} + +@proceedings{usenix01, + booktitle = "Proceedings of the 2001 USENIX Annual Technical Conference (USENIX '01)", + year = 2001, month = jun, + address = "Boston, Massachusetts", + bookurl = "http://www.usenix.org/events/usenix01/", +} + +@proceedings{mobicom01, + booktitle = "Proceedings of the 7th {ACM} International Conference on Mobile Computing and Networking", + year = 2001, month = jul, + address = "Rome, Italy", + bookurl = 
"http://www.research.ibm.com/acm_sigmobile_conf_2001/" +} + +@proceedings{mobicom00, + booktitle = "Proceedings of the 6th {ACM} International Conference on Mobile Computing and Networking ({MobiCom} '00)", + year = 2000, month = aug, + address = "Boston, Massachusetts", + bookurl = "http://www.argreenhouse.com/mobicom2000/", +} + +@proceedings{openarch99, + booktitle = "Proceedings of the 2nd {IEEE} Conference on Open Architectures and Network Programming ({OpenArch} '99)", + year = 1999, month = mar, + address = "New York, New York", + bookurl = "http://www.ctr.columbia.edu/comet/activities/openarch99/", +} + +@proceedings{osdi5, + booktitle = "Proceedings of the 5th {USENIX} {S}ymposium on {O}perating {S}ystems {D}esign and {I}mplementation ({OSDI} '02)", + year = 2002, month = dec, + address = "Boston, Massachusetts", +} + +@proceedings{osdi1, + booktitle = "Proceedings of the 1st {USENIX} {S}ymposium on {O}perating {S}ystems {D}esign and {I}mplementation ({OSDI} '94)", + year = 1994, month = nov, + address = "Monterey, California", + bookurl = "http://www2.cs.utah.edu/~lepreau/osdi94/", +} + +@proceedings{osdi4, + booktitle = "Proceedings of the 4th {USENIX} {S}ymposium on {O}perating {S}ystems {D}esign and {I}mplementation ({OSDI} 2000)", + year = 2000, month = oct, + address = "San Diego, California", + bookurl = "http://www.usenix.org/events/osdi2000/", +} + +@proceedings{pldi97, + booktitle = "Proceedings of the {ACM} {SIGPLAN} '97 Conference on Programming Design and Implementation ({PLDI} '97)", + year = 1997, month = jun, + address = "Las Vegas, Nevada", + bookurl = "http://cs-www.bu.edu/pub/pldi97/", +} + +@proceedings{pldi96, + booktitle = "Proceedings of the {ACM} {SIGPLAN} '96 Conference on Programming Design and Implementation ({PLDI} '96)", + year = 1996, month = may, + address = "Philadelphia, Pennsylvania", +} + +@proceedings{popl96, + booktitle = "Proceedings of the 23rd {ACM} {SIGPLAN}-{SIGACT} Symposium on Principles of Programming Languages ({POPL} '96)", + year = 1996, month = jan, + address = "St. 
Petersburg Beach, Florida", + bookurl = "ftp://parcftp.xerox.com/pub/popl96/popl96.html" +} + +@proceedings{ppopp95, + booktitle = "Proceedings of the 5th {ACM} {SIGPLAN} Symposium on Principles and Practice of Parallel Programming ({PPoPP} '95)", + year = 1995, month = jul, + address = "Santa Barbara, California", + bookurl = "http://www.cs.ucsb.edu/Conferences/PPOPP95/", +} + +@proceedings{sigcomm99, + booktitle = "Proceedings of the {ACM} {SIGCOMM} '99 Conference: Applications, Technologies, Architectures, and Protocols for Computer Communication", + year = 1999, month = aug, + address = "Cambridge, Massachusetts", + bookurl = "http://www.acm.org/sigcomm/sigcomm99/", +} + +@proceedings{sigcomm96, + booktitle = "Proceedings of the {ACM} {SIGCOMM} '96 Conference: Applications, Technologies, Architectures, and Protocols for Computer Communication", + year = 1996, month = aug, + address = "Stanford, California", + bookurl = "http://www.acm.org/sigcomm/sigcomm96/", +} + +@proceedings{sigcommimw01, + booktitle = "Proceedings of the {ACM} {SIGCOMM} Internet Measurement Workshop '01", + year = 2001, month = nov, + address = "San Francisco, California", + bookurl = "http://www.acm.org/sigcomm/measworkshop2001.html" +} + +@proceedings{sigcomm01, + booktitle = "Proceedings of the {ACM} {SIGCOMM} '01 Conference", + year = 2001, month = aug, + address = "San Diego, California", + bookurl = "http://www.acm.org/sigcomm/sigcomm2001/", +} + +@proceedings{iptps02, + booktitle = "Proceedings of the 1st International Workshop on Peer-to-Peer Systems (IPTPS)", + year = 2002, month = mar, + address = "Cambridge, MA", + bookurl = "http://www.cs.rice.edu/Conferences/IPTPS02/" +} + +@proceedings{sigops-euro9, + booktitle = "Proceedings of the 9th {ACM} {SIGOPS} {E}uropean workshop: Beyond the {PC}: New Challenges for the Operating System", + year = 2000, month = sep, + address = "Kolding, Denmark", + bookurl = "http://www.diku.dk/ew2000/", +} + +@proceedings{sigops-euro8, + booktitle = "Proceedings of the 8th {ACM} {SIGOPS} {E}uropean workshop: Support for composing distributed applications", + year = 1998, month = sep, + address = "Sintra, Portugal", + bookurl = "http://www.dsg.cs.tcd.ie/~vjcahill/sigops98/", +} + +@proceedings{sigops-euro7, + booktitle = "Proceedings of the 7th {ACM} {SIGOPS} {E}uropean workshop: Systems support for worldwide applications", + year = 1996, month = sep, + address = "Connemara, Ireland", + bookurl = "http://mosquitonet.stanford.edu/sigops96/", +} + +@proceedings{sigops-euro6, + booktitle = "Proceedings of the 6th {ACM} {SIGOPS} {E}uropean workshop: Matching operating systems to application needs", + year = 1994, month = sep, + address = "Dagstuhl Castle, Wadern, Germany", +} + +@proceedings{sosp18, + booktitle = "Proceedings of the 18th " # SOSP # " ({SOSP} '01)", + year = 2001, month = oct, + address = "Chateau Lake Louise, Banff, Canada", + bookurl = "http://www.cs.ucsd.edu/sosp01/", +} + +@proceedings{sosp17, + booktitle = "Proceedings of the 17th " # SOSP # " ({SOSP} '99)", + year = 1999, month = dec, + address = "Kiawah Island, South Carolina", + bookurl = "http://www.diku.dk/sosp99/", +} + +@proceedings{sosp16, + booktitle = "Proceedings of the 16th " # SOSP # " ({SOSP} '97)", + year = 1997, month = oct, + address = "Saint-Mal{\^o}, France", + bookurl = "http://www.cs.washington.edu/sosp16", +} + +@proceedings{sosp15, + booktitle = "Proceedings of the 15th " # SOSP # " ({SOSP} '95)", + year = 1995, month = dec, + address = "Copper Mountain Resort, Colorado", +} + 
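+% Illustrative cross-referencing note (hypothetical key and path): a paper
+% entry above inherits booktitle, year, and address from one of these
+% @proceedings records by naming it in "crossref", and assembles its URLs
+% with the PDOSWWW string macro, e.g.
+%
+%   @inproceedings{example:sosp15,
+%     title    = "An example paper",
+%     author   = "A. Author",
+%     crossref = "sosp15",
+%     www_pdf_url = PDOSWWW # "/papers/example.pdf",
+%   }
+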
+@proceedings{sosp14, + booktitle = "Proceedings of the 14th " # SOSP # " ({SOSP} '93)", + year = 1993, month = dec, + address = "Asheville, North Carolina", +} + +@proceedings{supercomp96, + booktitle = "Supercomputing '96 Conference Proceedings: The international conference on high performance computing and communications", + organization = ACMabbr, + year = 1996, month = nov, + address = "Pittsburgh, Pennsylvania", + bookurl = "http://www.supercomp.org/sc96/", +} + +@proceedings{usenix97, + booktitle = "Proceedings of the {USENIX} 1997 Annual Technical Conference", + year = 1997, month = jan, + address = "Anaheim, California", + bookurl = "http://www.usenix.org/ana97/", +} + +@proceedings{wcsss96, + booktitle = "Workshop Record of {WCSSS} '96: The Inaugural Workshop on Compiler Support for Systems Software", + organization = "{ACM} {SIGPLAN}", + year = 1996, month = feb, + address = "Tuscon, Arizona", + bookurl = "http://www.cs.arizona.edu/wcsss96/" +} + +@proceedings{wcsss99, + booktitle = "Workshop Record of {WCSSS} '99: The 2nd {ACM} {SIGPLAN} Workshop on Compiler Support for Systems Software", + year = 1999, month = may, + address = "Atlanta, Georgia", + bookurl = "http://www.irisa.fr/compose/wcsss99/" +} + +@proceedings{wmcsa94, + booktitle = "Proceedings of the Workshop on Mobile Computing Systems and Applications ({WMCSA} '94)", + organization = IEEECompSoc, + year = 1994, month = dec, + address = "Santa Cruz, California", +} + +@proceedings{wwos4, + booktitle = "Proceedings of the 4th Workshop on Workstation Operating Systems", + organization = IEEECompSoc, + year = 1993, month = oct, + address = "Napa, California" +} + +@proceedings{www94, + booktitle = "Proceedings of the 2nd International {WWW} Conference: Mosaic and the Web", + year = 1994, month = oct, + address = "Chicago, Illinois", + bookurl = "http://www.ncsa.uiuc.edu/SDG/IT94/IT94Info-old.html", +} + +@proceedings{sec10, + booktitle = "Proceedings of the 10th {USENIX} {S}ecurity {S}ymposium", + year = 2001, month = aug, + address = "Washington, D.C.", + bookurl = "http://www.usenix.org/events/sec01/", +} + + +%% JOURNALS + +@journal{journal:ieee-toc, + journal = IEEEabbr # " Transactions on Computers", +} + +@journal{journal:osr, + journal = "Operating Systems Review", + organization = ACM, +} + +@journal{journal:toplas, + journal = ACMabbr # " Transactions on Programming Languages and Systems", +} + +@journal{journal:tocs, + journal = ACMabbr # " Transactions on Computer Systems", +} + +@journal{journal:winet, + journal = ACMabbr # " Wireless Networks", +} + diff --git a/i2p2www/anonbib/tests.py b/i2p2www/anonbib/tests.py new file mode 100644 index 00000000..3caa1674 --- /dev/null +++ b/i2p2www/anonbib/tests.py @@ -0,0 +1,86 @@ +#!/usr/bin/python2 +# Copyright 2004-2008, Nick Mathewson. See LICENSE for licensing info. 
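+#
+# Usage sketch (assumes a Python 2 interpreter and that the sibling modules,
+# e.g. BibTeX.py and metaphone.py, are importable from this directory):
+#
+#     $ python2 tests.py -v
+#
+# unittest.main() at the bottom collects and runs the test cases defined here.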
+ +"""Unit tests for anonbib.""" + +import BibTeX +import metaphone +#import reconcile +#import writeHTML +#import updateCache + +import unittest + +class MetaphoneTests(unittest.TestCase): + def testMetaphone(self): + pass + +class BibTeXTests(unittest.TestCase): + def testTranslation(self): + ut = BibTeX.url_untranslate + self.assertEquals(ut("Fred"),"Fred") + self.assertEquals(ut("Hello, World."), "Hello_2c_20World.") + + te = BibTeX.TeXescapeURL + ute = BibTeX.unTeXescapeURL + self.assertEquals(te("http://example/~me/my_file"), + r"http://example/\{}~me/my\_file") + self.assertEquals(ute(r"http:{}//example/\{}~me/my\_file"), + "http://example/~me/my_file") + + h = BibTeX.htmlize + self.assertEquals(h("Hello, world"), "Hello, world") + self.assertEquals(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"), + "áèí(í)ö&" + "û") + self.assertEquals(h(r"\~n and \c{c}"), "ñ and ç") + self.assertEquals(h(r"\AE---a ligature"), "Æ—a ligature") + self.assertEquals(h(r"{\it 33}"), " 33") + self.assertEquals(h(r"Pages 33--99 or vice--versa?"), + "Pages 33-99 or vice–versa?") + + t = BibTeX.txtize + self.assertEquals(t("Hello, world"), "Hello, world") + self.assertEquals(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"), + "aei(i)o&u") + self.assertEquals(t(r"\~n and \c{c}"), "n and c") + self.assertEquals(t(r"\AE---a ligature"), "AE---a ligature") + self.assertEquals(t(r"{\it 33}"), " 33") + self.assertEquals(t(r"Pages 33--99 or vice--versa?"), + "Pages 33--99 or vice--versa?") + + def authorsParseTo(self,authors,result): + pa = BibTeX.parseAuthor(authors) + self.assertEquals(["|".join(["+".join(item) for item in + [a.first,a.von,a.last,a.jr]]) + for a in pa], + result) + + def testAuthorParsing(self): + pa = BibTeX.parseAuthor + PA = BibTeX.ParsedAuthor + apt = self.authorsParseTo + + apt("Nick A. Mathewson and Roger Dingledine", + ["Nick+A.||Mathewson|", "Roger||Dingledine|"]) + apt("John van Neumann", ["John|van|Neumann|"]) + apt("P. Q. Z. de la Paz", ["P.+Q.+Z.|de+la|Paz|"]) + apt("Cher", ["||Cher|"]) + apt("Smith, Bob", ["Bob||Smith|"]) + apt("de Smith, Bob", ["Bob|de|Smith|"]) + apt("de Smith, Bob Z", ["Bob+Z|de|Smith|"]) + #XXXX Fix this. + #apt("Roberts Smith Wilkins, Bob Z", ["Bob+Z||Smith+Wilkins|"]) + apt("Smith, Jr, Bob", ["Bob||Smith|Jr"]) + + #XXXX Fix this. + #apt("R Jones, Jr.", ["R||Jones|Jr."]) + apt("Smith, Bob and John Smith and Last,First", + ["Bob||Smith|", "John||Smith|", "First||Last|"]) + apt("Bob Smith and John Smith and John Doe", + ["Bob||Smith|", "John||Smith|", "John||Doe|"]) + + +if __name__ == '__main__': + unittest.main() + diff --git a/i2p2www/anonbib/upb.gif b/i2p2www/anonbib/upb.gif new file mode 100644 index 00000000..58528283 Binary files /dev/null and b/i2p2www/anonbib/upb.gif differ diff --git a/i2p2www/anonbib/updateCache.py b/i2p2www/anonbib/updateCache.py new file mode 100755 index 00000000..7b7fe645 --- /dev/null +++ b/i2p2www/anonbib/updateCache.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +"""Download files in bibliography into a local cache. 
+""" + +import os +import sys +import signal +import time +import gzip + +import BibTeX +import config +import urllib2 +import getopt +import socket +import errno +import httplib + +FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ] +BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ] + +class UIError(Exception): + pass + +def tryUnlink(fn): + try: + os.unlink(fn) + except OSError: + pass + +def getCacheFname(key, ftype, section): + return BibTeX.smartJoin(config.OUTPUT_DIR,config.CACHE_DIR, + section, + "%s.%s"%(key,ftype)) + +def downloadFile(key, ftype, section, url,timeout=None): + if timeout is None: + timeout = config.DOWNLOAD_CONNECT_TIMEOUT + fname = getCacheFname(key, ftype, section) + parent = os.path.split(fname)[0] + if not os.path.exists(parent): + os.makedirs(parent) + + fnameTmp = fname+".tmp" + fnameURL = fname+".url" + tryUnlink(fnameTmp) + + def sigalrmHandler(sig,_): + pass + signal.signal(signal.SIGALRM, sigalrmHandler) + signal.alarm(timeout) + try: + try: + infile = urllib2.urlopen(url) + except httplib.InvalidURL, e: + raise UIError("Invalid URL %s: %s"%(url,e)) + except IOError, e: + raise UIError("Cannot connect to url %s: %s"%(url,e)) + except socket.error, e: + if getattr(e,"errno",-1) == errno.EINTR: + raise UIError("Connection timed out to url %s"%url) + else: + raise UIError("Error connecting to %s: %s"%(url, e)) + finally: + signal.alarm(0) + + mode = 'w' + if ftype in BIN_FILE_TYPES: + mode = 'wb' + outfile = open(fnameTmp, mode) + try: + while 1: + s = infile.read(1<<16) + if not s: break + outfile.write(s) + finally: + infile.close() + outfile.close() + + urlfile = open(fnameURL, 'w') + print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + if "\n" in url: url = url.replace("\n", " ") + print >>urlfile, url + urlfile.close() + + os.rename(fnameTmp, fname) + +def getURLs(entry): + r = {} + for ftype in FILE_TYPES: + ftype2 = ftype.replace(".", "_") + url = entry.get("www_%s_url"%ftype2) + if url: + r[ftype] = url.strip().replace("\n", " ") + return r + +def getCachedURL(key, ftype, section): + fname = getCacheFname(key, ftype, section) + urlFname = fname+".url" + if not os.path.exists(fname) or not os.path.exists(urlFname): + return None + f = open(urlFname, 'r') + lines = f.readlines() + f.close() + if len(lines) != 2: + print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname + return lines[1].strip() + +def downloadAll(bibtex, missingOnly=0): + """returns list of tuples of key, ftype, url, error""" + errors = [] + for e in bibtex.entries: + urls = getURLs(e) + key = e.key + section = e.get("www_cache_section", ".") + for ftype, url in urls.items(): + if missingOnly: + cachedURL = getCachedURL(key, ftype, section) + if cachedURL == url: + print >>sys.stderr,"Skipping",url + continue + elif cachedURL is not None: + print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype) + else: + print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype) + try: + downloadFile(key, ftype, section, url) + print "Downloaded",url + except UIError, e: + print >>sys.stderr, str(e) + errors.append((key,ftype,url,str(e))) + except (IOError, socket.error), e: + msg = "Error downloading %s: %s"%(url,str(e)) + print >>sys.stderr, msg + errors.append((key,ftype,url,msg)) + if urls.has_key("ps") and not urls.has_key("ps.gz"): + # Say, this is something we'd like to have gzipped locally. 
+ psFname = getCacheFname(key, "ps", section) + psGzFname = getCacheFname(key, "ps.gz", section) + if os.path.exists(psFname) and not os.path.exists(psGzFname): + # This is something we haven't gzipped yet. + print "Compressing a copy of",psFname + outf = gzip.GzipFile(psGzFname, "wb") + inf = open(psFname, "rb") + while 1: + s = inf.read(4096) + if not s: + break + outf.write(s) + outf.close() + inf.close() + + return errors + +if __name__ == '__main__': + if len(sys.argv) == 2: + print "Loading from %s"%sys.argv[1] + else: + print >>sys.stderr, "Expected a single configuration file as an argument" + sys.exit(1) + config.load(sys.argv[1]) + + if config.CACHE_UMASK != None: + os.umask(config.CACHE_UMASK) + + bib = BibTeX.parseFile(config.MASTER_BIB) + downloadAll(bib,missingOnly=1) diff --git a/i2p2www/anonbib/ups.gif b/i2p2www/anonbib/ups.gif new file mode 100644 index 00000000..36f01245 Binary files /dev/null and b/i2p2www/anonbib/ups.gif differ diff --git a/i2p2www/anonbib/venue-checklist.txt b/i2p2www/anonbib/venue-checklist.txt new file mode 100644 index 00000000..139a223a --- /dev/null +++ b/i2p2www/anonbib/venue-checklist.txt @@ -0,0 +1,41 @@ +This file is to keep track of which volumes of which publications have +been combed for anonymity papers and which we still have to add. + +=== DONE: + +ExampleConference (through 2008) + +PETS 2000-2003 + +=== CLAIMED: + +PETS 2000-2010 -- Nick (claimed 6/16) +ESORICS 1990-2010 -- Nick (claimed 6/16) +CCS -- George (claimed 6/17) +USENIX Security ("Oakland") -- George (claimed 6/17) + +=== SHOULD DO: + +Infohiding +IEEE Security and privacy +NDSS +WPES +WEIS +Financial Crypto +Eurocrypt +Asiacrypt + +Search: Papers that cite Chaum's paper +Search: Papers that cite the Tor paper +Search: Papers that cite the original onion routing papers +Search: Papers mentioning "anonymity" or "anonymous" +Search: Papers mentioning "mixnet" or "mix-net" + +=== UNDERSERVED CONTENT; PLEASE SUGGEST SEARCHES AND VENUES + +Private information retrieval; PIR +Anti-censorship; censorship +Location privacy +Anonymous credentials +Anonymizing data +Secure multiparty computation diff --git a/i2p2www/anonbib/views.py b/i2p2www/anonbib/views.py new file mode 100644 index 00000000..7e1434cf --- /dev/null +++ b/i2p2www/anonbib/views.py @@ -0,0 +1,70 @@ +from flask import render_template + +from i2p2www import ANONBIB_CFG, ANONBIB_FILE +from i2p2www.anonbib import BibTeX, config + +def papers_list(tag='', choice='date'): + config.load(ANONBIB_CFG) + rbib = BibTeX.parseFile(ANONBIB_FILE) + if tag: + rbib = [ b for b in rbib.entries if tag in b.get('www_tags', '').split() ] + else: + rbib = rbib.entries + + if choice == 'topic': + sectionType = 'Topics' + rbib = BibTeX.sortEntriesBy(rbib, 'www_section', 'ZZZZZZZZZZZZZZ') + rbib = BibTeX.splitSortedEntriesBy(rbib, 'www_section') + if rbib[-1][0].startswith(""): + rbib[-1] = ("Miscellaneous", rbib[-1][1]) + + rbib = [ (s, BibTeX.sortEntriesByDate(ents)) + for s, ents in rbib + ] + elif choice == 'author': + sectionType = 'Authors' + rbib, url_map = BibTeX.splitEntriesByAuthor(rbib) + else: + sectionType = 'Years' + choice = 'date' + rbib = BibTeX.sortEntriesByDate(rbib) + rbib = BibTeX.splitSortedEntriesBy(rbib, 'year') + + bib = { + 'tags': config.ALL_TAGS, + 'tag_titles': config.TAG_TITLES, + 'tag_short_titles': config.TAG_SHORT_TITLES, + 'tag': tag, + 'sectiontypes': sectionType, + 'field': choice, + } + + sections = [] + for section, entries in rbib: + s = { + 'name': section, + 'slug': BibTeX.url_untranslate(section), + 
'entries': entries, + } + sections.append(s) + bib['sections'] = sections + + return render_template('papers/list.html', bib=bib) + +def papers_bibtex(tag=None): + config.load(ANONBIB_CFG) + rbib = BibTeX.parseFile(ANONBIB_FILE) + if tag: + rbib = [ b for b in rbib.entries if tag in b.get('www_tags', '').split() ] + else: + rbib = rbib.entries + entries = [ (ent.key, ent) for ent in rbib ] + entries.sort() + entries = [ ent[1] for ent in entries ] + + bib = { + 'title': 'Papers on I2P', + 'entries': rbib, + } + + return render_template('papers/bibtex.html', bib=bib) diff --git a/i2p2www/anonbib/writeHTML.py b/i2p2www/anonbib/writeHTML.py new file mode 100755 index 00000000..19a7c146 --- /dev/null +++ b/i2p2www/anonbib/writeHTML.py @@ -0,0 +1,246 @@ +#!/usr/bin/python +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +"""Generate indices by author, topic, date, and BibTeX key.""" + +import sys +import re +import os +import json + +assert sys.version_info[:3] >= (2,2,0) +os.umask(022) + +import BibTeX +import config + +def getTemplate(name): + f = open(name) + template = f.read() + f.close() + template_s, template_e = template.split("%(entries)s") + return template_s, template_e + +def pathLength(s): + n = 0 + while s: + parent, leaf = os.path.split(s) + if leaf != '' and leaf != '.': + n += 1 + s = parent + return n + +def writeBody(f, sections, section_urls, cache_path, base_url): + '''f: an open file + sections: list of (sectionname, [list of BibTeXEntry]) + section_urls: map from sectionname to external url''' + for s, entries in sections: + u = section_urls.get(s) + sDisp = re.sub(r'\s+', ' ', s.strip()) + sDisp = sDisp.replace(" ", " ") + if u: + print >>f, ('
<li><a name="%s"></a><a href="%s">%s</a>'%(
+                (BibTeX.url_untranslate(s), u, sDisp)))
+        else:
+            print >>f, ('<li><a name="%s"></a>%s'%(
+                BibTeX.url_untranslate(s),sDisp))
+        print >>f, "<ul>"
+        for e in entries:
+            print >>f, e.to_html(cache_path=cache_path, base_url=base_url)
+        print >>f, "</ul></li>"
+
+def writeHTML(f, sections, sectionType, fieldName, choices,
+              tag, config, cache_url_path, section_urls={}):
+    """sections: list of (sectionname, [list of BibTeXEntry])'''
+       sectionType: str
+       fieldName: str
+       choices: list of (choice, url)"""
+
+    title = config.TAG_TITLES[tag]
+    short_title = config.TAG_SHORT_TITLES[tag]
+    #
+    secStr = []
+    for s, _ in sections:
+        hts = re.sub(r'\s+', ' ', s.strip())
+        hts = s.replace(" ", "&nbsp;")
+        secStr.append("<p><a href='#%s'>%s</a></p>
    \n"% + ((BibTeX.url_untranslate(s),hts))) + secStr = "".join(secStr) + + # + tagListStr = [] + st = config.TAG_SHORT_TITLES.keys() + st.sort() + root = "../"*pathLength(config.TAG_DIRECTORIES[tag]) + if root == "": root = "." + for t in st: + name = config.TAG_SHORT_TITLES[t] + if t == tag: + tagListStr.append(name) + else: + url = BibTeX.smartJoin(root, config.TAG_DIRECTORIES[t], "date.html") + tagListStr.append("%s"%(url, name)) + tagListStr = " | ".join(tagListStr) + + # + choiceStr = [] + for choice, url in choices: + if url: + choiceStr.append("%s"%(url, choice)) + else: + choiceStr.append(choice) + + choiceStr = (" | ".join(choiceStr)) + + fields = { 'command_line' : "", + 'sectiontypes' : sectionType, + 'choices' : choiceStr, + 'field': fieldName, + 'sections' : secStr, + 'otherbibs' : tagListStr, + 'title': title, + 'short_title': short_title, + "root" : root, + } + + header, footer = getTemplate(config.TEMPLATE_FILE) + print >>f, header%fields + writeBody(f, sections, section_urls, cache_path=cache_url_path, + base_url=root) + print >>f, footer%fields + +def jsonDumper(obj): + if isinstance(obj, BibTeX.BibTeXEntry): + e = obj.entries.copy() + e['key'] = obj.key + return e + else: + raise TypeError("Do not know how to serialize %s"%(obj.__class,)) + +def writePageSet(config, bib, tag): + if tag: + bib_entries = [ b for b in bib.entries + if tag in b.get('www_tags', "").split() ] + else: + bib_entries = bib.entries[:] + + if not bib_entries: + print >>sys.stderr, "No entries with tag %r; skipping"%tag + return + + tagdir = config.TAG_DIRECTORIES[tag] + outdir = os.path.join(config.OUTPUT_DIR, tagdir) + cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir), + config.CACHE_DIR) + if not os.path.exists(outdir): + os.makedirs(outdir, 0755) + ##### Sorted views: + + ## By topic. + + entries = BibTeX.sortEntriesBy(bib_entries, "www_section", "ZZZZZZZZZZZZZZ") + entries = BibTeX.splitSortedEntriesBy(entries, "www_section") + if entries[-1][0].startswith(""): + entries[-1] = ("Miscellaneous", entries[-1][1]) + + entries = [ (s, BibTeX.sortEntriesByDate(ents)) + for s, ents in entries + ] + + f = open(os.path.join(outdir,"topic.html"), 'w') + writeHTML(f, entries, "Topics", "topic", + (("By topic", None), + ("By date", "./date.html"), + ("By author", "./author.html") + ), + tag=tag, config=config, + cache_url_path=cache_url_path) + f.close() + + ## By date. 
+ + entries = BibTeX.sortEntriesByDate(bib_entries) + entries = BibTeX.splitSortedEntriesBy(entries, 'year') + for idx in -1, -2: + if entries[idx][0].startswith(""): + entries[idx] = ("Unknown", entries[idx][1]) + elif entries[idx][0].startswith("forthcoming"): + entries[idx] = ("Forthcoming", entries[idx][1]) + sections = [ ent[0] for ent in entries ] + + first_year = int(entries[0][1][0]['year']) + try: + last_year = int(entries[-1][1][0].get('year')) + except ValueError: + last_year = int(entries[-2][1][0].get('year')) + + years = map(str, range(first_year, last_year+1)) + if entries[-1][0] == 'Unknown': + years.append("Unknown") + + f = open(os.path.join(outdir,"date.html"), 'w') + writeHTML(f, entries, "Years", "date", + (("By topic", "./topic.html"), + ("By date", None), + ("By author", "./author.html") + ), + tag=tag, config=config, + cache_url_path=cache_url_path) + f.close() + + ## By author + entries, url_map = BibTeX.splitEntriesByAuthor(bib_entries) + + f = open(os.path.join(outdir,"author.html"), 'w') + writeHTML(f, entries, "Authors", "author", + (("By topic", "./topic.html"), + ("By date", "./date.html"), + ("By author", None), + ), + tag=tag, config=config, + cache_url_path=cache_url_path, + section_urls=url_map) + f.close() + + ## The big BibTeX file + + entries = bib_entries[:] + entries = [ (ent.key, ent) for ent in entries ] + entries.sort() + entries = [ ent[1] for ent in entries ] + + ## Finding the root directory is done by writeHTML(), but + ## the BibTeX file doesn't use that, so repeat the code here + root = "../"*pathLength(config.TAG_DIRECTORIES[tag]) + if root == "": root = "." + + header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE) + f = open(os.path.join(outdir,"bibtex.html"), 'w') + print >>f, header % { 'command_line' : "", + 'title': config.TAG_TITLES[tag], + 'root': root } + for ent in entries: + print >>f, ( + ("%s" + "
    %s
    ") + %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))) + print >>f, footer + f.close() + + f = open(os.path.join(outdir,"bibtex.json"), 'w') + json.dump(entries, f, default=jsonDumper) + f.close() + + +if __name__ == '__main__': + if len(sys.argv) == 2: + print "Loading from %s"%sys.argv[1] + else: + print >>sys.stderr, "Expected a single configuration file as an argument" + sys.exit(1) + config.load(sys.argv[1]) + + bib = BibTeX.parseFile(config.MASTER_BIB) + + for tag in config.TAG_DIRECTORIES.keys(): + writePageSet(config, bib, tag) diff --git a/i2p2www/babel.cfg b/i2p2www/babel.cfg deleted file mode 100644 index f25e938a..00000000 --- a/i2p2www/babel.cfg +++ /dev/null @@ -1,7 +0,0 @@ -[python: **.py] -[jinja2: **/pages/**.html] -extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension -[jinja2: **/pages/global/macros] -extensions=jinja2.ext.autoescape,jinja2.ext.with_ -[jinja2: **/blog/**.rst] -extensions=jinja2.ext.autoescape,jinja2.ext.with_ diff --git a/i2p2www/blog/2011/06/06/Ipredator-SoC.rst b/i2p2www/blog/2011/06/06/Ipredator-SoC.rst index 40c96634..870ff823 100644 --- a/i2p2www/blog/2011/06/06/Ipredator-SoC.rst +++ b/i2p2www/blog/2011/06/06/Ipredator-SoC.rst @@ -20,7 +20,7 @@ This work was the idea of Jan-Erik Fiske of `ViaEuropa`_ and Peter Sunde of `fla Current information can be found on `zzz's forum`_ and #i2p-dev on chat.freenode.net -.. _`zzz's forum`: http://zzz.i2p.to/topics/888 +.. _`zzz's forum`: http://{{ i2pconv('zzz.i2p') }}/topics/888 Cheers diff --git a/i2p2www/blog/2011/09/03/Ipredator-SoC-itoopie-released.rst b/i2p2www/blog/2011/09/03/Ipredator-SoC-itoopie-released.rst index 242a34d1..88fdb1cf 100644 --- a/i2p2www/blog/2011/09/03/Ipredator-SoC-itoopie-released.rst +++ b/i2p2www/blog/2011/09/03/Ipredator-SoC-itoopie-released.rst @@ -16,11 +16,9 @@ The aim of itoopie is to provide an interface that is simpler and has a lower lu I2PControl is an I2P plugin providing a JSONRPC interface for the I2P router. The interface supports setting basic settings (bandwidth, ports etc.), reading many stats and is provided over an SSL encrypted HTTP connection. -More information and instructions can be found at `itoopie.net`_, `itoopie.i2p.to`_ (via proxy)and `itoopie.i2p`_ (anonymously). +More information and instructions can be found at `{{ i2pconv('itoopie.i2p') }}`_. -.. _`itoopie.net`: http://itoopie.net -.. _`itoopie.i2p.to`: http://itoopie.i2p.to -.. _`itoopie.i2p`: http://itoopie.i2p +.. _`{{ i2pconv('itoopie.i2p') }}`: http://{{ i2pconv('itoopie.i2p') }} This project has been funded by the VPN services `Relakks`_ & `Ipredator`_ and was initiated by Jan-Erik Fiske and `Peter Sunde`_. diff --git a/i2p2www/blog/2013/07/15/0.9.7-Release.rst b/i2p2www/blog/2013/07/15/0.9.7-Release.rst new file mode 100644 index 00000000..794991af --- /dev/null +++ b/i2p2www/blog/2013/07/15/0.9.7-Release.rst @@ -0,0 +1,88 @@ +============= +{% trans %}0.9.7 Release{% endtrans %} +============= +.. meta:: + :date: 2013-07-15 + :category: release + :excerpt: {% trans %}0.9.7 includes significant bug fixes and improvements.{% endtrans %} + +{% trans %}0.9.7 includes significant bug fixes and improvements.{% endtrans %} + +{% trans -%} +For the first time, class 'N' routers (those with a minimumum of 128 KBytes/sec of shared bandwidth) +will automatically become floodfill (previously it was only 'O' routers with 256 KBps). This will +increase the floodfill population for additional resistance to certain attacks (see below). 
Floodfill routers +don't consume much additional bandwidth, but they do tend to use additional memory and concurrent +connections. If you do not wish your router to become floodfill, set the advanced configuration +router.floodfillParticipant=false . +{%- endtrans %} + +{% trans -%} +As we think the last release fixed the experimental update-via-torrent bugs, 3% of routers should +update over in-network bittorrent this cycle. +{%- endtrans %} + +{% trans -%} +Plugin update checks, possibly broken for several releases, are fixed. Your plugins should once again +auto-update after updating the router. +{%- endtrans %} + +{% trans -%} +We fixed a major streaming timer bug that contributed to frequent IRC disconnects. +{%- endtrans %} + +{% trans -%} +This release contains additional mitigations for the `"practical attacks" paper`_. +However, we have a lot more work to do to resist Sybil attacks on the floodfills, and resist +traffic analysis at the gateways and endpoints of exploratory tunnels. +It's a good reminder for everybody that our network is still relatively small and vulnerable. +We don't currently recommend any uses that would put anybody in serious jeopardy. +We'll keep working to improve it... please keep working to spread the word. A bigger network is a better network. +{%- endtrans %} + +.. _{% trans %}`"practical attacks" paper`{% endtrans %}: http://wwwcip.informatik.uni-erlangen.de/~spjsschl/i2p.pdf + +**{% trans %}RELEASE DETAILS{% endtrans %}** + +**{% trans %}Anonymity Improvements{% endtrans %}** + +- {% trans %}End-to-end encryption of responses to leaseset lookups{% endtrans %} +- {% trans %}Expand floodfill pool by enabling class 'N' floodfills{% endtrans %} +- {% trans %}Randomize padding inside encrypted SSU packets{% endtrans %} +- {% trans %}Preparation for better SSU protocol obfuscation{% endtrans %} + +**{% trans %}Bug Fixes{% endtrans %}** + +- {% trans %}Fix newer lease sets not getting stored or published{% endtrans %} +- {% trans %}Fix classpath bug when used with 4-year-old installations, causing the console not to start{% endtrans %} +- {% trans %}Fix addressbook database bug preventing update of the reverse index{% endtrans %} +- {% trans %}Fix i2psnark bug that changed the infohash of torrents created by Robert and fetched via magnet link{% endtrans %} +- {% trans %}Fix version checking for plugins{% endtrans %} +- {% trans %}Fix a streaming timer bug causing frequent IRC disconnects (also affects other close-on-idle tunnels){% endtrans %} + +**{% trans %}Other{% endtrans %}** + +- {% trans %}Don't install as a service on Windows by default{% endtrans %} +- {% trans %}Reduce transport idle timeouts{% endtrans %} +- {% trans %}Reduce tunnels on idle in i2psnark{% endtrans %} +- {% trans %}Change default in i2ptunnel GUI to 3 hops{% endtrans %} +- {% trans %}IE 10 support{% endtrans %} +- {% trans %}Individual expiration times in leases, for efficiency on destinations with a high number of tunnels{% endtrans %} +- {% trans %}Low-level encryption and XOR speedups{% endtrans %} +- {% trans %}Jetty 7.6.11{% endtrans %} +- {% trans %}Tomcat 6.0.37{% endtrans %} +- {% trans %}Translation updates: Chinese, French, German, Portuguese, Russian, Spanish{% endtrans %} +- {% trans %}New Turkish translation{% endtrans %} +- {% trans %}Wrapper 3.5.19 (new installs and PPA only){% endtrans %} +- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %} + +**{% trans %}SHA256 Checksums:{% endtrans %}** + +:: + + 
48c10fe5c7455e134df44717215ed66ff79cfb2dd03fbfd64c14b3d5c179eab7 i2pinstall_0.9.7_windows.exe + c0a67051bb0c6f1e4ce3ac8a04257063c4b606b76309b39a6c3daeaaa3888e04 i2pinstall_0.9.7.jar + 497e2601ab7594c93e0866e7f6ad9c445f05f7660efcea596ab255677056b1cb i2psource_0.9.7.tar.bz2 + d0b8f0a2491c5ad401f87c94b3b805c03bccd69f9f1b57177a53287f29f85959 i2pupdate_0.9.7.zip + a620eafff86e8eb919acb5b8cd42578df68928b122dc3e715c0b431cdd4c0ef2 i2pupdate.su2 + 0d5723c361059a60431f3275ad5c0978c3b66097ecda1d1b8f5310c594f0a1ea i2pupdate.sud diff --git a/i2p2www/blog/2013/08/10/0.9.7.1-Release.rst b/i2p2www/blog/2013/08/10/0.9.7.1-Release.rst new file mode 100644 index 00000000..75e4231e --- /dev/null +++ b/i2p2www/blog/2013/08/10/0.9.7.1-Release.rst @@ -0,0 +1,39 @@ +=============== +{% trans %}0.9.7.1 Release{% endtrans %} +=============== +.. meta:: + :date: 2013-08-10 + :category: release + :excerpt: {% trans %}This unscheduled release disables the RouterInfo verification messages that were used in the attack published in the UCSB paper, which should make correlating a LeaseSet and a Router much more difficult. We have also included a limited number of other fixes listed below. Our 0.9.8 release, which will include IPv6 support, is still on-schedule for late September.{% endtrans %} + +{% trans %}This unscheduled release disables the RouterInfo verification messages that were used in the attack published in the UCSB paper, which should make correlating a LeaseSet and a Router much more difficult. We have also included a limited number of other fixes listed below. Our 0.9.8 release, which will include IPv6 support, is still on-schedule for late September.{% endtrans %} + +{% trans %}As usual, we recommend that all users update to this release.{% endtrans %} + +**{% trans %}RELEASE DETAILS{% endtrans %}** + +**{% trans %}Anonymity Improvements{% endtrans %}** + +- {% trans %}Disable RouterInfo verification messages{% endtrans %} + +**{% trans %}Other{% endtrans %}** + +- {% trans %}Extend inbound tunnel expiration{% endtrans %} +- {% trans %}i2prouter: bashism fix{% endtrans %} +- {% trans %}i2psnark: increase max piece size, mime type updates{% endtrans %} +- {% trans %}New reseed host{% endtrans %} +- {% trans %}New update hosts, thanks Meeh and dg{% endtrans %} +- {% trans %}Streaming: RTO changes{% endtrans %} +- {% trans %}Updater: Increase update-via-torrent to 30 percent{% endtrans %} +- {% trans %}UPnP fix for some hardware{% endtrans %} + +**{% trans %}SHA256 Checksums:{% endtrans %}** + +:: + + 293f445196a2f35c4d580f65b548135399e1f4443450b5ecf1cc53b1203fdad1 i2pinstall_0.9.7.1_windows.exe + 9fae874a4d680f50f5efd7be70cfcf55f2f4687e011bde9c4b4899bafb002e97 i2pinstall_0.9.7.1.jar + 7b73bdb23c53798054741cbaa4e7d8cce832ee566fbb17df0c803d0c22d099e1 i2psource_0.9.7.1.tar.bz2 + 69ca22a77a2de87f726d86555317f8688891d31f5312cf71d5a43febe2729b38 i2pupdate_0.9.7.1.zip + f59c9c80349c328b3e912113a3842146f647ff22ae323cef6b1e56a23f8c8cf1 i2pupdate.su2 + 52d1f32e2a72091da10312853e5df6bced12cb97770ba20732f2d9d6c4d2f5fe i2pupdate.sud diff --git a/i2p2www/blog/2013/09/30/0.9.8-Release.rst b/i2p2www/blog/2013/09/30/0.9.8-Release.rst new file mode 100644 index 00000000..fef85036 --- /dev/null +++ b/i2p2www/blog/2013/09/30/0.9.8-Release.rst @@ -0,0 +1,138 @@ +============= +{% trans %}0.9.8 Release{% endtrans %} +============= +.. meta:: + :date: 2013-09-30 + :category: release + :excerpt: {% trans %}0.9.8 includes the long-awaited support for IPv6. 
It's enabled by default, but of course you need a public IPv6 address to use it. Configuration is on the 'network' configuration tab in your console. We also have anonymity improvements including padding of SSU packets and longer router private keys.{% endtrans %} + +{% trans %}0.9.8 includes the long-awaited support for IPv6. It's enabled by default, but of course you need a public IPv6 address to use it. Configuration is on the 'network' configuration tab in your console. We also have anonymity improvements including padding of SSU packets and longer router private keys.{% endtrans %} + +{% trans %}30% of you will update via in-network torrent in this update cycle.{% endtrans %} + +**{% trans %}IPv6 Details{% endtrans %}** + +{% trans -%} +IPv6 is enabled and preferred by default. If you have a public IPv6 address +and you are connecting to another router with a published IPv6 address, it will +connect via IPv6. There is a new IPv6 configuration section on /confignet in +the router console. If IPv6 is causing problems you may disable it there. +{%- endtrans %} + +{% trans -%} +As a part of the IPv6 development effort, I2P now supports multiple +published IP addresses. If you have multiple public IP addresses (IPv4, IPv6, +or both), you may enable or disable them individually on /confignet. The +default is to use the first IPv4 and IPv6 addresses it discovers. If you have +multiple addresses you should review the configuration on /confignet and adjust +it if necessary. +Note that while you may enable multiple IPv4 and IPv6 addresses on /confignet, +we recommend that you use only one IPv4 and one IPv6 address. There are +bugs still to be fixed with multiple addresses of each type. +{%- endtrans %} + +{% trans -%} +While IPv6 support was designed and developed over several years, it has +only been tested by a limited number of users and is still beta. If you do have +a public IPv6 address, please monitor your router and the logs for problems, +and disable it necessary. Please report any bugs on +http://trac.i2p2.i2p. +{%- endtrans %} + +**{% trans %}Rekeying Details{% endtrans %}** + +{% trans -%} +For those of you running I2P on faster hardware (generally, 64-bit x86) the +router will generate a new identity using longer keys. This will substantially +reduce your participating traffic for 48 hours or more, while your router +re-integrates into the network. Due to the new keys, the large number of +torrent updates, and the recent network growth, we expect substantial +disruption to the network for a week or more after the update is released. +Please be patient and things should start to improve after a few days. +{%- endtrans %} + +{% trans -%} +These changes may result in higher CPU usage for some of you. We're doing +our best to increase efficiency, but stronger security generally requires more +computation. Performance may also be poor during the first week +due to the network churn. +We will evaluate the network performace before deciding whether to +change the key length on slower hardware in a future release. +{%- endtrans %} + +{% trans -%} +We are experiencing rapid network growth in the last few weeks, which is +causing a bit of a bumpy ride for some, especially on weekends. However, the +network is still performing fairly well, so keep spreading the word. +{%- endtrans %} + +**{% trans %}More Changes Coming{% endtrans %}** + +{% trans -%} +We're in the initial stages of desiging major changes to strengthen our +crypto. 
Stronger crypto will use more CPU and it may possibly +require a Java 7 JRE at a minimum. We understand your desire to run I2P on low-power +and/or older hardware. We're working hard to minimize the impacts, but some +loss of performance is inevitable. In addition, Java 5 and 6 are no longer +supported by Oracle. Now is a good time to upgrade to Java 7. Any change in +minimum requirements will be announced well in advance. +{%- endtrans %} + +**{% trans %}New Website{% endtrans %}** + +{% trans -%} +After a heroic effort by str4d, the new website preview is available at +http://i2hq.srv.i2p2.de. We hope to see it go live at +https://geti2p.net and http://www.i2p2.i2p soon. Please +contribute to the new website translations on Transifex, especially the +website_priority resource. +{%- endtrans %} + +**{% trans %}Community Participation{% endtrans %}** + +{% trans -%} +In early August, hottuna and zzz attended DEFCON 21 in Las Vegas. +Last weekend, echelon attended the CTS IV conference in Berlin and +psi attended the Tahoe-LAFS hackfest at GNU 30 in Cambridge, Mass. +Several of us will be at 30C3 in Hamburg late this year. +It's great to see people participating at these events and representing I2P. +{%- endtrans %} + +**{% trans %}RELEASE DETAILS{% endtrans %}** + +**{% trans %}Major Changes{% endtrans %}** + +- {% trans %}IPv6 support for both NTCP and SSU{% endtrans %} + +**{% trans %}Anonymity Improvements{% endtrans %}** + +- {% trans %}SSU protocol obfuscation by adding random padding{% endtrans %} +- {% trans %}Longer encryption and DH private keys for users on faster platforms{% endtrans %} + +**{% trans %}Bug Fixes{% endtrans %}** + +- {% trans %}Fix I2PTunnel / I2CP locking and duplicates (partial){% endtrans %} +- {% trans %}Fix translation of HTTP proxy error pages{% endtrans %} +- {% trans %}Fix occasional runtime exception in NTCP{% endtrans %} + +**{% trans %}Other{% endtrans %}** + +- {% trans %}Big rework of transport code to accommodate multiple addresses and IPv6{% endtrans %} +- {% trans %}Streaming: Improved recovery from lost acks, other fixes{% endtrans %} +- {% trans %}Use Transifex for translation of initial news and HTTP proxy error pages{% endtrans %} +- {% trans %}Translation updates: Chinese, French, German, Portuguese, Russian, Swedish, Turkish{% endtrans %} +- {% trans %}New Romanian translation{% endtrans %} +- Jetty 7.6.12.v20130726 +- {% trans %}Wrapper 3.5.20 (new installs and PPA only){% endtrans %} +- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %} + +**{% trans %}SHA256 Checksums:{% endtrans %}** + +:: + + 5a863c43dc986087e5a5facd02b8ede32e1903bad1f4531bff95e61eab0facaf i2pinstall_0.9.8_windows.exe + 8af3f933346d76ac67ce814d7f991bbc00fa31c23124313841dbef9ae7bcf908 i2pinstall_0.9.8.jar + 787d1fe113398dfcec25d7daaca4e4093f309cb3e622b80757bcdf0558472041 i2psource_0.9.8.tar.bz2 + 24a08305228b817f87e251af74c4b5e9d1726de8d7d64c17bc2ede5511d42e58 i2pupdate_0.9.8.zip + 76b049da4e02b96e9e05eaf69b2e8214a6d6874385ab2d82c2885379ccd65278 i2pupdate.su2 + dba0f8e4660cb9147c50b7b3c4a0f95d342cfc65a51e0d37e445bc72026ed05f i2pupdate.sud diff --git a/i2p2www/blog/2013/10/02/0.9.8.1-Release.rst b/i2p2www/blog/2013/10/02/0.9.8.1-Release.rst new file mode 100644 index 00000000..a520bd45 --- /dev/null +++ b/i2p2www/blog/2013/10/02/0.9.8.1-Release.rst @@ -0,0 +1,36 @@ +=============== +{% trans %}0.9.8.1 Release{% endtrans %} +=============== +.. 
meta:: + :date: 2013-10-02 + :category: release + :excerpt: {% trans %}0.9.8.1 fixes a problem with updating to 0.9.8 on Windows for some people. New installs and non-Windows platforms are not affected, however all platforms will automatically update even if running 0.9.8.{% endtrans %} + +{% trans %}0.9.8.1 fixes a problem with updating to 0.9.8 on Windows for some people. New installs and non-Windows platforms are not affected, however all platforms will automatically update even if running 0.9.8.{% endtrans %} + +{% trans -%} +See the `Trac ticket`_ for details and workarounds. See +`the 0.9.8 release notes`_ for information on IPv6 and other changes. +{%- endtrans %} + +{% trans -%} +Due to recent attacks, logins are disabled on `Trac`_ and new registrations are +disabled on `zzz.i2p`_. Until those services are restored, please report all +bugs on IRC freenode or IRC2P #i2p-dev. +{%- endtrans %} + +.. _{% trans %}`Trac ticket`{% endtrans %}: http://{{ i2pconv('trac.i2p2.i2p') }}/ticket/1056 +.. _{% trans %}`the 0.9.8 release notes`{% endtrans %}: {{ url_for('blog_post', slug='2013/09/30/0.9.8-Release') }} +.. _`Trac`: http://{{ i2pconv('trac.i2p2.i2p') }}/ +.. _`zzz.i2p`: http://{{ i2pconv('zzz.i2p') }}/ + +**{% trans %}SHA256 Checksums:{% endtrans %}** + +:: + + e4a0a5929f20a5e176aad1ba4fe85d6c321c06fbc802cd715970ec380bb9e4fe i2pinstall_0.9.8.1_windows.exe + 8b933d55622743e3692585d09a1393a898dfd3d8c8f4c7f489adc23981273d30 i2pinstall_0.9.8.1.jar + 315072afc19b254a67062affe8b4515198ff64ecfcb4292b5f58b83975b3a1c3 i2psource_0.9.8.1.tar.bz2 + a340f84b5893ba0f193ec86e09f15c0ef724735eafb4c67c090f23be020b24ab i2pupdate_0.9.8.1.zip + 15d135f9923337df2092e42b9c5aa6ba5904b39c5ff403eef235843b1957b942 i2pupdate.su2 + d9902504d63556fa63a503fd088185dbbf3ace8b80e14dd4482b30e56b11f8d6 i2pupdate.sud diff --git a/i2p2www/blog/2013/12/07/0.9.9-Release.rst b/i2p2www/blog/2013/12/07/0.9.9-Release.rst new file mode 100644 index 00000000..290a2761 --- /dev/null +++ b/i2p2www/blog/2013/12/07/0.9.9-Release.rst @@ -0,0 +1,86 @@ +============= +{% trans %}0.9.9 Release{% endtrans %} +============= +.. meta:: + :date: 2013-12-07 + :category: release + :excerpt: {% trans %}0.9.9 fixes a number of bugs in the netdb, streaming, and i2ptunnel, and starts work on a year-long plan to increase the strength of the cryptographic signing algorithms used in the router, and support multiple algorithms and key lengths simultaneously. Automatic update files will now be signed with 4096-bit RSA keys.{% endtrans %} + +{% trans %}0.9.9 fixes a number of bugs in the netdb, streaming, and i2ptunnel, and starts work on a year-long plan to increase the strength of the cryptographic signing algorithms used in the router, and support multiple algorithms and key lengths simultaneously. Automatic update files will now be signed with 4096-bit RSA keys.{% endtrans %} + +{% trans -%} +We now support SSL between your router and your servers for security. +See `this development thread`_ for more information. +{%- endtrans %} + +.. _{% trans %}`this development thread`{% endtrans %}: http://{{ i2pconv('zzz.i2p') }}/topics/1495 + +{% trans -%} +As usual, we recommend that you update to this release. +The best way to maintain security and help the network is to run the latest release. +Several members of the I2P team will be at 30C3 in Hamburg this year. +Come say hello and ask for an I2P sticker. +Thanks to everyone for their support this year. 
+{%- endtrans %} + +**{% trans %}RELEASE DETAILS{% endtrans %}** + +**{% trans %}Anonymity Improvements{% endtrans %}** + +- {% trans %}Don't build client tunnels through zero-hop exploratory tunnels{% endtrans %} +- {% trans %}New "su3" file support using stronger keys{% endtrans %} +- {% trans %}Use su3 for updates{% endtrans %} + +**{% trans %}Bug Fixes{% endtrans %}** + +- {% trans %}Issues with losing data when closing streams{% endtrans %} +- {% trans %}Fix various streaming connection limit issues{% endtrans %} +- {% trans %}Issues with resource usage of closed connections{% endtrans %} +- {% trans %}Clean up timer threads in close-on-idle tunnels{% endtrans %} +- {% trans %}Several other streaming fixes{% endtrans %} +- {% trans %}Reject more non-public IPv6 addresses{% endtrans %} +- {% trans %}Fix IPv6 GeoIP{% endtrans %} +- {% trans %}Fix peer selection in first minutes after startup{% endtrans %} +- {% trans %}Several I2PTunnel bug fixes{% endtrans %} +- {% trans %}Fix major i2psnark DHT bug that prevented magnets from working well{% endtrans %} +- {% trans %}Fix client tunnels that fail due to name resolution failure at startup, particularly with b32 hostnames{% endtrans %} +- {% trans %}Fix changing client i2ptunnel target list{% endtrans %} +- {% trans %}Fix major bugs preventing reception of encrypted responses to leaseset lookups and verifies{% endtrans %} +- {% trans %}Fix bad links on some i2psnark buttons in Opera and text-mode browsers{% endtrans %} +- {% trans %}Fix NPE in Susimail{% endtrans %} + +**{% trans %}Other{% endtrans %}** + +- {% trans %}Start work on supporting stronger signing keys in the router{% endtrans %} +- {% trans %}Reduce thread usage for HTTP Server tunnels{% endtrans %} +- {% trans %}Auto-stop update torrent after some time{% endtrans %} +- {% trans %}Add ability to stop webapp via console{% endtrans %} +- {% trans %}New POST throttler in HTTP server tunnel{% endtrans %} +- {% trans %}Improve connection throttling{% endtrans %} +- {% trans %}More work to reduce number of connections{% endtrans %} +- {% trans %}Re-enable router info expiration job{% endtrans %} +- {% trans %}Extend router info expiration and other changes to reduce load on floodfills{% endtrans %} +- {% trans %}Support multiple servers through a single server tunnel{% endtrans %} +- {% trans %}Support specification of server port in i2ptunnel clients{% endtrans %} +- {% trans %}Add support for SSL connections from i2ptunnel to external server{% endtrans %} +- {% trans %}SSL and crypto code refactoring{% endtrans %} +- {% trans %}i2psnark storage code refactoring{% endtrans %} +- {% trans %}New destination cache{% endtrans %} +- {% trans %}Lots of code cleanup and resolution of findbugs warnings{% endtrans %} +- {% trans %}New Japanese translation (partial){% endtrans %} +- {% trans %}Translation updates: French, German, Italian, Romanian, Russian, Spanish, Swedish, and others{% endtrans %} +- Jetty 7.6.13.v20130916 +- {% trans %}Wrapper 3.5.22 (new installs and PPA only){% endtrans %} +- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %} + +**{% trans %}SHA256 Checksums:{% endtrans %}** + +:: + + 97dd4326ad8afdea0e78ffcb053b23793dfa79d89458be3fe3a1ed62a2d988e9 i2pinstall_0.9.9_windows.exe + 5028910d3fb9747a6724e39f8eccb6d9ebe0530ef017102c372871badfbf6d9f i2pinstall_0.9.9.jar + cbbf25dea50a717c3376eb7af226f0b2a653e0372db8782ef37aa8b3d275436c i2psource_0.9.9.tar.bz2 + 533b0ce2d9e1bfc8762ba17eef3572ae7355ed8f21d5d3557b718a14b05794f2 i2pupdate_0.9.9.zip + 
77824eb6f754f8b40301b7d260701eb2211ee51105d5f4b43d2c328f71736e0f i2pupdate.su2 + 78769de16a997730468e2e66c7519e2923d533cd96259dac352e04b07d80486c i2pupdate.su3 + 81b89ed00705668003b2715b930519bbeb939b9623c7e6e8d63c2762aa748bd8 i2pupdate.sud diff --git a/i2p2www/downloads.py b/i2p2www/downloads.py index 4313aaa0..7d40a4f6 100644 --- a/i2p2www/downloads.py +++ b/i2p2www/downloads.py @@ -45,10 +45,12 @@ def downloads_list(): # TODO: read mirror list or list of available files return render_template('downloads/list.html', def_mirror=DEFAULT_MIRROR) +# Debian-specific page +def downloads_debian(): + return render_template('downloads/debian.html') + # Specific file downloader def downloads_select(version, file): - if (file == 'debian'): - return render_template('downloads/debian.html', file=file) mirrors=read_mirrors() obj=[] for protocol in mirrors.keys(): @@ -71,4 +73,5 @@ def downloads_redirect(version, protocol, domain, file): if not domain in mirrors: abort(404) return redirect(mirrors[domain]['url'] % data) - return redirect(mirrors[randint(0, len(mirrors) - 1)]['url'] % data) + randomain = mirrors.keys()[randint(0, len(mirrors) - 1)] + return redirect(mirrors[randomain]['url'] % data) diff --git a/i2p2www/legacy.py b/i2p2www/legacy.py index cbd778b5..89457d51 100644 --- a/i2p2www/legacy.py +++ b/i2p2www/legacy.py @@ -6,12 +6,16 @@ from flask import g, redirect, url_for LEGACY_FUNCTIONS_MAP={ 'announcements': {'function': 'blog_index', 'params': {}}, - 'debian': {'function': 'downloads_select', 'params': {'file': 'debian'}}, + 'debian': {'function': 'downloads_debian', 'params': {}}, 'download': {'function': 'downloads_list', 'params': {}}, + 'installation': {'function': 'downloads_list', 'params': {}}, + 'meetings': {'function': 'meetings_index', 'params': {}}, + 'papers': {'function': 'papers_list', 'params': {}}, 'statusnotes': {'function': 'blog_index', 'params': {}}, } LEGACY_PAGES_MAP={ + 'api': 'docs', 'applications': 'get-involved/develop/applications', 'benchmarks': 'misc/benchmarks', 'bittorrent': 'docs/applications/bittorrent', @@ -33,28 +37,27 @@ LEGACY_PAGES_MAP={ 'clt': 'misc/clt', 'common_structures_spec': 'docs/spec/common-structures', 'configuration': 'docs/spec/configuration', - 'contact': 'about/contact', + 'contact': 'contact', 'cvs': 'misc/cvs', 'datagrams': 'docs/api/datagrams', 'dev-guidelines': 'get-involved/guides/dev-guidelines', 'developerskeys': 'get-involved/develop/developers-keys', 'donate': 'get-involved/donate', - 'faq': 'support/faq', 'getinvolved': 'get-involved', 'geoip': 'docs/spec/geoip', - 'glossary': 'support/glossary', + 'glossary': 'about/glossary', 'halloffame': 'about/hall-of-fame', 'how': 'docs', 'how_cryptography': 'docs/how/cryptography', 'how_elgamalaes': 'docs/how/elgamal-aes', 'how_garlicrouting': 'docs/how/garlic-routing', 'how_intro': 'docs/how/intro', - 'how_networkcomparisons': 'about/comparison', + 'how_networkcomparisons': 'comparison', 'how_networkdatabase': 'docs/how/network-database', 'how_peerselection': 'docs/how/peer-selection', 'how_threatmodel': 'docs/how/threat-model', 'how_tunnelrouting': 'docs/how/tunnel-routing', - 'htproxyports': 'support/browser-config', + 'htproxyports': 'about/browser-config', 'i2cp': 'docs/protocol/i2cp', 'i2cp_spec': 'docs/spec/i2cp', 'i2np': 'docs/protocol/i2np', @@ -83,13 +86,13 @@ LEGACY_PAGES_MAP={ 'newtranslators': 'get-involved/guides/new-translators', 'ntcp': 'docs/transport/ntcp', 'ntcp_discussion': 'docs/discussions/ntcp', - 'othernetworks': 'about/comparison/other-networks', - 'papers': 
'research/papers', - 'performance-history': 'support/performance/history', - 'performance': 'support/performance/future', + 'othernetworks': 'comparison/other-networks', + 'performance-history': 'about/performance/history', + 'performance': 'about/performance/future', 'plugin_spec': 'docs/spec/plugin', 'plugins': 'docs/plugins', 'ports': 'docs/ports', + 'pressetext-0.7': 'misc/pressetext-0.7', 'protocols': 'docs/protocol', 'ratestats': 'misc/ratestats', 'release-signing-key': 'get-involved/develop/release-signing-key', @@ -103,6 +106,7 @@ LEGACY_PAGES_MAP={ 'supported_applications': 'docs/applications/supported', 'team': 'about/team', 'techintro': 'docs/how/tech-intro', + 'ticket1056': 'misc/ticket1056', 'ticket919': 'misc/ticket919', 'todo': 'get-involved/todo', 'transition-guide': 'misc/transition-guide', @@ -120,14 +124,75 @@ LEGACY_PAGES_MAP={ 'upgrade-0.6.1.30': 'misc/upgrade-0.6.1.30', } +LEGACY_BLOG_POSTS_MAP={ + 'statnotes0108': {'date': (2008, 2, 1), 'title': 'status'}, + 'summerofcode-2011': {'date': (2011, 6, 6), 'title': 'Ipredator-SoC'}, + 'summerofcode-2011-end': {'date': (2011, 9, 3), 'title': 'Ipredator-SoC-itoopie-released'}, +} + +LEGACY_RELEASES_MAP={ + '0.6.1.30': (2007, 10, 7), + '0.6.1.31': (2008, 2, 10), + '0.6.1.32': (2008, 3, 9), + '0.6.1.33': (2008, 4, 26), + '0.6.2': (2008, 6, 7), + '0.6.3': (2008, 8, 26), + '0.6.4': (2008, 10, 6), + '0.6.5': (2008, 12, 1), + '0.7': (2009, 1, 25), + '0.7.1': (2009, 3, 29), + '0.7.2': (2009, 4, 19), + '0.7.3': (2009, 5, 18), + '0.7.4': (2009, 6, 13), + '0.7.5': (2009, 6, 29), + '0.7.6': (2009, 7, 31), + '0.7.7': (2009, 10, 12), + '0.7.8': (2009, 12, 8), + '0.7.9': (2010, 1, 12), + '0.7.10': (2010, 1, 22), + '0.7.11': (2010, 2, 15), + '0.7.12': (2010, 3, 15), + '0.7.13': (2010, 4, 27), + '0.7.14': (2010, 6, 7), + '0.8': (2010, 7, 12), + '0.8.1': (2010, 11, 15), + '0.8.2': (2010, 12, 22), + '0.8.3': (2011, 1, 24), + '0.8.4': (2011, 3, 2), + '0.8.5': (2011, 4, 18), + '0.8.6': (2011, 5, 16), + '0.8.7': (2011, 6, 27), + '0.8.8': (2011, 8, 23), + '0.8.9': (2011, 10, 11), + '0.8.10': (2011, 10, 20), + '0.8.11': (2011, 11, 8), + '0.8.12': (2012, 1, 6), + '0.8.13': (2012, 2, 27), + '0.9': (2012, 5, 2), + '0.9.1': (2012, 7, 30), + '0.9.2': (2012, 9, 21), + '0.9.3': (2012, 10, 27), + '0.9.4': (2012, 12, 17), + '0.9.5': (2013, 3, 8), + '0.9.6': (2013, 5, 28), + '0.9.7': (2013, 7, 15), + '0.9.7.1': (2013, 8, 10), + '0.9.8': (2013, 9, 30), + '0.9.8.1': (2013, 10, 2), +} + def legacy_show(f): lang = 'en' if hasattr(g, 'lang') and g.lang: lang = g.lang + if lang == 'zh': + lang = 'zh_CN' if f in LEGACY_FUNCTIONS_MAP: return redirect(url_for(LEGACY_FUNCTIONS_MAP[f]['function'], lang=lang, **LEGACY_FUNCTIONS_MAP[f]['params'])) elif f in LEGACY_PAGES_MAP: return redirect(url_for('site_show', lang=lang, page=LEGACY_PAGES_MAP[f])) + elif f in LEGACY_BLOG_POSTS_MAP: + return legacy_blog(lang, LEGACY_BLOG_POSTS_MAP[f]['date'], LEGACY_BLOG_POSTS_MAP[f]['title']) else: return redirect(url_for('site_show', lang=lang, page=f)) @@ -135,4 +200,16 @@ def legacy_meeting(id): return redirect(url_for('meetings_show', id=id, lang='en')) def legacy_status(year, month, day): - return redirect(url_for('blog_post', lang='en', slug=('%s/%s/%s/status' % (year, month, day)))) + return legacy_blog('en', (year, month, day), 'status') + +def legacy_release(version): + lang = 'en' + if hasattr(g, 'lang') and g.lang: + lang = g.lang + if version in LEGACY_RELEASES_MAP: + return legacy_blog(lang, LEGACY_RELEASES_MAP[version], '%s-Release' % version) + else: + return 
legacy_show('release-%s' % version) + +def legacy_blog(lang, (year, month, day), title): + return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title)))) diff --git a/i2p2www/lexers.py b/i2p2www/lexers.py index aaf6718e..fe1c23b1 100644 --- a/i2p2www/lexers.py +++ b/i2p2www/lexers.py @@ -29,7 +29,9 @@ class DataSpecLexer(RegexLexer): (r'(-*)(//)(-+\+-)', bygroups(Text, Generic.Strong, Text)), ], 'content': [ + (r'(\s*)(\+-)', bygroups(Text, Text), '#pop', 'boundary'), (r'(\s*)([\+|])$', bygroups(Text, Text), '#pop'), + (r'(\s*)(\.\.\.)(\s)', bygroups(Text, Generic.Strong, Text)), (r'(\s*)(\.\.\.)$', bygroups(Text, Generic.Strong), '#pop'), (r'(\s*)(~)$', bygroups(Text, Generic.Strong), '#pop'), (r'(\s*)([\w=;]+)$', bygroups(Text, Name.Tag), '#pop'), diff --git a/i2p2www/meetings/helpers.py b/i2p2www/meetings/helpers.py index 204aff0a..622141f1 100644 --- a/i2p2www/meetings/helpers.py +++ b/i2p2www/meetings/helpers.py @@ -56,7 +56,10 @@ def get_meetings_ids(num=0): # ignore all non-.rst files if not f.endswith('.rst'): continue - meetings.append(int(f[:-4])) + try: + meetings.append(int(f[:-4])) + except ValueError: + continue meetings.sort() meetings.reverse() if (num > 0): @@ -65,7 +68,7 @@ def get_meetings_ids(num=0): def render_meeting_rst(id): # check if that file actually exists - name = str(id) + '.rst' + name = '%03d.rst' % id path = safe_join(MEETINGS_DIR, name) if not os.path.exists(path): abort(404) diff --git a/i2p2www/meetings/logs/1.log b/i2p2www/meetings/logs/001.log similarity index 100% rename from i2p2www/meetings/logs/1.log rename to i2p2www/meetings/logs/001.log diff --git a/i2p2www/meetings/logs/1.rst b/i2p2www/meetings/logs/001.rst similarity index 100% rename from i2p2www/meetings/logs/1.rst rename to i2p2www/meetings/logs/001.rst diff --git a/i2p2www/meetings/logs/2.log b/i2p2www/meetings/logs/002.log similarity index 100% rename from i2p2www/meetings/logs/2.log rename to i2p2www/meetings/logs/002.log diff --git a/i2p2www/meetings/logs/2.rst b/i2p2www/meetings/logs/002.rst similarity index 100% rename from i2p2www/meetings/logs/2.rst rename to i2p2www/meetings/logs/002.rst diff --git a/i2p2www/meetings/logs/3.log b/i2p2www/meetings/logs/003.log similarity index 100% rename from i2p2www/meetings/logs/3.log rename to i2p2www/meetings/logs/003.log diff --git a/i2p2www/meetings/logs/3.rst b/i2p2www/meetings/logs/003.rst similarity index 100% rename from i2p2www/meetings/logs/3.rst rename to i2p2www/meetings/logs/003.rst diff --git a/i2p2www/meetings/logs/4.log b/i2p2www/meetings/logs/004.log similarity index 100% rename from i2p2www/meetings/logs/4.log rename to i2p2www/meetings/logs/004.log diff --git a/i2p2www/meetings/logs/4.rst b/i2p2www/meetings/logs/004.rst similarity index 100% rename from i2p2www/meetings/logs/4.rst rename to i2p2www/meetings/logs/004.rst diff --git a/i2p2www/meetings/logs/7.log b/i2p2www/meetings/logs/007.log similarity index 100% rename from i2p2www/meetings/logs/7.log rename to i2p2www/meetings/logs/007.log diff --git a/i2p2www/meetings/logs/7.rst b/i2p2www/meetings/logs/007.rst similarity index 60% rename from i2p2www/meetings/logs/7.rst rename to i2p2www/meetings/logs/007.rst index dc72e56e..b057cee5 100644 --- a/i2p2www/meetings/logs/7.rst +++ b/i2p2www/meetings/logs/007.rst @@ -1,5 +1,5 @@ -I2P dev meeting, July 3 2002 -============================ +I2P dev meeting, July 3, 2002 +============================= (Courtesy of the wayback machine http://www.archive.org/) diff --git 
a/i2p2www/meetings/logs/8.log b/i2p2www/meetings/logs/008.log similarity index 100% rename from i2p2www/meetings/logs/8.log rename to i2p2www/meetings/logs/008.log diff --git a/i2p2www/meetings/logs/8.rst b/i2p2www/meetings/logs/008.rst similarity index 100% rename from i2p2www/meetings/logs/8.rst rename to i2p2www/meetings/logs/008.rst diff --git a/i2p2www/meetings/logs/9.log b/i2p2www/meetings/logs/009.log similarity index 100% rename from i2p2www/meetings/logs/9.log rename to i2p2www/meetings/logs/009.log diff --git a/i2p2www/meetings/logs/9.rst b/i2p2www/meetings/logs/009.rst similarity index 100% rename from i2p2www/meetings/logs/9.rst rename to i2p2www/meetings/logs/009.rst diff --git a/i2p2www/meetings/logs/10.log b/i2p2www/meetings/logs/010.log similarity index 100% rename from i2p2www/meetings/logs/10.log rename to i2p2www/meetings/logs/010.log diff --git a/i2p2www/meetings/logs/010.rst b/i2p2www/meetings/logs/010.rst new file mode 100644 index 00000000..57e44abf --- /dev/null +++ b/i2p2www/meetings/logs/010.rst @@ -0,0 +1,7 @@ +I2P dev meeting, September 4, 2002 +================================== + +Quick recap +----------- + +TODO diff --git a/i2p2www/meetings/logs/11.log b/i2p2www/meetings/logs/011.log similarity index 100% rename from i2p2www/meetings/logs/11.log rename to i2p2www/meetings/logs/011.log diff --git a/i2p2www/meetings/logs/11.rst b/i2p2www/meetings/logs/011.rst similarity index 100% rename from i2p2www/meetings/logs/11.rst rename to i2p2www/meetings/logs/011.rst diff --git a/i2p2www/meetings/logs/12.log b/i2p2www/meetings/logs/012.log similarity index 100% rename from i2p2www/meetings/logs/12.log rename to i2p2www/meetings/logs/012.log diff --git a/i2p2www/meetings/logs/12.rst b/i2p2www/meetings/logs/012.rst similarity index 100% rename from i2p2www/meetings/logs/12.rst rename to i2p2www/meetings/logs/012.rst diff --git a/i2p2www/meetings/logs/15.log b/i2p2www/meetings/logs/015.log similarity index 100% rename from i2p2www/meetings/logs/15.log rename to i2p2www/meetings/logs/015.log diff --git a/i2p2www/meetings/logs/15.rst b/i2p2www/meetings/logs/015.rst similarity index 100% rename from i2p2www/meetings/logs/15.rst rename to i2p2www/meetings/logs/015.rst diff --git a/i2p2www/meetings/logs/18.log b/i2p2www/meetings/logs/018.log similarity index 100% rename from i2p2www/meetings/logs/18.log rename to i2p2www/meetings/logs/018.log diff --git a/i2p2www/meetings/logs/18.rst b/i2p2www/meetings/logs/018.rst similarity index 100% rename from i2p2www/meetings/logs/18.rst rename to i2p2www/meetings/logs/018.rst diff --git a/i2p2www/meetings/logs/20.log b/i2p2www/meetings/logs/020.log similarity index 100% rename from i2p2www/meetings/logs/20.log rename to i2p2www/meetings/logs/020.log diff --git a/i2p2www/meetings/logs/20.rst b/i2p2www/meetings/logs/020.rst similarity index 100% rename from i2p2www/meetings/logs/20.rst rename to i2p2www/meetings/logs/020.rst diff --git a/i2p2www/meetings/logs/21.log b/i2p2www/meetings/logs/021.log similarity index 100% rename from i2p2www/meetings/logs/21.log rename to i2p2www/meetings/logs/021.log diff --git a/i2p2www/meetings/logs/21.rst b/i2p2www/meetings/logs/021.rst similarity index 100% rename from i2p2www/meetings/logs/21.rst rename to i2p2www/meetings/logs/021.rst diff --git a/i2p2www/meetings/logs/22.log b/i2p2www/meetings/logs/022.log similarity index 100% rename from i2p2www/meetings/logs/22.log rename to i2p2www/meetings/logs/022.log diff --git a/i2p2www/meetings/logs/22.rst b/i2p2www/meetings/logs/022.rst similarity index 
100% rename from i2p2www/meetings/logs/22.rst rename to i2p2www/meetings/logs/022.rst diff --git a/i2p2www/meetings/logs/23.log b/i2p2www/meetings/logs/023.log similarity index 100% rename from i2p2www/meetings/logs/23.log rename to i2p2www/meetings/logs/023.log diff --git a/i2p2www/meetings/logs/23.rst b/i2p2www/meetings/logs/023.rst similarity index 100% rename from i2p2www/meetings/logs/23.rst rename to i2p2www/meetings/logs/023.rst diff --git a/i2p2www/meetings/logs/25.log b/i2p2www/meetings/logs/025.log similarity index 100% rename from i2p2www/meetings/logs/25.log rename to i2p2www/meetings/logs/025.log diff --git a/i2p2www/meetings/logs/25.rst b/i2p2www/meetings/logs/025.rst similarity index 100% rename from i2p2www/meetings/logs/25.rst rename to i2p2www/meetings/logs/025.rst diff --git a/i2p2www/meetings/logs/26.log b/i2p2www/meetings/logs/026.log similarity index 100% rename from i2p2www/meetings/logs/26.log rename to i2p2www/meetings/logs/026.log diff --git a/i2p2www/meetings/logs/26.rst b/i2p2www/meetings/logs/026.rst similarity index 100% rename from i2p2www/meetings/logs/26.rst rename to i2p2www/meetings/logs/026.rst diff --git a/i2p2www/meetings/logs/28.log b/i2p2www/meetings/logs/028.log similarity index 100% rename from i2p2www/meetings/logs/28.log rename to i2p2www/meetings/logs/028.log diff --git a/i2p2www/meetings/logs/28.rst b/i2p2www/meetings/logs/028.rst similarity index 100% rename from i2p2www/meetings/logs/28.rst rename to i2p2www/meetings/logs/028.rst diff --git a/i2p2www/meetings/logs/29.log b/i2p2www/meetings/logs/029.log similarity index 100% rename from i2p2www/meetings/logs/29.log rename to i2p2www/meetings/logs/029.log diff --git a/i2p2www/meetings/logs/29.rst b/i2p2www/meetings/logs/029.rst similarity index 100% rename from i2p2www/meetings/logs/29.rst rename to i2p2www/meetings/logs/029.rst diff --git a/i2p2www/meetings/logs/30.log b/i2p2www/meetings/logs/030.log similarity index 100% rename from i2p2www/meetings/logs/30.log rename to i2p2www/meetings/logs/030.log diff --git a/i2p2www/meetings/logs/30.rst b/i2p2www/meetings/logs/030.rst similarity index 100% rename from i2p2www/meetings/logs/30.rst rename to i2p2www/meetings/logs/030.rst diff --git a/i2p2www/meetings/logs/31.log b/i2p2www/meetings/logs/031.log similarity index 100% rename from i2p2www/meetings/logs/31.log rename to i2p2www/meetings/logs/031.log diff --git a/i2p2www/meetings/logs/31.rst b/i2p2www/meetings/logs/031.rst similarity index 100% rename from i2p2www/meetings/logs/31.rst rename to i2p2www/meetings/logs/031.rst diff --git a/i2p2www/meetings/logs/32.log b/i2p2www/meetings/logs/032.log similarity index 100% rename from i2p2www/meetings/logs/32.log rename to i2p2www/meetings/logs/032.log diff --git a/i2p2www/meetings/logs/32.rst b/i2p2www/meetings/logs/032.rst similarity index 100% rename from i2p2www/meetings/logs/32.rst rename to i2p2www/meetings/logs/032.rst diff --git a/i2p2www/meetings/logs/33.log b/i2p2www/meetings/logs/033.log similarity index 100% rename from i2p2www/meetings/logs/33.log rename to i2p2www/meetings/logs/033.log diff --git a/i2p2www/meetings/logs/33.rst b/i2p2www/meetings/logs/033.rst similarity index 100% rename from i2p2www/meetings/logs/33.rst rename to i2p2www/meetings/logs/033.rst diff --git a/i2p2www/meetings/logs/34.log b/i2p2www/meetings/logs/034.log similarity index 100% rename from i2p2www/meetings/logs/34.log rename to i2p2www/meetings/logs/034.log diff --git a/i2p2www/meetings/logs/34.rst b/i2p2www/meetings/logs/034.rst similarity index 100% rename 
from i2p2www/meetings/logs/34.rst rename to i2p2www/meetings/logs/034.rst diff --git a/i2p2www/meetings/logs/35.log b/i2p2www/meetings/logs/035.log similarity index 100% rename from i2p2www/meetings/logs/35.log rename to i2p2www/meetings/logs/035.log diff --git a/i2p2www/meetings/logs/35.rst b/i2p2www/meetings/logs/035.rst similarity index 100% rename from i2p2www/meetings/logs/35.rst rename to i2p2www/meetings/logs/035.rst diff --git a/i2p2www/meetings/logs/47.log b/i2p2www/meetings/logs/047.log similarity index 100% rename from i2p2www/meetings/logs/47.log rename to i2p2www/meetings/logs/047.log diff --git a/i2p2www/meetings/logs/47.rst b/i2p2www/meetings/logs/047.rst similarity index 100% rename from i2p2www/meetings/logs/47.rst rename to i2p2www/meetings/logs/047.rst diff --git a/i2p2www/meetings/logs/49.log b/i2p2www/meetings/logs/049.log similarity index 100% rename from i2p2www/meetings/logs/49.log rename to i2p2www/meetings/logs/049.log diff --git a/i2p2www/meetings/logs/49.rst b/i2p2www/meetings/logs/049.rst similarity index 100% rename from i2p2www/meetings/logs/49.rst rename to i2p2www/meetings/logs/049.rst diff --git a/i2p2www/meetings/logs/50.log b/i2p2www/meetings/logs/050.log similarity index 100% rename from i2p2www/meetings/logs/50.log rename to i2p2www/meetings/logs/050.log diff --git a/i2p2www/meetings/logs/50.rst b/i2p2www/meetings/logs/050.rst similarity index 100% rename from i2p2www/meetings/logs/50.rst rename to i2p2www/meetings/logs/050.rst diff --git a/i2p2www/meetings/logs/51.log b/i2p2www/meetings/logs/051.log similarity index 100% rename from i2p2www/meetings/logs/51.log rename to i2p2www/meetings/logs/051.log diff --git a/i2p2www/meetings/logs/51.rst b/i2p2www/meetings/logs/051.rst similarity index 100% rename from i2p2www/meetings/logs/51.rst rename to i2p2www/meetings/logs/051.rst diff --git a/i2p2www/meetings/logs/52.log b/i2p2www/meetings/logs/052.log similarity index 100% rename from i2p2www/meetings/logs/52.log rename to i2p2www/meetings/logs/052.log diff --git a/i2p2www/meetings/logs/52.rst b/i2p2www/meetings/logs/052.rst similarity index 100% rename from i2p2www/meetings/logs/52.rst rename to i2p2www/meetings/logs/052.rst diff --git a/i2p2www/meetings/logs/53.log b/i2p2www/meetings/logs/053.log similarity index 100% rename from i2p2www/meetings/logs/53.log rename to i2p2www/meetings/logs/053.log diff --git a/i2p2www/meetings/logs/53.rst b/i2p2www/meetings/logs/053.rst similarity index 100% rename from i2p2www/meetings/logs/53.rst rename to i2p2www/meetings/logs/053.rst diff --git a/i2p2www/meetings/logs/54.log b/i2p2www/meetings/logs/054.log similarity index 100% rename from i2p2www/meetings/logs/54.log rename to i2p2www/meetings/logs/054.log diff --git a/i2p2www/meetings/logs/54.rst b/i2p2www/meetings/logs/054.rst similarity index 100% rename from i2p2www/meetings/logs/54.rst rename to i2p2www/meetings/logs/054.rst diff --git a/i2p2www/meetings/logs/55.log b/i2p2www/meetings/logs/055.log similarity index 100% rename from i2p2www/meetings/logs/55.log rename to i2p2www/meetings/logs/055.log diff --git a/i2p2www/meetings/logs/55.rst b/i2p2www/meetings/logs/055.rst similarity index 100% rename from i2p2www/meetings/logs/55.rst rename to i2p2www/meetings/logs/055.rst diff --git a/i2p2www/meetings/logs/56.log b/i2p2www/meetings/logs/056.log similarity index 100% rename from i2p2www/meetings/logs/56.log rename to i2p2www/meetings/logs/056.log diff --git a/i2p2www/meetings/logs/56.rst b/i2p2www/meetings/logs/056.rst similarity index 100% rename from 
i2p2www/meetings/logs/56.rst rename to i2p2www/meetings/logs/056.rst diff --git a/i2p2www/meetings/logs/57.log b/i2p2www/meetings/logs/057.log similarity index 100% rename from i2p2www/meetings/logs/57.log rename to i2p2www/meetings/logs/057.log diff --git a/i2p2www/meetings/logs/57.rst b/i2p2www/meetings/logs/057.rst similarity index 100% rename from i2p2www/meetings/logs/57.rst rename to i2p2www/meetings/logs/057.rst diff --git a/i2p2www/meetings/logs/58.log b/i2p2www/meetings/logs/058.log similarity index 100% rename from i2p2www/meetings/logs/58.log rename to i2p2www/meetings/logs/058.log diff --git a/i2p2www/meetings/logs/58.rst b/i2p2www/meetings/logs/058.rst similarity index 100% rename from i2p2www/meetings/logs/58.rst rename to i2p2www/meetings/logs/058.rst diff --git a/i2p2www/meetings/logs/59.log b/i2p2www/meetings/logs/059.log similarity index 100% rename from i2p2www/meetings/logs/59.log rename to i2p2www/meetings/logs/059.log diff --git a/i2p2www/meetings/logs/59.rst b/i2p2www/meetings/logs/059.rst similarity index 100% rename from i2p2www/meetings/logs/59.rst rename to i2p2www/meetings/logs/059.rst diff --git a/i2p2www/meetings/logs/60.log b/i2p2www/meetings/logs/060.log similarity index 100% rename from i2p2www/meetings/logs/60.log rename to i2p2www/meetings/logs/060.log diff --git a/i2p2www/meetings/logs/60.rst b/i2p2www/meetings/logs/060.rst similarity index 100% rename from i2p2www/meetings/logs/60.rst rename to i2p2www/meetings/logs/060.rst diff --git a/i2p2www/meetings/logs/61.log b/i2p2www/meetings/logs/061.log similarity index 100% rename from i2p2www/meetings/logs/61.log rename to i2p2www/meetings/logs/061.log diff --git a/i2p2www/meetings/logs/61.rst b/i2p2www/meetings/logs/061.rst similarity index 100% rename from i2p2www/meetings/logs/61.rst rename to i2p2www/meetings/logs/061.rst diff --git a/i2p2www/meetings/logs/62.log b/i2p2www/meetings/logs/062.log similarity index 100% rename from i2p2www/meetings/logs/62.log rename to i2p2www/meetings/logs/062.log diff --git a/i2p2www/meetings/logs/62.rst b/i2p2www/meetings/logs/062.rst similarity index 100% rename from i2p2www/meetings/logs/62.rst rename to i2p2www/meetings/logs/062.rst diff --git a/i2p2www/meetings/logs/63.log b/i2p2www/meetings/logs/063.log similarity index 100% rename from i2p2www/meetings/logs/63.log rename to i2p2www/meetings/logs/063.log diff --git a/i2p2www/meetings/logs/63.rst b/i2p2www/meetings/logs/063.rst similarity index 100% rename from i2p2www/meetings/logs/63.rst rename to i2p2www/meetings/logs/063.rst diff --git a/i2p2www/meetings/logs/64.log b/i2p2www/meetings/logs/064.log similarity index 100% rename from i2p2www/meetings/logs/64.log rename to i2p2www/meetings/logs/064.log diff --git a/i2p2www/meetings/logs/64.rst b/i2p2www/meetings/logs/064.rst similarity index 100% rename from i2p2www/meetings/logs/64.rst rename to i2p2www/meetings/logs/064.rst diff --git a/i2p2www/meetings/logs/65.log b/i2p2www/meetings/logs/065.log similarity index 100% rename from i2p2www/meetings/logs/65.log rename to i2p2www/meetings/logs/065.log diff --git a/i2p2www/meetings/logs/65.rst b/i2p2www/meetings/logs/065.rst similarity index 100% rename from i2p2www/meetings/logs/65.rst rename to i2p2www/meetings/logs/065.rst diff --git a/i2p2www/meetings/logs/66.log b/i2p2www/meetings/logs/066.log similarity index 100% rename from i2p2www/meetings/logs/66.log rename to i2p2www/meetings/logs/066.log diff --git a/i2p2www/meetings/logs/66.rst b/i2p2www/meetings/logs/066.rst similarity index 100% rename from 
i2p2www/meetings/logs/66.rst rename to i2p2www/meetings/logs/066.rst diff --git a/i2p2www/meetings/logs/68.log b/i2p2www/meetings/logs/068.log similarity index 100% rename from i2p2www/meetings/logs/68.log rename to i2p2www/meetings/logs/068.log diff --git a/i2p2www/meetings/logs/68.rst b/i2p2www/meetings/logs/068.rst similarity index 100% rename from i2p2www/meetings/logs/68.rst rename to i2p2www/meetings/logs/068.rst diff --git a/i2p2www/meetings/logs/69.log b/i2p2www/meetings/logs/069.log similarity index 100% rename from i2p2www/meetings/logs/69.log rename to i2p2www/meetings/logs/069.log diff --git a/i2p2www/meetings/logs/69.rst b/i2p2www/meetings/logs/069.rst similarity index 100% rename from i2p2www/meetings/logs/69.rst rename to i2p2www/meetings/logs/069.rst diff --git a/i2p2www/meetings/logs/70.log b/i2p2www/meetings/logs/070.log similarity index 100% rename from i2p2www/meetings/logs/70.log rename to i2p2www/meetings/logs/070.log diff --git a/i2p2www/meetings/logs/70.rst b/i2p2www/meetings/logs/070.rst similarity index 100% rename from i2p2www/meetings/logs/70.rst rename to i2p2www/meetings/logs/070.rst diff --git a/i2p2www/meetings/logs/71.log b/i2p2www/meetings/logs/071.log similarity index 100% rename from i2p2www/meetings/logs/71.log rename to i2p2www/meetings/logs/071.log diff --git a/i2p2www/meetings/logs/71.rst b/i2p2www/meetings/logs/071.rst similarity index 100% rename from i2p2www/meetings/logs/71.rst rename to i2p2www/meetings/logs/071.rst diff --git a/i2p2www/meetings/logs/72.log b/i2p2www/meetings/logs/072.log similarity index 100% rename from i2p2www/meetings/logs/72.log rename to i2p2www/meetings/logs/072.log diff --git a/i2p2www/meetings/logs/72.rst b/i2p2www/meetings/logs/072.rst similarity index 100% rename from i2p2www/meetings/logs/72.rst rename to i2p2www/meetings/logs/072.rst diff --git a/i2p2www/meetings/logs/73.log b/i2p2www/meetings/logs/073.log similarity index 100% rename from i2p2www/meetings/logs/73.log rename to i2p2www/meetings/logs/073.log diff --git a/i2p2www/meetings/logs/73.rst b/i2p2www/meetings/logs/073.rst similarity index 100% rename from i2p2www/meetings/logs/73.rst rename to i2p2www/meetings/logs/073.rst diff --git a/i2p2www/meetings/logs/74.log b/i2p2www/meetings/logs/074.log similarity index 100% rename from i2p2www/meetings/logs/74.log rename to i2p2www/meetings/logs/074.log diff --git a/i2p2www/meetings/logs/74.rst b/i2p2www/meetings/logs/074.rst similarity index 100% rename from i2p2www/meetings/logs/74.rst rename to i2p2www/meetings/logs/074.rst diff --git a/i2p2www/meetings/logs/75.log b/i2p2www/meetings/logs/075.log similarity index 100% rename from i2p2www/meetings/logs/75.log rename to i2p2www/meetings/logs/075.log diff --git a/i2p2www/meetings/logs/75.rst b/i2p2www/meetings/logs/075.rst similarity index 100% rename from i2p2www/meetings/logs/75.rst rename to i2p2www/meetings/logs/075.rst diff --git a/i2p2www/meetings/logs/76.log b/i2p2www/meetings/logs/076.log similarity index 100% rename from i2p2www/meetings/logs/76.log rename to i2p2www/meetings/logs/076.log diff --git a/i2p2www/meetings/logs/76.rst b/i2p2www/meetings/logs/076.rst similarity index 100% rename from i2p2www/meetings/logs/76.rst rename to i2p2www/meetings/logs/076.rst diff --git a/i2p2www/meetings/logs/77.log b/i2p2www/meetings/logs/077.log similarity index 100% rename from i2p2www/meetings/logs/77.log rename to i2p2www/meetings/logs/077.log diff --git a/i2p2www/meetings/logs/77.rst b/i2p2www/meetings/logs/077.rst similarity index 100% rename from 
i2p2www/meetings/logs/77.rst rename to i2p2www/meetings/logs/077.rst diff --git a/i2p2www/meetings/logs/78.log b/i2p2www/meetings/logs/078.log similarity index 100% rename from i2p2www/meetings/logs/78.log rename to i2p2www/meetings/logs/078.log diff --git a/i2p2www/meetings/logs/78.rst b/i2p2www/meetings/logs/078.rst similarity index 100% rename from i2p2www/meetings/logs/78.rst rename to i2p2www/meetings/logs/078.rst diff --git a/i2p2www/meetings/logs/79.log b/i2p2www/meetings/logs/079.log similarity index 100% rename from i2p2www/meetings/logs/79.log rename to i2p2www/meetings/logs/079.log diff --git a/i2p2www/meetings/logs/79.rst b/i2p2www/meetings/logs/079.rst similarity index 100% rename from i2p2www/meetings/logs/79.rst rename to i2p2www/meetings/logs/079.rst diff --git a/i2p2www/meetings/logs/80.log b/i2p2www/meetings/logs/080.log similarity index 100% rename from i2p2www/meetings/logs/80.log rename to i2p2www/meetings/logs/080.log diff --git a/i2p2www/meetings/logs/80.rst b/i2p2www/meetings/logs/080.rst similarity index 100% rename from i2p2www/meetings/logs/80.rst rename to i2p2www/meetings/logs/080.rst diff --git a/i2p2www/meetings/logs/81.log b/i2p2www/meetings/logs/081.log similarity index 100% rename from i2p2www/meetings/logs/81.log rename to i2p2www/meetings/logs/081.log diff --git a/i2p2www/meetings/logs/81.rst b/i2p2www/meetings/logs/081.rst similarity index 100% rename from i2p2www/meetings/logs/81.rst rename to i2p2www/meetings/logs/081.rst diff --git a/i2p2www/meetings/logs/82.log b/i2p2www/meetings/logs/082.log similarity index 100% rename from i2p2www/meetings/logs/82.log rename to i2p2www/meetings/logs/082.log diff --git a/i2p2www/meetings/logs/82.rst b/i2p2www/meetings/logs/082.rst similarity index 100% rename from i2p2www/meetings/logs/82.rst rename to i2p2www/meetings/logs/082.rst diff --git a/i2p2www/meetings/logs/90.log b/i2p2www/meetings/logs/090.log similarity index 100% rename from i2p2www/meetings/logs/90.log rename to i2p2www/meetings/logs/090.log diff --git a/i2p2www/meetings/logs/90.rst b/i2p2www/meetings/logs/090.rst similarity index 100% rename from i2p2www/meetings/logs/90.rst rename to i2p2www/meetings/logs/090.rst diff --git a/i2p2www/meetings/logs/92.log b/i2p2www/meetings/logs/092.log similarity index 100% rename from i2p2www/meetings/logs/92.log rename to i2p2www/meetings/logs/092.log diff --git a/i2p2www/meetings/logs/92.rst b/i2p2www/meetings/logs/092.rst similarity index 100% rename from i2p2www/meetings/logs/92.rst rename to i2p2www/meetings/logs/092.rst diff --git a/i2p2www/meetings/logs/93.log b/i2p2www/meetings/logs/093.log similarity index 100% rename from i2p2www/meetings/logs/93.log rename to i2p2www/meetings/logs/093.log diff --git a/i2p2www/meetings/logs/93.rst b/i2p2www/meetings/logs/093.rst similarity index 100% rename from i2p2www/meetings/logs/93.rst rename to i2p2www/meetings/logs/093.rst diff --git a/i2p2www/meetings/logs/95.log b/i2p2www/meetings/logs/095.log similarity index 100% rename from i2p2www/meetings/logs/95.log rename to i2p2www/meetings/logs/095.log diff --git a/i2p2www/meetings/logs/95.rst b/i2p2www/meetings/logs/095.rst similarity index 100% rename from i2p2www/meetings/logs/95.rst rename to i2p2www/meetings/logs/095.rst diff --git a/i2p2www/meetings/logs/99.log b/i2p2www/meetings/logs/099.log similarity index 100% rename from i2p2www/meetings/logs/99.log rename to i2p2www/meetings/logs/099.log diff --git a/i2p2www/meetings/logs/99.rst b/i2p2www/meetings/logs/099.rst similarity index 100% rename from 
i2p2www/meetings/logs/99.rst rename to i2p2www/meetings/logs/099.rst diff --git a/i2p2www/meetings/logs/10.rst b/i2p2www/meetings/logs/10.rst deleted file mode 100644 index b5d141f6..00000000 --- a/i2p2www/meetings/logs/10.rst +++ /dev/null @@ -1,7 +0,0 @@ -I2P dev meeting, September 4 2002 -================================= - -Quick recap ------------ - -TODO diff --git a/i2p2www/meetings/logs/221.log b/i2p2www/meetings/logs/221.log new file mode 100644 index 00000000..d6a68c4d --- /dev/null +++ b/i2p2www/meetings/logs/221.log @@ -0,0 +1,319 @@ +21:01:00 So, who is here? +21:01:11 Me. +21:01:18 o/ +21:01:37 i'm here :) +21:02:10 eche|on, Meeh, KillYourTV, psi, hottuna +21:02:21 count me in too (as a spectator) +21:02:28 * nom is listening, while coding on some side projects +21:02:39 Feel free to contribute if you feel you have something to add. +21:03:04 * dg waits a minute or two more +21:03:27 rundown of topics in the meantime dg? +21:03:42 Topics: +21:03:45 * Motivating the community - "are bounties appropriate?" +21:03:45 * Managing money +21:03:46 ** Making the project "official" - benefits/negatives/how +21:04:24 i had something to add *thinks* +21:04:31 hm? +21:06:37 * lillith can't remember... probably nothing too important anyway :) +21:09:14 * dg frowns at the lack of others +21:09:44 * LaughingBuddha spectates +21:10:27 Let's start then +21:10:54 * lillith remembered! +21:10:59 hm? +21:11:14 RN: ping +21:11:25 as kytv|away pointed out, if we're deciding on voting we need some sort of elegibility criteria :) +21:11:49 aye +21:12:07 Let's get started +21:12:10 * Motivating the community - "are bounties appropriate?" +21:12:13 i expect asdfsdafsdafsd wishes to be invluded int points 1+2 :) +21:12:24 Are bounties working? +21:12:43 Everything merged into one big argument last time over bounties, management and BTC so trying to spread it out this time & be dignified. +21:12:53 Who's the guy for bounties? eche|on? +21:13:00 yep +21:13:11 Is he here? +21:13:11 Determining if bounties are working depends on what the defined purpose of a bounty is. +21:13:11 define "working". Are they, IMO, bringing in the developers or fixes we need? No. +21:13:18 he's in control of all money - point 2 :) +21:13:25 Then let's think of something else. +21:13:40 The bounty system does not seem to be working for even the bounties themselves. +21:13:54 i think there should be some sort of benefit or incentive further than loving i2p +21:14:09 A lot of the links on the page are 404s too but that's an unrelated issue +21:14:12 From the bounties page: " Instead, we are making use of a bounty system, whereby anyone can get support for working on something that people want implemented, and people who want to contribute to I2P can be assured that their support goes to what they care about." +21:14:12 we have to draw people in then keep them with our charm and civility ;) +21:14:23 Not that I'm in the position to work on any of the bounties, but they seemed to quite vague last time i looked at them +21:14:30 to be* +21:14:37 The only thing that will draw attention to I2P is content. +21:14:45 eche|on posted his thoughts here - http://zzz.i2p/topics/1359 - if he could not attend. +21:14:48 imo bounties do not work, because a code base is only as good as its maintenance, and paying someone for 'completion' gives the wrong ideas/incentives about what we need in terms of developers, for code to be worth using on a distributed scale, it has to be continually worked on by motivated people. 
having one person create a code base, get paid and possibly disappear does nothing to benefit the community +21:14:51 Title: zzz.i2p: Managing the project (at zzz.i2p) +21:14:57 str4d: instead, as opposed to...? +21:15:17 From that statement above, the purpose of bounties would seem to be to finance one-off drives to get specific features implemented. +21:15:20 are bounties appropriate? - I think it depends, imo bounties for devs, for particular project and where no contest/conmpetiotion is involved - in such cases they are appropriate +21:15:26 nom: it worked in the past if you look at the bounty page.. +21:15:30 str4d: Is that what we want? +21:15:41 nom: agreed +21:15:48 Does that work? Somewhat. +21:16:03 weltende, exactly. There are clear examples of bounties being taken. +21:16:18 http://www.i2p2.de/bounties.html +21:16:29 Title: Bounties - I2P (at www.i2p2.de) +21:16:34 Bounty uptake IS slow, due to a lack of visibility/advertising/marketing/whatever, but the bounties are slowly getting taken. +21:16:41 I don't know if the bounties which are being fufilled are perhaps not being fufilled the way we want too. +21:17:03 But, of the claimed bounties, not a single developer is currently with I2P. +21:17:10 For example: "Datastore over I2P" - "CLAIMED for 700 euro" - "duck, smeghead" +21:17:20 perhaps, change bounties to ..... and maintain your work for a reasonable time +21:17:23 to get actual continuous development going, a better model is one of project/stipends, where people donate to a project with stated goals, and the people running that project pay the money out continuously to people who are actively working to accomplish those goals +21:17:34 The solution was, IMO, hacky, the bountry $$$ was rather high for the hack and the two developers for that bounty are nowhere to be found. +21:17:46 dg: that's irrelevant - as per the current bounty outline, it is up to the donor to decide on the completion. +21:18:01 What if multiple donors exist? +21:18:08 First donor. +21:18:11 I don't like bounties. IMO, the one way to draw developers in is to draw attention to I2P. +21:18:15 (as per current outline) +21:18:21 lillith: not really needed imho if it's in the core router.. +21:18:25 If a bounty is funded by I2P, then it does become relevant as I2P itself is the judge. +21:18:32 Oh. That doesn't seem right. :s. +21:18:54 IMO, the best way to draw attention to I2P is by providing content. +21:19:06 Right, but some of the bounties can lead to content. +21:19:13 I'm not arguing for the current bounty system, just outlining it. +21:19:44 str4d: right, and thanks. +21:20:03 honestly i think a big part of the problem is that were conflating things that are directly part of the i2p code base, with things that are simply run ontop of i2p. ex translation vs datastore +21:20:03 The biggest problem with a semi-anonymous project like I2P is developer retention. The current bounty model does nothing to help that. +21:20:42 I'm against the bounty system as it doesn't help the ecosystem we have, evidently (none of the developers are here today..) and I feel project funds could be better allocated. +21:20:57 a bounty/payment for one person to do one specific part of the code base is fine in theory, but they don't work for creating continuous development of apps/systems that run ontop of i2p +21:21:12 I concur. +21:21:17 dg: well.. if there aren't taken, then the money isn't spent.. +21:21:54 weltende: The funds are in reserve, they cannot be spent as they are allocated for spending on $bounty. 
+21:21:57 like adding unit tests to i2p could be worth a bounty, but it would probably be better to make an arrangement with coders who will be paid a small amount continuously to keep adding more unit tests as needed +21:22:03 if you however think that for a certain bounty the code isn't good enough or so.. it might be a good idea to specify more clearly in the bounty description what needs to be done +21:22:26 dg: which is only a problem if we have to spend the money right away +21:23:01 it's not reserved forever as you can see in the bounty page.. funds have gone back to the money pool before +21:23:21 weltende: I doubt we will ever be at the point where we NEED the funds allocated to bounties but it seems redundant. +21:23:44 Fund allocation is beside the current point. +21:23:59 dg: exactly my point +21:24:11 dg: are competitions included in bounties or are they point 1.5? +21:24:14 There will always be money, in one way or another. +21:24:26 (Or not) +21:24:29 i think the datastore is a great example of where bounties shouldn't be used, for something as complex as a universal datastore to be viable, it has to be its own project with active developers, paying someone for completion will get you something that is marginally functional, but it will never improve +21:24:40 ^ +21:24:40 nom: agreed. +21:24:43 lillith: Competitions hadn't occurred to me but I suppose it would be the point after this. +21:24:46 Let me refer to the i2p artwork contest for 29c3 - Was that really a dev project? Was it appropriate to use bounties in it? While there was no even strict criteria stated? +21:24:57 The result will satisfy the bounty, but likely will not scale. +21:25:00 nom: Couldn't have said it better myself. +21:25:26 nom: torrents were nothing but a bounty either.. +21:25:34 Umlaut: i thought they were echelons personal funds? +21:25:54 if I was willing to contribute to the contest, the bounty would rather discourage me? +21:26:01 (most) bounties are set by users - between giving them a choice and them not donating at all, at least with a bounty they have some say in what happens +21:26:32 to put it another way... there are no bounties at google.... +21:26:32 LaughingBuddha really? then sorry, I wasn't aware about that +21:26:32 weltende yes but zzz is continuing to work on snark isn't he? +21:26:47 If I2P had an established structure for spinning off projects (or acting as an umbrella for them) then that would be a different matter (but that ties in to the later point about "official"ness). +21:26:51 Umlaut: I might be mistaken but I thought i read that somewhere +21:27:04 I think that bounties are useful, but not in the way that they are currently being marketed. +21:27:08 LaughingBuddha: all i2p's funds are technically eche|on's personal money +21:27:11 nom: zzz was around anyway though. I think his motivations and such are different than gaining rewards and the bounty program has little to do with it. I do not believe he gained anything from the torrent bounty either. +21:27:18 And that they shouldn't be the main focus. +21:27:21 nom: yes.. but without the bounty there wouldn't have been a codebase to begin with.. (and he was not part of the bounty dev team) +21:27:21 We'll get to the money later.. +21:27:40 lillith: Doesn't he "manage" it? +21:27:47 dg: ok +21:28:10 weltende, you are making a good point. +21:28:14 i2p is no legal entity, so it can't own anything. hence it is eche|on's personal money. +21:28:29 Bounties are useful for kickstarting code, not for continued development. 
+21:28:36 lillith: I see +21:28:36 if you want continuous development you should pay developers continuously to work on things they want to work on. donating money to get something done is fine, but it shouldn't be given as a lump sum to whoever can get an 0.0.1 working first, it should be used to fund project development over time +21:28:39 he could legally leave with it all one day (he wouldnt', but he could) +21:28:48 nom: and I don't really see your point with no bounties at google.. the people that work for google get paid to work there.. +21:28:52 ^this +21:29:27 But it seems we agree with the first part of nom's statement. No? +21:29:30 eg bounty of $X per month to work on something +21:29:45 Yeah +21:29:52 or perhaps define milestones in the bounty? +21:29:56 Seems like a good solution +21:30:07 (and upon reaching milestone $X you get $Y amount of money) +21:30:07 That sounds good. +21:30:14 milestones seem like a good idea +21:30:17 but they need to be clearly outlined +21:30:20 Milestones + continuous payment? +21:30:20 lol thats what my point was, they get paid, and they do work, and the work they do isn't directly connected with how they get paid. ofc if they stopped doing work, they would stop getting paid, but their not getting paid for completing a specific piece of code, their getting paid enough to live on and spend their lives coding +21:30:23 Milestones is sort of like what the Unit Tests bounty currently has. +21:30:27 is it eche|on we have to ask nicely to change the website etc? +21:30:38 no, website is in mtn +21:30:41 nom I agree with your point, paying to the devs who are reliable and known for being good contributors +21:30:44 lillith: no, anyone can change the website. +21:30:54 Or keep a part of the bounty as a "continued support" payment per month of the application/whatever +21:31:22 So we don't get outdated apps, libs, etc. +21:31:29 Would the project be judged at every milestone then? +21:31:44 LaughingBuddha: good point. Who by? +21:32:00 eh, milestones are just smaller bounties... a simpler solution is to have a pool of money for a project, and someone/group of someones who pay the money to people who are actively working on it +21:32:03 The "board"? (Againg, getting to this later). +21:32:10 Dev board? +21:32:10 yeah +21:32:29 generally you would end up with the dev board being the same people who are getting paid ofc... +21:32:46 to make anything decided upon here 'official', is that as simple as someone checking an update to the website into mtn? +21:32:55 how many active devs are there working on the i2p codebase? +21:32:58 also you need to take under consideration how the current donating system looks from the potential donor (someone new to i2p community especially) point of view +21:33:04 LaughingBuddha: one +21:33:07 lillith: Kinda. And posting ot zzz.i2p. ;_; +21:33:15 The dev board determine the state of $project and decide if it should continue to get funding? +21:33:18 i could be one of them +21:33:25 LaughingBuddha: 2, 3? +21:33:32 hmm +21:33:47 the board / employees model seems to work pretty well for 99% of the corporations in the world. you have a group of people who are the most committed and have already contributed a lot who manage the money, and you have people who join and contribute and get paid for their efforts based on the judgement of the long time contributors +21:33:54 What if we set up a board of min. 5 people who are knowledgeable on the subject? 
+21:34:01 Devs + Users +21:34:09 and i would trust the system more if there was more than one person, something like mentioned already dev-board which handles the money +21:34:24 What if you had to pay to be on the board? +21:34:31 wut +21:34:38 (this only works tho if you can separate i2p proper projects, from projects that just run on i2p, which should not be managed by the i2p dev team itself) +21:34:38 orion: not a good model. +21:34:47 inb4 Russian oligarch takes over I2P +21:34:57 haha +21:35:06 inb4 already happened, zzz = vladimir +21:35:10 Pay in code. +21:35:29 And how do you measure how much you have to pay? +21:35:32 200 lines of code? +21:35:35 some people are big contributers without coding +21:35:46 No idea, just brainstorming. +21:35:49 like any oligarchy the only natural system is election by the existing board +21:35:49 Exactly. +21:36:03 So, would the normal "dev" (team) board (coming up later) decide if $project is worth paying out to? +21:36:15 Overcomplication will lead to it not being done +21:36:22 3 tiers: inner circle, outer circle, others +21:36:30 lillith: i like that +21:36:37 other = new/ unknown people +21:36:51 outer circle = known/ trusted people +21:36:51 because we don't seem to have enough devs for a real judge panel +21:37:02 dg I would think so as the devs should know *best* what project are most important/urgent/worth spending money on +21:37:05 inner circle voted for by outer circle +21:37:20 its a hierarchy, the i2p project as a whole is more than just the i2p dev team, but they are the tip of the spear so to speak. they get / have the most donations / resources. but other projects built ontop of i2p wouldn't be managed by the i2p dev team, but could get funding from i2p proper +21:37:23 kind like meetings but more structured hierachally +21:38:13 imo <+dg> Overcomplication will lead to it not being done +21:38:37 +1 +21:39:15 The whole (team/dev) "board" idea ties in nicely as we will be discussing this next anyway +21:39:22 Should we leave this for another time or ...? +21:39:28 in short, zzz eche and whoever else they consider to be part of the 'board' of i2p are in charge of the money/decisions (they already are), and other projects on i2p should be structured similarly with their own boards of decision makers. instead of bounties for a sub project (datastore, btc client, etc) the bountie should be given to the board for that project, and let them decide how to spend it to get things done +21:39:39 so shall we get back on topic or has bouties been discussed to death? +21:40:49 and the decision to give a bounty to a board of devs for a project obviously has to be made by the board of i2p, that way you don't have 3 people show up, say their gonna do something, get the money and then never do it. +21:41:13 nom: +1 +21:41:21 nom: +1 +21:41:24 nom: I think it's payed out upon completion +21:41:34 nom++ +21:41:46 nom: +1 +21:41:54 I think that's a good note to end on? :) +21:42:14 Agreed +21:42:24 in the future it would be better for donators to give directly to the sub project if a board/group already exists, instead of donating to eche to create a bounty. 
since if theres already a group working on it, they would be the best to determine how to use the money to accomplish those goals +21:42:53 ok, moving on +21:42:58 nom that makes perfect sense +21:43:01 nom++ +21:43:11 * nom raises his glass, cheers mates +21:43:18 I feel we have covered "managing money" mostly and it comes under "making the project official" anyway +21:43:21 :) +21:43:21 So let's do the latter? +21:43:47 clarify the position on money first for lurkers? +21:43:54 for an e.V. we would at least 7 people who are willing to go public as members +21:43:55 Official = Register as Organisation? +21:44:26 LaughingBuddha: yes +21:44:29 in case register as a organization, in which country? +21:45:01 lillith: Bounty funds should go to teams assigned by the core I2P board.. if we go ahead with that. +21:45:04 Meeh: US, I assume? +21:45:07 that also need deanonymization of sertiant people +21:45:14 ok so who are the brave souls to give up their anonymity (if that means going official)? +21:45:17 What did you guys decide on? +21:45:20 Not necessarily the US +21:45:28 idk if 'offical' designation would really be all that useful... i honestly can't see what the benefit would be +21:45:31 presumably the people have to be in the US too? +21:45:54 nom: a legal entity to donate to +21:45:54 other than to put the project/people more on the radar of the powers that be... +21:46:06 I can give out my identity, so no problem for me.. But I guess I'm not allowed into the US, so yea. +21:46:17 Registration is stupid. +21:46:28 dg: What are the benefits? +21:46:39 Let's just spread out the money among different "accounts" managed by different people. +21:46:55 I.e, the eche|on account, the zzz account, the dg account, etc. +21:46:57 A wallet for each (sub)project? +21:47:04 LaughingBuddha: Managing the project's money under "I2P" and not a singular person, or persons. An official guise is far less suspicious and accountable. +21:47:09 No. +21:47:12 Do you think that going official would bring some real benefits to the i2p-world? +21:47:14 orion: not sure if the tax office might not find tht fishy +21:47:14 Just different "accounts". +21:47:32 * nom thinks the focus should be more on the logistics of the hierarchy of boards / democracy / voting thing. to actually have a system like that we would need either a well run website, or some sort of distributed system for it +21:47:35 dg: I see +21:47:46 it would certainly bring a lot of paperwork +21:47:54 Umlaut: no more complaining about eche|on holding the money +21:48:04 nom++ +21:48:13 * str4d@freenode clones nom's brain +21:48:14 nom: perhaps so, yeah. If we can arrange that, then we can come to a consensus on this.. +21:48:44 For the record, if you guys want to do something that requires giving up anonymity, I will do it. +21:48:57 git clone http://git.repo.i2p/repo/nom.git +21:49:00 I'd consider it +21:49:03 Going "official" is primarily a financial decision IMHO; it doesn't really contribute to the structure. +21:49:22 Even though I am opposed to the idea of going to the government, I will do it if that is what the project decides is best. +21:49:40 So, let's change the focus to the organizational structure +21:49:51 (As that supercedes this anyhow) +21:50:06 str4d: well.. e.V. requires the members to vote for an board once a year... so we already have procedure for voting for the board then ;) +21:50:14 "The Debian project only allows voting to be done by 'Debian Developers' (where "$developer" = "any sort of contributor"). 
If there is any sort of voting system enabled here it would need to be limited in a similar fashion, otherwise the system would be ripe for abuse, allowing for a small but vocal clique to push its demands through." +21:50:21 Should we adopt a similar approach? +21:50:25 (for the e.V.) +21:50:44 how much do you need to contribute to be a contributer? +21:50:59 The problem with the "Debian Developers" approach is the number of developers I2P has (very few) +21:51:05 ie is being active in #i2p-help enough? +21:51:25 we must find a definition on contributer +21:51:33 for what? +21:51:36 * lillith does not read 'contributer' as 'code contributer' +21:51:55 str4d: "any sort of contributor". +21:51:59 sigint: read scrollback on sighup ;) +21:52:10 will do +21:52:12 dg, yeah, just read that part *derp* +21:52:12 org structure is pretty simple in theory, just have a three tiered system of board members (elected by the existing board oligarchy), contributors (elected at large by the existing group of contributors), and users (everyone else, including people who /want/ to be seen as contributors, but havn't been around long enough for people generally to trust them) +21:52:27 sighup's like your little brother ;) +21:52:39 it all depends on the scale of contribution, reliability of the contributor and other factors +21:53:06 sorta like, royalty, nobility, and the commoners.... +21:53:13 reliability = being trusted by others +21:53:16 maybe a good start will be starting with rough numbers and working from there? +21:53:31 nom i'm actaually referring to what you have said +21:54:09 not reliable = someone who promised to do something, raised some hope and then run away (with a bounty..) +21:54:24 hmm yah +21:55:35 nom: "existing"? +21:56:15 I gotta go. In closing I just want to say that having funds in one central location makes it easier to steal by oppressive governments, and that if we need to do something which requires giving up my anonymity, I will do it. Cya +21:56:26 perhaps, supreme court(board), senate(contributors) and house(users) would be better... the board has the real control over all the decisions, but they take into account the votes of the contributors who are trusted identities, and the votes of the general population of users too, but you don't weigh that too much as theres no real protection against people making tons of user idents to vote with +21:56:33 bye orion :) +21:56:37 Should we cut now and continue this next week at the same time? +21:56:40 o/ orion +21:56:50 An hour is long, I don't want this to drag on. +21:57:04 Whatever you want. +21:57:07 dg: i'm up for that +21:57:17 I'm happy to continue next week. +21:57:26 gives time to ponder what has already been said +21:57:29 sure, sounds good +21:57:31 We need to think this over. +21:57:43 And hopefully a few more people show up then ^_^ +21:58:07 * nom thinks the main takeaway here is that we could use a site / system to have group decision making / voting on +21:58:07 yes... +21:58:14 I agree, sounds good guys. I'll update the zzz.i2p topic soon (poke me if I don't in 24 hours). +21:58:25 thanks all. :) +21:58:29 Good session +21:58:32 * lillith picks up the baffer menacingly +21:58:42 ;) go +21:58:53 thanks for letting me join +21:58:53 *baf* meeting closed :) +21:59:04 lights out! +21:59:06 thank you, and goodnight :) +21:59:19 Great. I joined in right at the end. I forgot that there even was one :| +21:59:22 inb4 massive well timed netsplit +21:59:25 brb, reading backlog +21:59:28 o/ +21:59:40 sigint timezone fail? 
+21:59:50 o/ sponge. +21:59:50 :-) +21:59:57 sigint: same time next week ;) say anything you missed the chance to then :) +22:00:12 orion wants to know about my ideas I see... +22:00:50 I pointed him in your direction sponge - figured pooling the creative juices was a good idea. +22:01:05 lillith: i hadn't explicitely planned on joining this meeting, but it would have been nice. no big deal though. i do have an idea that would be good to bring up in next week's meeting. +22:01:09 Yes, excellent. +22:01:32 I need people to help with my ideas... I have too many +22:01:35 idea: offer btc rewards for security vulnerabilities +22:01:39 sigint: it's dg you'll want to talk to on that then :) +22:01:41 (And orions work on i2pcpp has proven that he is good at implementing stuff ^_^) +22:01:58 sigint, post any ideas for next week in the zzz.i2p thread. +22:01:59 * lillith raises eyebrows +22:02:07 vairy interesting +22:02:10 will do diff --git a/i2p2www/meetings/logs/221.rst b/i2p2www/meetings/logs/221.rst new file mode 100644 index 00000000..aa831b9e --- /dev/null +++ b/i2p2www/meetings/logs/221.rst @@ -0,0 +1,19 @@ +I2P dev meeting, March 26, 2013 @ 21:00 UTC +=========================================== + +Quick recap +----------- + +* **Present:** + dg, + LaughingBuddha, + lillith, + Meeh, + nom, + orion, + str4d, + Umlaut, + weltende + +* **Next Meeting** + The next meeting is scheduled for Tuesday, April 2 @ 21:00 UTC (9:00PM) diff --git a/i2p2www/meetings/logs/222.log b/i2p2www/meetings/logs/222.log new file mode 100644 index 00000000..b5932209 --- /dev/null +++ b/i2p2www/meetings/logs/222.log @@ -0,0 +1,354 @@ +20:52:42 okay meeting topics for today: +20:54:22 1. Are bounties appropriate? +20:54:29 2. Managing money +20:54:29 2a. The ssl certs +20:54:32 3. Making the i2p project official +20:56:38 4. Procedure regarding decicions for the project (for example making it official) +20:56:53 for scrollback from last week if you were not here, http://sighup.i2p/irclogs/show?search=&user=&from_date=26+Mar+2013&to_date=26+Mar+2013&channels[]=#i2p-dev&per_page=3&page_format=Html +20:56:53 relevant zzz.i2p posts: http://zzz.i2p/topics/1359 for the meeting thread +20:56:53 http://zzz.i2p/topics/1366 for the bounties thread +20:57:07 Title: zzz.i2p: Managing the project (at zzz.i2p) +20:57:09 Title: zzz.i2p: I2P Bounty System - 2013 (at zzz.i2p) +20:57:55 chosen download bin file from zzz.i2p? +20:58:02 1. Are bounties appropriate, and further bounty discussion +20:58:05 Huh. Corruption again! +20:58:50 not sure who (if anyone) woud like to be pinged, so i'l go on +20:59:22 trolly: that's a bug +20:59:30 haha, no problem +20:59:41 Last week it was decided that while bounties can be a good thing they may well need some looking at +20:59:48 a trojan bug? just joking.. +20:59:59 try to nab the output of `http_proxy="http://127.0.0.1:4444/" http://zzz.i2p/whateverurlbrokebefore` and check /logs for anything important +21:00:05 it seems to be corruption, we saw this on id3nt.i2p in the past +21:00:19 i suggested some revisions of the 50 BTC syndie bounty to echelon, and he has updated it +21:01:06 which led me to two questions: can/should we employ people, ie give them a small amount of money regularly over an extended period? +21:01:57 and what exactly is the procedure on bounties funded by i2p's money, not directly from a donor +21:02:20 * lillith opens up the floor for discussion +21:04:50 dg: if it's occuring on another site, that suggests an I2P tunnel problem. 
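The proxied-fetch suggestion in the log above omits the actual fetch command; a minimal sketch of the idea, assuming curl is available, the default I2P HTTP proxy is listening on 127.0.0.1:4444, and with http://zzz.i2p/ standing in for the unrecorded URL that broke:
# Fetch the page through the local I2P HTTP proxy, keeping the body and the
# transfer details separate so they can be compared against the router's /logs page.
http_proxy="http://127.0.0.1:4444/" curl -sv -o response.html http://zzz.i2p/ 2> transfer.log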
+21:05:26 str4d_: This happened before, is what I am saying. I do not know if the person affected == trolly but it was a few months back and none of us had any answer. +21:05:41 * str4d_ was affected by it. +21:05:52 dg: !!! +21:05:56 Okay, more than one person. +21:06:10 I believe zab was still around at the time which may tell you the period.. +21:06:21 dr|z3d: !!! +21:06:24 But the issue is orthogonal to the current discussion =) +21:06:36 * dr|z3d lols. +21:06:47 str4d_: implying discussion ;) +21:08:32 should i2p employ people? yes and no IMO. yes the people that continue to provide services that make i2p of higher quality (such as running the default IRC network and the more popular things like id3nt.i2p) are the best candidates to receive funds... in addition to the developer stuff which may have bounties attached. +21:09:27 Shinobiwan: I wouldn't consider that employment though. +21:09:50 "Employment" would be payments for the purpose of direct I2P development (code or otherwise). +21:10:19 Shinobiwan: i2p should award effort and achievement. +21:10:43 what i had initially proposed was a small monthly payment for maintaining syndie, getting and keeping it into repos, bug fizing, etc +21:12:25 otoh, i2p should not award aspiration, lazinesss or failure to deliver. +21:12:32 That seems like a good compromise between the current bounty system and "proper" employment (which is hard for anonymous dev work) +21:13:20 Right. So if a monthly system were set up, the payment would be subject to "sufficient" work having been done. +21:13:31 dr|z3d: absolutely. there is plenty of money to give to people who deserve it +21:14:06 (So it would require a monthly meeting between the deciding people to analyze the various outputs during that period) +21:14:09 lillith: we're swimming in it. +21:14:35 I don't think that failure to deliver in one particular month should be cause for complete funding cuts, though. +21:15:03 commitment, dedication, service. +21:15:04 people have afk commitments as well as internet ones +21:15:07 derp, pingout. +21:15:37 remind me again why str4d_ isn't getting compensated? :) +21:16:07 I'd propose a more flexible system where the "employee" gets paid for the months they do sufficient work in. +21:16:07 (extended absences would be grounds for discontinuing funding though) +21:16:22 lillith: exactly (like zzz currently) +21:17:02 dr|z3d: under my proposed system, I wouldn't be at present =P +21:17:26 the threat of halebopp dropping indent inspires zzz to offer hosting costs. so why does str4d_ have to battle with eche|on to get hosting funding? +21:17:53 i offer one word: incompetence. +21:17:56 dr|z3d: that's on a tangent. +21:18:42 str4d_: more than likely. +21:18:45 what kind of maintenance work are we talking about here? shouldn't bugs and specific goals like repo inclusion be separate bounties so more than one person can claim them? +21:18:56 There are two kinds of potential funding that I can see - the bounty/employment hybrid above, and donations from I2P towards community services. These should be treated separately. +21:19:25 value added recompense. +21:20:00 anything else is jizz. like paying 10btc for dogpoo. +21:20:03 str4d_: and competitions too, if there were ever to be another +21:20:10 darrob: what we are trying to do is promote developers staying around. +21:20:13 lillith: true, that's a third category. +21:20:40 also beer. my bad. +21:21:34 I wouldn't mind being paid beer for my humor... 
;) +21:21:43 from echelon.i2p: - the I2P general fund will cover all needed costs of I2P - discussed by dev team and will be noted here and on official webpage +21:22:09 i think most things would be acceptable as long as they are discussed and agreed upon beforehand +21:22:12 darrob: So rather than paying out a large lump sum for an arbitrary milestone and then the dev goes AWOL, we define smaller milestones and tasks within the confines of (what is currently called) the bounty, and the dev gets continual smaller payments. +21:23:55 The bounty system would still exist for bounties proposed by third parties (as they have control over how their funds are used), but for bounties that would be proposed by I2P itself from I2P funds, the new system should be better for I2P IMHO. +21:24:26 bounties are shit. hit and run contributors. +21:25:09 dr|z3d: hence why we are discussing a new system +21:25:12 not to mention "i paid $200, I'm important attitudes" +21:25:26 Part of the problem IMHO is that the current system only has a general description, with no concrete structure. +21:25:49 lillith: excuse me if i'm not quite following the finer points of the argument :) +21:25:52 For the new system, we need an agreed set of guidelines for proposing and managing funded tasks. +21:26:59 dr|z3d: if people want to waste/spend their money on bounties for improperly completed features, they should still be allowed to imho +21:27:58 lillith: yep. Or they can choose to use the new system, by donating their money to I2P and putting in a request through whatever process we decide on to set up a new funded task. +21:28:16 i agree - there is money there, and we might as well use it, so we might as well use it properly and effectively +21:28:56 and then if the donor goes awol it's still technically a community owned bounty +21:28:59 i'd like to see people sticking around too, of course, but i don't see how bounties are shit at all. on the other hand the monthly thing sounds like trouble but i don't mean to dismiss it too quickly. +21:29:02 Tasks funded via the new system need to be funded with money controlled by I2P, because it will be a panel of I2P representatives who decide what counts as "sufficient" work, not the donor themselves. +21:29:03 lillith: i disagree. donate to the project and let the project decide how to distribute rewards. +21:29:06 * Shinobiwan not sure if my other msgs went through +21:29:17 bounty and employment != donation ... both should take place I think... employment/bounty for dev specific stuff... and perhaps donations for things like community services +21:29:19 the employment part would need more of a specific set of conditions +21:29:24 the community service part, really just needs the community to decide what's worth supporting +21:29:27 and then dish out something appropriate +21:29:50 dr|z3d: both options will be there. +21:29:53 Shinobiwan: they didnt, now they did :) +21:30:04 Shinobiwan: http://killyourtv.i2p/irclogs/latest.log.html for scrollback. +21:30:07 Title: #i2p-dev logs for Tuesday, 2013-04-02 (at killyourtv.i2p) +21:30:12 "oh we need russian" no we don't. we need commitment. not money chasing rats that disappear as soon as the bounty is awarded. +21:30:24 thx str4d, K1773R +21:30:47 dr|z3d: a new, private infrastructure may well appear for paying individuals for work - it might as well all be in together +21:31:27 darrob: the reason most proposals sound like trouble is because we don't have a large enough developer base to properly run/support them. 
Therefore, a proposal that should result in a larger developer base is a good idea. +21:31:42 money should not be able to dictate the project. period. +21:32:01 and http://killyourtv.i2p/irclogs/%23i2p-dev.2013-04-02.log for "live" scrollback (the HTMLized logs are processed every 10 minutes or so) +21:32:16 sponsor the project, great, but don't tell us how to spend the money. +21:32:27 dr|z3d: i like it if a money chasing rat fixed certain features in syndie and ran. what's the problem? maybe someday syndie will get a real developer again but that person won't necessarily need payment then. actually, as far as committed maintainers are concerned, it might actually be counterproductive to offer a pay for the job. +21:33:05 thx KillYourTV +21:33:19 np +21:33:36 darrob: the "problem" is money thinking it can dictate the agenda. +21:33:39 dr|z3d: i2p isn't being told how to spend its money, because bounty money never was i2p's. i2p/echelon just act as an escrow service +21:33:39 Interesting point dr|z3d - I think part of this depends on what we define as the I2P project. +21:34:42 lillith: I think the point dr|z3d is making is that, rather than being told how to spend its money, I2P is being told how to proceed, i.e. the development path is decided by the person with the most money. +21:34:57 dr|z3d: bounties are just offers (or cries for help). where do you get the negative attitude? +21:35:21 And if the bounty process was adhered to as-is, that could potentially be rather problematic wrt the threat model. +21:36:18 that is a good point - i hadn't thought of it in that way before +21:36:47 a set of rules that says "This person must be paid on this date" is a good idea in that, that person can count on the income to be there when they need it. But on the other hand, it also may create drama when people fail to meet other people's expectations of what that money is really going towards... so I think it's probably not inappropriate to have meeting specifically for 'paydays' or whatever... if there's going to be a 'regular' thing. +21:37:35 bounties are shit. show me ongoing commitment from bounty hunters and i'll change my view. except you can't. hit and run merchants. +21:37:49 So maybe what needs to happen is that any tasks/sub-projects that affect I2P directly must be funded and controlled by I2P itself. +21:37:52 darrob: i get the "negative" attitudes from half complete work that's awarded a bounty, only to disappear before you can say "um, i think you missed..." +21:38:03 darrob: also, next time you pretend str4d_ is a css artist, don't bother. you insult yourself. +21:38:10 and you also lose a friend. +21:38:13 dr|z3d: didn't str4d_ and zzz claim some bounties for the unit tests? +21:38:17 dr|z3d: what? +21:38:20 dr|z3d: OT +21:38:55 str4d_: yeah. also, beer. darrob: if you don't get it, *yawn* +21:39:03 lillith: that was after the unit tests bounty was split up into sub-tasks/milestones (which I'd say was a step towards the proposed new system). +21:39:45 dr|z3d: keep on-topic in here please =) +21:39:56 I think everything has been said on this topic now no? +21:40:03 if bounties exist IMO they should go towards the things that nobody currently part of the community knows how to or can do, IMO... not the things they dont have time for. +21:40:06 * dr|z3d recalibrates. +21:40:06 i guess all i'm trying to say is that i question that hit and run improvements are necessarily a bad thing. 
+21:40:19 I2P will survive with everyone supporting it, not just the people who get paid +21:40:30 darrob: they are good for kickstarting development in a new area +21:40:37 But the I2P router/project is not a new area, IMHO +21:40:53 So, how about the following: +21:41:20 Shinobiwan: like design! *laughs* 5 years of asking for help, and not one iota of thought to offer a bounty *laughs* +21:41:58 Projects that directly affect the I2P program/network can be funded only from I2P funds, and donors who want to contribute just donate to I2P. +21:42:21 because designers aren't coders, ergo worth nothing. except when you're offering 10BTC/100$ for anything, including crayons. +21:42:47 Projects that don't directly affect the I2P program/network but are still I2P-related (e.g. syndie) are eligible for bounties on new/substantial work. +21:42:58 sorry, but I can't take this conversation _too_ seriously, built as it is on an anthill of incompetence. +21:43:01 (But can also be managed via I2P if the donor wants) +21:43:23 dr|z3d: i'l ping you when we move on then :) +21:43:26 But a bounty would need to be more accurately-defined than the current system allows. +21:43:46 lillith: very good, sir :) +21:44:04 s/allows/does +21:44:10 str4d_: does that imply that there will be an i2p management board to make those decisions? i think that was another week's discussion, right? +21:44:32 darrob: yes. +21:44:42 This is separate to any "official-ness". +21:45:05 str4d_: sounds good :) +21:45:32 But there would be a panel of (elected) developers (coders/designers/contributors) who are trusted with steering the I2P project. +21:45:46 I.e. something a bit more formal than what we currently have. +21:46:00 darrob: that's either coming up or later, depending on whether we want to continue +21:46:26 Mmm. My proposal works under the assumption that such a panel exists in some form. +21:47:05 (exact specifics being discussed later as above) +21:47:21 lulz +21:47:28 what's #i2p-dev then? +21:47:35 i disagree with the panel idea tbh +21:47:38 meetings seem to work well, and they let new people have a say too +21:47:38 it would need to be large enough to get a variety of perspectives +21:47:38 you never know who might offer then next amazing idea +21:48:04 lillith: exactly. +21:48:11 it's that panel, but yea... it would need to become officially official +21:48:18 But with the current size of the developer base, that's hard. +21:48:25 It's a chicken-and-egg problem. +21:48:35 And we need to break into the loop somewhere. +21:51:05 Shinobiwan: yea, thats basically what i'm trying to say :) +21:51:05 actually no it's not +21:51:05 #i2p-dev along with mailing lists, zzz.i2p, syndie, etc +21:51:05 anyone who wants a say should have a chance imho +21:52:35 lillith: yes, but there still needs to be a group of people with a final say. +21:52:53 people need to know eachother w/out knowing one another... to the point I can say, str4d, KYTV, dr|z3d ... (a lot more but just for example) have been on the network for so long, and IMO all make I2P of higher quality. Collectively I think people can figure out and reach an agreement who would go on such a panel... even though nobody really knows eachother AFK. Opinions from new people to the project should be listened to also +21:52:53 however +21:53:00 (i.e. the people who control the funds. Currently, that is a single person - eche) +21:53:25 At least to begin with. +21:53:52 The Debian developer model is a good one for making decisions like you suggest lillith - all done via voting. 
+21:53:55 so, everyone has a say, a few (3 or 4) people have the final say? +21:54:10 (And a "developer" is just someone who has contributed in some way IIRC) +21:54:25 But a voting system needs a larger base of "developers" first, I think. +21:54:51 lillith: anyone can suggest an idea - that's never going to change. +21:55:06 in my mind it's more than 3 or 4 people.. more like 12+ and growing... but people that fail to make the meetings don't get to vote... (and if their vote is especially important/relevant, then the meeting might happen another time) +21:55:12 But I2P has finite resources, and those resources need to be allocated appropriately. +21:56:06 (the biggest resource being time from continual developers) +21:56:20 as I wrote on zzz.i2p, I think votes such as via gpg signed messages to a mailing list would be better than irc. We've got mailing lists and they should be utilized. +21:56:20 s/biggest/most important but currently most limited/ +21:56:23 str4d_ meant: (the most important but currently most limited resource being time from continual developers) +21:56:28 KillYourTV: agreed. +21:56:43 ya gpg signed == much better +21:56:54 Provides a transparent and verifiable archive of votes. +21:57:09 is that topic 1 over then? +21:57:09 i agree also. don't expect to accomplish any serious discussion on irc. +21:57:12 * KillYourTV is 'stealing' ideas from Debian's system(s) +21:57:19 http://www.debian.org/vote/ +21:57:22 Title: Debian Voting Information (at www.debian.org) +21:57:39 it's all open source, its there to be 'stolen' :) +21:58:06 2. Managing money +21:58:10 and with mailing lists you have the oh-so-helpful 'plonk' mechanism available if needed to raise the signal to noise ratio. +21:58:21 eche|on: ping +21:58:24 KillYourTV: dont copy the "GPL Nazis" idea pls :P +21:59:46 afaict the money management wrt bounties has already been discussed enough +22:00:12 KillYourTV: if we go the mailing-list route, the mailing-list needs to be usable entirely within I2P (currently not the case). +22:00:23 (But also usable externally) +22:00:33 but there are other, non- bounty uses for money, for example purchasing ssl certificates +22:00:36 agreed +22:00:53 str4d_: the nntp interface should qualify. +22:00:53 and agreed to lillith's last point (ofc) +22:01:04 dr|z3d welt weltende welterde echelon +22:01:07 ...which i *think* is functional. +22:01:10 are they linked? +22:01:30 I know they were supposed to be but the last I checked (months ago) they weren't. +22:01:38 purchasing ssl certificates would go w/ donations IMO ... community services ... the same way to decide what the money goes towards as donations. +22:01:41 * KillYourTV fires up the nntp tunnel +22:01:41 you can at least read through it. +22:01:44 can we keep this ontopic guys, project management is coming up :) +22:02:01 i.e.. have a meeting... say "we need this"... "agree?" ... panel says OK ... majority of panel green light +22:02:32 not so quickly, but the general idea. +22:02:32 hehe +22:02:47 Shinobiwan: can fall under the same vote system proposed above. +22:03:14 yep +22:04:16 well the ML interface is accesible via i2p more or less.. http://vmfwbic2brek2ez223j6fc6bl5mmouzqvbsch45msvyyzih3iqua.b32.i2p/ still contains redirects to lists.i2p2.de.. not sure what to do about those +22:04:24 Title: lists.i2p2.de Mailing Lists (at vmfwbic2brek2ez223j6fc6bl5mmouzqvbsch45msvyyzih3iqua.b32.i2p) +22:04:32 if there is such a panel, it is not finalized at a fixed number of people... IMO... it should grow, and grow and grow... 
so, whatever panel currently exists, should have some procedure to bring in new panel members regularly +22:04:43 we need a central party with a politburo and the users' congress. :) +22:04:46 FTR, the mailing lists as currently set up are not available via nntp. +22:05:04 (haven't added it to the hosts.txt yet) +22:05:42 (at least not under i2p.*) +22:06:27 hmm.. they should be.. but maybe not under i2p.* +22:06:46 I didn't set it up.. so don't really know anymore ;) +22:06:58 KillYourTV: i think i2p. are welt's preexisting groups. the new ones are alt.privacy.i2p.dev/general and alt.privacy.syndie.dev/general. +22:08:42 ah...nvm me. now that I refreshed the list again I see those new ones. +22:09:00 sorry +22:09:03 ah.. right.. slrn didn't show them as they didn't contain unread messages +22:09:34 i'm still confused why there are no messages. i really thought i saw a couple of test messages before. +22:09:45 can we get back on topic please? +22:10:23 i, and surely others want to know what's going on with ssl certificates for the i2p domains +22:11:17 i didn't see the topic change, just but there are other, non- bounty uses for money, for example purchasing ssl certificates /me zips it +22:11:56 ahh, sorry +22:12:06 can we keep this ontopic guys, project management is coming up :) +22:12:23 and what's the topic? ;) (I didn't see that switch) +22:12:30 2. Managing money +22:12:41 afaict the money management wrt bounties has already been discussed enough +22:12:41 lillith: eche is currently sourcing the required money. +22:12:48 but there are other, non- bounty uses for money, for example purchasing ssl certificates +22:13:16 eche|on: any updates on the certificate situation? I haven't had to get "real "certs for a while and don't know how long the verification process takes nowadays. +22:13:19 kytv2: I am on the hunt for 3k euro and cert requests... +22:13:29 so it's under control then? +22:13:36 and pushed.. +22:14:14 yes, it's being taken care of +22:14:21 Can I reiterate my opinion that it's dangerous to have one person managing all the money? +22:14:45 ok +22:15:00 Current topic = ? +22:15:03 It's not being put in to some off-shore corporation, right? +22:15:14 2. Managing money +22:15:21 orion: no. +22:15:58 afair eche wanted to speak with an lawyer about making i2p an official entity of some kind +22:15:58 dr|z3d: ping :) +22:16:12 Currently our funds lie in a (bank?) account owned by eche|on and (mostly) in a Bitcoin wallet held by eche|on. +22:16:49 yes, that's right, in .at IIRC +22:17:07 (wrt: 'official entity') +22:18:46 at as in austria? +22:18:53 yes +22:19:19 (not australia *scnr*) +22:19:46 (running gag from EEVblog if you are curious) +22:20:26 okay, looks like we've moved on again +22:20:41 3. Making the project official +22:21:12 lillith: re: money management, it's rather dependent on both the "official" status of the project, and the project management status. +22:21:30 (The former re: where funds are kept, the latter re: how funds are spent) +22:22:14 ok, fair enough :) we can straddle points 2 and 3 for a while then :) +22:25:16 must go +22:25:19 bye +22:25:47 later I'll send yo new translaion str4d_ +22:29:26 or not, as the case may be +22:30:11 i'd suggest time to move on :) +22:30:28 Starting build #28 for job I2P-Bote +22:30:28 4. 
Procedure for making decisions in i2p +22:31:27 * KillYourTV votes for taking long discussions about important decisions to a mailing list +22:31:30 Project I2P-Bote build #28:SUCCESS in 1 min 3 sec: http://jenkins.killyourtv.i2p/job/I2P-Bote/28/ +22:31:34 so, mailing list, hierachy, etc +22:31:37 That way anyone can take part when he/she can +22:31:50 i'd like to put in an honourable mention for syndie here +22:31:56 +1 +22:32:03 (@ML) +22:32:04 everything is signed by default, for a start +22:33:18 I like syndie too (ofc), but mailing lists would be easier for outsiders to take part +22:33:45 but yes, i agree in principle. no point hanging around waiting for discussion that clearly isn't happening +22:33:48 that's not to say that discussions can't be mirrored to syndie... +22:34:33 yeah.. nntp syndie gateway or so would be nice to have +22:34:36 and of course, officially moving is only one thread away ;) +22:35:54 21:00 UTC isn't handy for everyone. On a mailing list time zones mean nothing. On a mailing list there no netsplits, relay problems, or ping outs. For meaningful discussions a mailing list (IMHO) is _THE_ way to go. +22:36:30 KillYourTV: I agree. +22:37:54 irc is good when you need pretty-damn-close-to-realtime...but "we need a new domain" doesn't have that kind of urgency. Post it and it'll be addressed when $user can address it. +22:37:54 imho syndie has all the benefits of mailing lists and more, the only issue is accessability for outsiders +22:38:32 then again, how many people that we want to include already use mailing lists? +22:39:03 There's been talk of a Syndie webapp" but I don't think that's gone (and will) go anywhere. +22:40:20 I'd gather that more use mailing lists than IRC. +22:40:31 i don't want to dominate a discussion on syndie vs ml here, but i think it's something woth considering +22:41:25 * weltende@freenode prefers his mail/nntp client tbh +22:42:04 syndie via mutt would = 'win' +22:43:14 this topic also includes hierachy, which was touched upon before but imo needs some expansion +22:45:09 I don't know if we can come to a consensus on anything AND have a discussion easily with IRC meetings anymore. +22:45:28 It worked in 2006 when it was more of a quick update on the project but it's not anymore and it involves lengthy debates/discussion. +22:46:40 having more time to think through things would result in on topic, well thought out, clear discussion +22:46:47 threading also = win +22:47:13 +1 +22:47:21 +1 +22:47:28 +1 +22:47:39 might I add: nntp, fuck yeah. +22:47:50 irc meetings were always an experimental thing, and the experiment failed :) +22:48:05 hey, lillith, at least we're having the discussions now, right? :) +22:49:40 imho we should keep irc meetings and move things that take a long time in the meeting or which has a lot of discussion to the ML +22:50:11 yep :) seems like no-one actually likes meetings anyway haha +22:50:15 I think it's mainly due to the timing. +22:50:18 And pressure to respond in a fast manner.. +22:50:18 postman: http://zzz.i2p/topics/1367 +22:50:18 but there's no chance of getting a time thats good for _everyone_ +22:50:18 plus some people have irregular schedules +22:50:18 Exactly. +22:50:25 Title: zzz.i2p: I2P and e-mail (at zzz.i2p) +22:50:30 with a heavy heart, and feeling rather poetic, i'd gladly baf the last meeting ;) +22:50:41 weltende: I was thinking this.. keep irc meetings for discussion of some things in the ML (actually doing something & such). W +22:50:44 go for it, lillith. 
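A minimal sketch of the GPG-signed-vote idea raised earlier in this meeting, assuming GnuPG is installed; the vote text and filenames are invented for illustration, and no particular mailing list was settled on in the log:
# Clear-sign a vote so the list archive stays independently verifiable.
echo "Vote: YES on moving long discussions to the mailing list" > vote.txt
gpg --clearsign vote.txt        # writes vote.txt.asc, ready to be mailed to whatever list is chosen
gpg --verify vote.txt.asc       # anyone holding the signer's public key can check it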
+22:51:26 * lillith bafs the meeting closed +22:51:36 thank you, and goodnight :) +22:52:01 Starting build #103 for job I2P +22:52:09 can susimail handle mailing lists? +22:52:41 sure, but I'd use a 'real' client like claws or mutt +22:53:04 (just a matter or preference) +22:54:55 ahh, thats okay then :) +22:56:33 Project I2P build #103:SUCCESS in 4 min 34 sec: http://jenkins.killyourtv.i2p/job/i2p/103/ +23:01:15 Project I2P UnitTests build #74:SUCCESS in 4 min 31 sec: http://jenkins.killyourtv.i2p/job/UnitTests/74/ +23:04:51 and +1 to meetings continuing with the bigger things being taken to mailing lists/forums/syndie. +23:05:54 IRC is good for quick status updates..but a "newsletter" of sorts could work for that purpose too. +23:06:19 it's nice to have a start, with some goals, and an end, with a consensus +23:06:22 mailing list also works for alerts. see how tor do it with consensus issues. +23:06:46 'today THIS is what we decided and THIS is what we're going to do about it' +23:07:29 i'm not sure but i imagine ml discussions as dragging on with no distinct endpoint +23:07:52 +1 for newsletter though +23:08:33 they can, sure...but I think more will be accomplished on a long ML discussion than a 4-5 hour long irc meeting. +23:09:08 * lillith signs up with an open mind :) +23:09:38 * psi likes the idea of a mailing list +23:09:53 where is the ML? +23:10:01 * KillYourTV really likes MLs but they (the ones on the Internet) will probably cause his AFK identity to be leaked...heh +23:10:12 lists.i2p2.de i think +23:10:39 and (/me scrolls up) +23:10:54 http://vmfwbic2brek2ez223j6fc6bl5mmouzqvbsch45msvyyzih3iqua.b32.i2p/ +23:11:08 Title: lists.i2p2.de Mailing Lists (at vmfwbic2brek2ez223j6fc6bl5mmouzqvbsch45msvyyzih3iqua.b32.i2p) +23:11:24 it just redirects +23:11:55 there's also nntp.welterde.i2p +23:12:37 alt.privacy.i2p.*, alt.privacy.syndie.* +23:13:16 (cheers darrob for pointing me to the right usenet groups) +23:14:57 * RN wanders off to tinker with thundirbird +23:17:46 +1 to meetings continuing (but sticking to time) and +1 to "important" discussions on the ML. +23:19:32 for you interested.. posting is atm allowed for the following groups: +23:19:35 post: "i2p.*,alt.anonymous,alt.anonymous.*,alt.privacy.anon-server,alt.privacy.anon-server.*,alt.privacy.i2p.*,alt.privacy.syndie.*" +23:25:35 this will have to be fixed before "important" discussions make their way there http://lists.i2p2.de/pipermail/i2p-general/ +23:25:42 Title: The I2p-general Archives (at lists.i2p2.de) +23:26:27 ...unless the messages were purposely wiped (which wouldn't make sense) +23:30:13 hmm.. +23:30:21 no idea atm.. but heading to bed now diff --git a/i2p2www/meetings/logs/222.rst b/i2p2www/meetings/logs/222.rst new file mode 100644 index 00000000..6432b8b8 --- /dev/null +++ b/i2p2www/meetings/logs/222.rst @@ -0,0 +1,17 @@ +I2P dev meeting, April 2, 2013 @ 21:00 UTC +========================================== + +Quick recap +----------- + +* **Present:** + dg, + dr|z3d, + K1773R, + KillYourTV, + lillith, + orion, + RN, + Shinobiwan, + str4d, + weltende diff --git a/i2p2www/meetings/logs/223.log b/i2p2www/meetings/logs/223.log new file mode 100644 index 00000000..d870bf07 --- /dev/null +++ b/i2p2www/meetings/logs/223.log @@ -0,0 +1,165 @@ +19:56:52 Hi@all && (welt||welterde||weltende) +19:57:24 ;-) +20:00:33 Starting build #182 for job I2P +20:01:11 Mathiasdm, Meeh, postman, str4d, _sponge, KillYourTV, Complication +20:01:19 Alright, lets get this meeting started +20:01:33 meeting? 
hmm +20:01:33 Agenda: +20:01:39 * New bounty system +20:01:44 * New bounties +20:01:49 * Misc? +20:02:21 __New bounty system___ +20:03:25 During this summer I'll have some time over for I2P development, but I also have to pay my rent which is why a new bounty system or at least a new set of bounties and sub-bounties will be suggested +20:03:51 \o +20:04:37 Project I2P build #182:SUCCESS in 4 min 7 sec: http://jenkins.killyourtv.i2p/job/I2P/182/ +20:05:00 after discussing the idea with eche|on, it seems like the best option for payed work is via the bounty system +20:05:44 to make it work I'll suggest at least one large bounty and then create sub-bounties for it +20:06:27 the sub bounties will be created and closed on a bi-weekly schedule +20:06:41 (preferably by holding a meeting like this when a sub bounty is to be closed) +20:07:27 you know my opinion, and so I just wait for input ;-) +20:07:35 Currently the i2p project has a lot of funds which aren't doing us any good +20:08:10 and allowing me to contribute to some much needed problem areas in i2p should be a good thing overall +20:08:51 Does anyone have any questions or feedback at this idea? +20:09:26 I've talked to zzz, eche|on, postman and Mathiasdm earlier and they have approved +20:10:07 I've tried to reach welt/welterde/weltende, _sponge, badger and KillYourTV but have not gotten any response from them +20:10:23 Project I2P UnitTests build #153:SUCCESS in 5 min 36 sec: http://jenkins.killyourtv.i2p/job/UnitTests/153/ +20:10:35 But I'd like to know what the rest of the inhabitants of #i2p-dev think about the idea +20:10:52 I agree that we should be doing something with the funds +20:11:08 An organized method of doing so is useful, I don't disagree at all so I'm remaining mute +20:12:04 dg, does this seem like a good way of doing something useful? +20:13:00 yes. The bounty system already works, we should build upon it +20:13:19 you're proposing using existing funds? euros or BTC? +20:13:21 As far as bounty amounts go, 325€ per bi-weekly sub-bounty is what I need to cover my basic costs of living +20:13:47 euros are safer and simpler for me +20:14:07 but maybe parts could be payed in btc +20:14:42 in any case the bounty should be set in euros and then possibly payed out in btc +20:14:47 eche|on, whats our balances? +20:15:27 and to answer your question, Im proposing using existing funds +20:15:27 http://echelon.i2p/donations/index.html - still on those sums +20:15:32 Title: Donations (at echelon.i2p) +20:15:40 so ~28k€ and 626 BTC +20:16:47 hottuna: What work will you be performing? +20:17:22 appx. how many hours a week are you proposing to work? +20:17:35 that is point two on the agenda, but i'm primarily thinking about improving on our floodfill issues +20:17:57 40 h/week. So full time. +20:18:56 so round numbers, 8 euros/hour +20:19:18 nope. 4 euros/hour +20:19:20 in my mind that sounds reasonable/cheap +20:19:35 325/80 +20:20:13 mcdonalds isn't hiring? :) +20:20:35 i think burger king has payed me more an hour :P +20:21:06 you worked for a burger king? hell,... I should have visited your working office^^ +20:21:35 appx. how many weeks you propose to work? +20:21:56 lets see.. this will be a rough number +20:23:19 I should manage at least 8, but it could be more or less than that +20:24:10 so a 1300 euro commitment from us +20:24:24 yeah +20:24:49 more than that would have to be discussed in a meeting +20:25:18 anybody remember what we paid jrandom monthly? +20:26:08 let's see what the internet archive says +20:26:10 less. 
~500$ IMHO +20:26:39 he was more of a hippie than tuna is :) +20:26:50 $465 USD/month +20:27:11 I'm hippying as hard as I can damnit! +20:27:52 hippy harder!! +20:28:49 alright, so does anyone have any objections or questions? +20:29:15 no objection +20:29:41 sounds good +20:30:25 ditto +20:30:54 Alright. Then we are all happy about this +20:31:32 For the record: As no complaints have been raised, we'll proceed with the new bounty system. +20:31:47 __New bounties__ +20:32:34 The floodfill system has some issues, including attack resistance and scalability. +20:33:02 Replacing it is the first bounty that I will suggest. +20:33:30 I've talked to zzz about some alternatives +20:33:47 and step one appears to be to move to a kademlia based netdb +20:34:30 zzz has in fact already started by implementing kademlia in i2psnark +20:34:59 this is probably a good base for for a netdb network +20:35:53 there are some modifications that can be made to kad to make it more probabilistic and avoid the worst aspects of eclipse and sybil attacks. +20:36:01 I'm not sure "replace" is the right word. And also not sure it's the top of my list. Our ff system is actually in pretty good shape right now. But I'm not sure how much you want to get into discussing it now. +20:36:27 A reasonable sub-bounty may be just to analyze the current situation and make proposals +20:36:41 replace would be a long term goal, initially adding a second netdb backend would be the goal +20:36:58 yeah, replace is the wrong word. +20:37:09 but sure, the UCSD folks highlighted some issues. +20:37:35 ignoring vulnerabilities for a moment, I think we're actually good for a couple years of growth w/o changes +20:38:06 22:37 < zzz> A reasonable sub-bounty may be just to analyze the current situation and make proposals <-- sounds like a good idea if it's time-boxed +20:38:53 spending two weeks on an analysis might be overkill, but having a meeting and discussing the alternatives after a week might be good +20:38:55 what's _not_ realistic is replacing ffs with R5N this summer. +20:39:09 zzz, agreed +20:41:24 there might also be a need for some work surrounding development like multirouter support +20:41:24 which would make development easier +20:41:24 fyi for everybody, the netdb roadmap in my head is 1) encrypted lookup responses and 2) migrate the snark kad back to router +20:41:24 like the ideas +20:41:35 ./roadmap +20:41:49 yeah +20:44:21 I don't think that 2 full weeks are needed for this +20:44:27 yea +20:45:21 "alternative exploration"? +20:45:30 as in the exploration tunnels right or? +20:45:30 depends how long before your head explodes +20:45:37 what else on your list? +20:45:45 "alternative exploration" = {what technology?, if dht-which?, what code-base?} +20:46:03 maybe one week, and if I have time to spare I'll start with the multirouter stuff. +20:47:09 I'm not sure, but some of the bounties like ipv6 will have to be completed soon as ipv6 looks to be actually deployed now +20:47:40 zzz is working on ipv6 a load but he my appreciate help +20:48:12 I try to add IPv6 on my root server for I2P use. +20:48:15 Resolving issues regarging an openitp submission has been suggested by zzz +20:48:22 as soon as I find time to understand and get it up... +20:48:57 I have a dev server that I can let developers into for testing.. It have multiple ipv6 adresses +20:49:00 having us accepted into OpenITP would be a major thing for us +20:49:07 Could setup more of them now for testing +20:49:22 and now gone for a good night time... 
+20:49:25 here's my list: IPv6 (incl. testing), Crypto (see trac wiki), OpenITP prep (see trac wiki), NTCP and SSU protocol obfuscation (old zzz.i2p post, Lance James might be able to help), other state firewall resistance, Symmetric NATs (ticket #873), ... +20:49:32 http://trac.i2p2.i2p/ticket/873 - (accepted defect) - Port changing .. obscurely +20:49:40 zzz: want access to a ipv6 server for testing? +20:49:51 hottuna: major thing, yes, but, in case you (or others) are not aware: OpenITP are not long term funders. They fund short, achievable goals to improve projects "quickly". +20:51:05 Meeh yes, in a couple weeks. I'd like to see the minor fix in 0.9.5 to ignore published IPv6 addresses get out there before we start publishing them +20:51:24 s/0.9.5/0.9.6/ +20:51:24 crypto is another thing that I know a bit about, so my time might be well spent there +20:51:27 zzz meant: Meeh yes, in a couple weeks. I'd like to see the minor fix in 0.9.6 to ignore published IPv6 addresses get out there before we start publishing them +20:51:48 ok :) I can setup multiple too if needed +20:51:51 maybe if we're lucky I'll be somewhat done with the floodfill system by the time zzz is done with ipv6 +20:51:58 got a /48 net +20:52:14 that way we could both attack the crypto problem +20:52:21 heck what about i2pcpp +20:52:37 orion is 404 atm +20:52:48 sindu might help there when he got time, great C coder +20:52:59 talked about it earlier, know him from RL +20:53:26 that sounds interesting +20:53:49 if orion is at least willing to accept help, that's a big step - he wasn't before - +20:53:52 but I think that I should spend time where makes the most difference which in my mind is floodfills/ipv6 and crypto +20:54:11 *it +20:54:14 sure, my list doesn't necessarily match your skills or interest +20:54:29 also, he should get some creds for spreading the i2p stickers around Oslo, Norway. He have placed it all around the city +20:54:44 hottuna: if you want, send more.. soon emtpy again:P +20:55:11 oh yeah, hottuna if you aren't coming to DEFCON I need some too +20:55:30 im planning on coming to defcon +20:55:44 i havent bought any plane tickets yet, but I will soon. +20:55:47 oh hella yes. +20:56:23 hottuna: if you got files, I might be able to get some free printups myself +20:56:43 the files are in the i2p.graphics branch +20:56:46 if you got the sticker in png/ai/whatever format +20:56:49 ok thanks +20:57:00 if im remembering correctly +20:57:16 alright. +20:57:51 Is everyone ok with the first bounty being for the floodfill system? +20:58:02 aye +20:58:25 yepp +20:58:50 ok, so first 1 week of research into the options, followed by implementation (currently most likely kademlia)? sounds good +20:59:06 yes, that's the idea +21:01:56 ok +21:03:15 For the record: The first bounty to be introduced is adding a new netdb backend. The first sub bounty should be divided into alternative exploration, multirouter research and discussion with you guys +21:03:26 __Misc__ +21:04:38 How is the website deployment going? +21:09:27 Everyone died? +21:09:31 str4d? +21:12:57 oh +21:13:04 I was curious :) +21:14:22 did I miss anything exciting? 
+21:14:29 only this: +21:14:32 23:10 -!- hottuna [hottuna@irc2p] has quit [Quit: leaving] +21:14:32 23:12 <+Mathiasdm> oh +21:14:35 23:13 <+Mathiasdm> I was curious :) +21:15:12 Alright, if no one knows, let's see next week +21:15:38 * hottuna baf's with the meeting ending hammer +21:19:59 * Mathiasdm lurks onward :) diff --git a/i2p2www/meetings/logs/223.rst b/i2p2www/meetings/logs/223.rst new file mode 100644 index 00000000..20fcb43f --- /dev/null +++ b/i2p2www/meetings/logs/223.rst @@ -0,0 +1,13 @@ +I2P dev meeting, May 21, 2013 @ 20:00 UTC +========================================= + +Quick recap +----------- + +* **Present:** + dg, + eche|on, + hottuna, + Mathiasdm, + Meeh, + zzz diff --git a/i2p2www/meetings/logs/224.log b/i2p2www/meetings/logs/224.log new file mode 100644 index 00000000..314f624d --- /dev/null +++ b/i2p2www/meetings/logs/224.log @@ -0,0 +1,489 @@ +19:52:28 zzz, christoph2: syn +19:54:26 yay, dev beating! +19:54:33 s/beating/meeting/ +19:54:37 topiltzin meant: yay, dev meeting! +20:00:03 * hottuna baf's the meeting opened +20:00:07 Agenda: +20:00:14 * The next NetDB backend +20:00:14 * Ticket #729 - properties location on osx +20:00:14 * Ticket #741 - process renamer on windows +20:00:14 * Misc? +20:00:22 http://trac.i2p2.i2p/ticket/729 - (assigned enhancement) - on OSX ~/.i2p -> ~/Library/Application Support/i2p +20:00:33 http://trac.i2p2.i2p/ticket/741 - (accepted enhancement) - Make I2P easier to deal with with Windows firewall software +20:00:45 __ The next NetDB backend__ +20:01:16 I've been working on a proposal, the first RFC is ready +20:01:35 http://trac.i2p2.de/wiki/NetDB/NextBackend +20:01:38 Title: NetDB/NextBackend – I2P (at trac.i2p2.de) +20:02:14 The general idea is to use a Kademlia base and extend it with features that improve performance and/or reliability. +20:02:59 Some of the initial code for Kademlia has already been written by zzz +20:03:34 In fact a full BEP5 implementation. BEP5 is the mainline bittorrent implementation of Kademlia. +20:04:13 Several DHTs have been considered: Chord, Freenet and Pastry. +20:04:47 However Kad is fast, extendible and relatively reliable. +20:05:05 some other Kad derivatives that are used in production: Azureus kad, eMule kad, Mojito Kad (Limewire) +20:05:24 Overnet (eDonkey, now defunct) +20:05:47 no p2p app uses chord or pastry (to my knowledge) +20:05:54 I've had a look through the Az-Kad and it's not very compatible. Mojito might be interesting +20:05:57 On top of Kad a few changes have been proposed. +20:06:05 Recursive tunnels for faster lookups. +20:06:20 And Random Recursive lookups for more reliable lookups. +20:07:13 Insertions will be standard Kad until Random Recursive Stores are implemented. +20:07:45 Alright, so that is the overview. Does anyone have any questions? +20:08:17 One objection to recursive tunnels is that it renders local ip banlists useless +20:08:40 for example, I could have manually added the ips of a hostile party to my ban list +20:09:18 the nodes that participate in the recursive lookup/store will not know that +20:09:37 That is true. +20:10:00 Recursive queries are somewhat frail, and should only be used for speed. +20:10:35 Random Recursive queries will however, eventually find a path which doesnt involve the banned nodes. +20:11:05 For what kind of situations would you not trust the ban-list of another node? 
+20:11:25 sponge: want udp +20:11:28 eche|on: count is not persistent after network changes ("soft restart") +20:11:51 for the situation where the operator of that node hasn't been diligent in updating the banlist +20:12:02 or for the situation where the other node has no banlist at all +20:12:29 But what would happen if the query passed through a 'banned' node? +20:12:51 Either it is forwarded, dropped or recorded. +20:13:31 iterative never passes thru anybody +20:13:34 whatever the sybil/eclipse attack does - probably droped? +20:14:38 That is the thing about Recursive. It's ok if it fails. We have more reliable methods for keys that are under attack. +20:15:09 Like Iterative or Random Recursive +20:15:24 how to select a mode? +20:15:35 theoretically you could include a small bloom filter of banned ips to the query +20:15:54 mode selection an open question. +20:15:57 is an* +20:16:28 In my mind a parallel version would be interesting +20:16:39 A sequential failover version would be slow +20:17:03 But it is a bandwidth vs. max_latency tradeof +20:17:51 topiltzin: R5N includes a bloomfilter in queries. But I don't think the really is needed. +20:18:14 We build this thing to work even if failures are encountered +20:18:14 how much slower is the iterative lookup, and is that slowness a bottleneck of any kind? Do we really need to be optimizing that? +20:18:45 I think we gotta start with adding stat code (where necessary) to netdb and snark and gathering stats on current performance of those two impls +20:18:52 When you visit an eepsite, a lookup has to be done. +20:19:25 topiltzin: the speed of lookups can be seen under the 'Lookup' part of http://trac.i2p2.de/wiki/NetDB/NextBackend +20:19:28 Title: NetDB/NextBackend – I2P (at trac.i2p2.de) +20:20:16 netdb has lots of stats, if we add stats to equivalent places in snark we can start to put a picture together +20:20:35 query latencies etc? +20:21:06 zzz: +1 on moar stats +20:21:06 latencies, queries-per-success, etc, yes +20:22:26 Having access to those stats would be interesting. Especially when developing something new. However comparing I2PSnark-DHT to FloodFill is comparing apples to oranges. +20:22:29 as I said the other day, I think the snark code could be moved back to netdb but only if we choose K and B to swallow the whole local netdb into the routing table +20:22:57 if the routing table is missing most of the local netdb we may as well just keep sorting +20:23:55 your proposal (and yes it's been my plan for a couple years as well) is to replace the orange with the apple, so it's kindof important to compare them. +20:23:58 Im am not against setting a high B, lookup latency is a real issue +20:24:55 regarding K I think keeping it at 8 may be reasonable. +20:25:18 of course the new dht would have to be evaluated. +20:26:05 you can't pick K in isolation. You have to pick K and B to make the routing table work as well as sorting does now, for a given local netdb size. +20:27:03 Both can be tweaked while deploying. +20:27:29 So I'd go for an initial guesstimation base on what we know and what we need. +20:28:17 also depends greatly on whether it's the ffs or everybody that's in the new dht +20:29:24 Not making every node a participant in the new dht would be a mistake an keep us vulnerable to attacks like that presented in the UCSB paper +20:30:15 I don't see info on who's in or out in your proposal +20:30:18 I suppose I wasn't very clear about that in the proposal. 
+20:30:25 ;) +20:31:30 not at all sure you want everybody (natted, android, hidden, chinese, mobile phones, etc) in it +20:31:46 check out jr's extensive comments on where it all went bad +20:31:53 node churn is not good for the dht. You should have some minimal uptime requirements +20:32:32 topiltzin: node churn isnt much of an issue since all our data is mutable and republished every 37 seconds - 30 minutes +20:33:09 nat:ed nodes should probably not participate. android probably should +20:33:17 sure, N=500 and B=-8 was the disaster he never figured out, but there were other causes too, that are still present in our network... and could get much much worse if android takes off +20:33:25 chinese.. i don't know.. +20:34:04 other than likely having higher churn, how is android different? +20:34:32 node churn affects routing negatively.. so if the goal of this effort is to improve routing you cannot ignore it +20:34:39 I mean phones, not android in particular +20:34:58 android==phnoes for me aswell +20:35:22 mobile devices have lower bandwidth and horsepower and intermittent connectivity +20:35:57 How is it done now? +20:36:12 what? +20:36:39 regarding android devices that want to be an ff? +20:36:42 christoph2: is lurking somewhere +20:36:49 * christoph2 hides +20:37:00 there are some criteria for becoming an FF, one of them is uptime +20:37:11 how would fast key-rotation interfere with an eclipse attack? +20:37:57 and how long does it take for a node to integrate into the netdb of the other nodes? (ie pollute their routing tables) +20:38:32 androids become ff automatically like anybody else, if they meet the criteria. But seems unlikely anybody would do that over the air +20:38:38 well you have time T it takes to integrate a node into I2P (untill it's reasonably well connected) and time t the rotation. you need T/t + safety nodes for eclipse +20:38:53 topiltzin: uptime is really not much of an issue. R5N has some pretty aggressive replication factors. So churn is not an issue +20:39:00 * nodes needed to actually eclipse +20:40:27 hottuna: not exactly following code changes. was less than 30 minutes in december +20:40:27 I did some quick calculations yesterday +20:40:27 well 0.9.2 iirc +20:40:27 nodes_needed_for_eclipse = (60/key_rot_interval)*eclipse_integration_time*attackers_per_eclipse +20:40:27 nodes_needed_for_eclipse = (60/10)*24*20 = 2880. Which might be prohibitive for an attacker. +20:40:27 hottuna, how would a new keyspace (either a different permutation formula, different rotation schedule, or both) work? I don't see how we could ever migrate over. +20:40:27 ok, that sounds reasonable +20:40:49 We'd use both in parallel? the current implementation will remain separate until we can safely move away from it. +20:41:26 what I really want to know is what can we do in the next two weeks to improve resistance +20:41:29 christoph2: are those calculations sensible? and would 2880 nodes be an issue at all? +20:41:36 if that's making the class N routers ff, lets do that. +20:41:36 I find it very hard to believe that node churn isn't an issue. The bigger the churn, the worse the routing table of each individual node +20:42:29 how could we ever 'move safely away' and maintain compatibility? How could we handle the conn limit issues of two parallel impls? How would we migrate from one to the other? +20:42:33 topiltzin: the value K, which is the size of each bucket in the routing table is chosen to be a number of nodes that are highly unlikely to drop out of the dht in an hour. 
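Restating hottuna's back-of-the-envelope eclipse figure above as a quick check, using the same variable names; the units behind these example values are not spelled out in the log, so they are taken as given rather than interpreted:
# nodes_needed_for_eclipse = (60/key_rot_interval)*eclipse_integration_time*attackers_per_eclipse
key_rot_interval=10
eclipse_integration_time=24
attackers_per_eclipse=20
echo $(( (60 / key_rot_interval) * eclipse_integration_time * attackers_per_eclipse ))   # prints 2880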
+20:42:33 ^^ class F but !windoze +20:43:04 s/F/N/ +20:43:08 topiltzin meant: ^^ class N but !windoze +20:43:12 sure, we could do class N non-windows. No idea how many there are +20:43:35 it would also expose those routers as being non-windows, small anon issue +20:43:35 hottuna: you get ~20 on a moderately expensive server. 100 of these may or may not be a problem depending on whom you defend against. and I'm not sure if you couldn't get several times more nodes per server with proper code +20:44:22 alright, so it could be a bit of an issue. However it won't be for long the way technology tends to evolve +20:45:28 what else could we do for 0.9.7? +20:45:28 true re: anon issue.. so maybe just do all N and hope we don't piss users off too mch +20:46:18 didn't read everything. what was the issue with windows? +20:46:25 re connections: old nodes would carry on as usual. new nodes would balance their queries amongst both nets. +20:46:49 christoph2: baked in connection limits +20:46:52 christoph2: windows doesnt allow for a large number of connections +20:47:07 ah ok +20:47:27 christoph2: alright, so that answers the key rotation issue. it is probably not worthwhile +20:47:34 actually it's the rate at which new connections are opened that's limited +20:49:07 hottuna, I don't see how we get from here to there. I can see how to move the snark code to netdb with the same iterative lookups in the same keyspace. I don't know whether its worth it, but at least I can see how. After that it all seems really hard and mysterious. +20:50:02 We would change the key-space? Or what are you referring to as keyspace? +20:50:05 +1 with starting with snark code and figuring other stuff $later +20:50:40 keyspace = key->routing key algo, including rotation +20:52:14 so step one while deploying is having something that works (likely iterative only). then we add new KRPC messages for Recursive and Random Recursive +20:52:54 And when the net has upgraded to mostly support them we can enable them in the originator nodes. +20:53:27 deploying will even help us figure out performance while under massive attack +20:53:38 (for background, I started with the netdb kbucket code to make a generic library in i2p.zzz.kademlia, with arbitrary K, B, hash size, and eviction algo. Then I unit tested it to death. Then I moved it to snark for BEP 5 and more testing. The last part of the original plan is to move it back to netdb to complete the circle) +20:54:54 zzz.kad && i2psnark seems like a good base. I've been reading some of the code today, and it makes a lot of sense to me. +20:55:01 you're proposing different keyspace, different rotation, and different participants. i.e. a completely new overlay. +20:55:33 I'd like to do a completely new overlay. +20:56:04 oh good. code reading++. +20:56:47 alright. If this makes sense and no one has any objections I'd like to move this meeting along. +20:57:42 __Ticket #729 - properties location on osx__ +20:57:49 topiltzin, Meeh +20:58:11 yep, that's some very low-hanging fruit that's been dangling around +20:58:39 new overlay sounds like misery to me. +21:00:12 ... awkward moment ... +21:00:59 we still on dht? +21:02:09 imho discussion on dht isn't over but for the benefit of the meeting it should be +21:02:23 no decisions seem clear +21:02:26 * dg returns to shadows +21:03:16 I think the decision for the immediate future 0.9.7 is moar FFs .. the long-term view is still foggy +21:03:42 I'm gonna go ahead with #729 . Meeh, you around bro? 
+21:04:16 sry, I forgot about meeting +21:04:57 alright topiltzin, what's up with #729? +21:05:35 So, I've been running it for a while now, propagating trunk to branch i2p.i2p.729 +21:05:50 works fine, straight-forward +21:06:21 affects only new installs on OSX, so low impact, etc. +21:06:44 I'd like to merge it and get it over with +21:07:03 zzz, up for the #729 merge? +21:07:45 I don't have mac access, but Im assuming that topiltzin and Meeh does. +21:08:12 Yeah, we're probably the only osx users around here :) +21:08:15 here's a diff: +21:08:15 mtn diff -r h:i2p.i2p -r h:i2p.i2p.729 +21:09:14 I don't have repo access on this machine :/ +21:09:41 "access"? +21:10:00 as in set up :P +21:10:07 no objections +21:10:38 pastebin coming for those who care +21:10:50 just needs some testing, but probably wont get more unless its merged +21:10:50 thanks! +21:11:35 I lobbied for merging months ago as you will see in #729 comments +21:11:42 http://pastethis.i2p/show/3404/ +21:11:45 Title: Paste #3404 | LodgeIt! (at pastethis.i2p) +21:12:01 let's go ahead with the merge then +21:12:17 ok great. Meeh, speak now or forever hold your peace +21:12:28 (or whatever it is the priest says at the wedding) +21:13:18 I'd like him to speak later too if that's when he tests it :) +21:13:21 ok, I'll merge after the meeting +21:13:56 __Ticket #741 - process renamer on windows__ +21:14:11 str4d: you around for this? +21:15:54 mmk, this ticket is not so small +21:16:57 background - on windows, i2p runs with a process name of "java" +21:16:57 hi +21:17:24 meeting today? +21:17:27 which means any security settings that are applied to i2p become valid for any and every java application +21:17:41 sponge: yes. http://zzz.i2p/topics/1397?page=1#p6616 +21:17:48 Title: zzz.i2p: Meeting [4th June] (at zzz.i2p) +21:17:48 ty +21:17:59 bout time I made one of these... +21:18:48 this day is always difficult for me to do anything at this particular hour +21:18:55 can we do anything on 741 w/o str4d ? +21:19:29 I finally have a machine with windows on it +21:19:36 if we have a copy of visual studio then we can do everything without him +21:19:59 7 iirc, never use it though, so i can help/test +21:20:14 I could get a VS license from microsoft, if anyone knows how to use it.. +21:20:41 it's a good idea for the project to have such license +21:20:41 I mean as far as discussion. So back to the beginning, topiltzin you put this on the agenda why? just to try to get things moving? +21:20:41 vs is pretty painful from what I have heard +21:21:07 exactly - get some action going +21:21:37 Alright, str4d isn't around. Should we table this? +21:21:48 aye +21:22:28 * sponge has some 'misc' for discussion +21:22:41 let me know when I got the talking stick +21:23:03 Ill take that as a resounding yes. +21:23:03 Moving along.. +21:23:06 __Misc__ +21:23:09 if you guys want to table it fine, but let's not forget about it competely +21:23:21 topiltzin: agreed +21:23:46 (I will bring it up next meeting too) +21:23:57 ;-) +21:24:08 sponge: Misc was it? +21:24:51 MISC-- Bridge API for UDP (BOB) -- I have a few ideas on how it could be done, but I need some feedback, and need to know if it is even wanted +21:25:18 basically we need some sort of standard that is expandable +21:25:22 and to stick with it +21:25:43 it also has to be able to not mess with what is out there already +21:25:57 well-- adapt easily +21:26:56 So the question is what people would use it for? 
+21:27:03 we already have a thread going at http://zzz.i2p/topics/1393 --- how about putting your proposal there? +21:27:10 Title: zzz.i2p: UDP Trackers (at zzz.i2p) +21:27:10 two ways I am thinking of is either wrap a UDP packet with <> or <> +21:28:13 hottuna: trackers, voip? +21:28:16 I'm curious on demand +21:28:16 dare i say it, games +21:29:03 and I need people to discuss this. I have been trying for YEARS to talk with someine, to get more ideas, and nobody wants to think on the problem +21:29:03 oh, anonet. psi was pushing for that. +21:29:03 *someone +21:29:03 gotta read up on how SOCKS does it too +21:29:03 there are apps out there that do use IDP +21:29:06 *UDP +21:29:22 don't forget gnutella +21:29:25 voip (mumble) has been implemented and seen some use +21:29:44 that's tcp +21:29:47 bote uses a udp-ish packet too +21:29:54 gnutella can use udp +21:29:58 zzz: My bad +21:30:29 When is the next meeting? +21:30:40 Whenever someone wants to hold one +21:30:40 it's all easy inside the JVM. I could add udp to zzzot in a day. It's the external i/f that is a pita. +21:30:40 so is there demand? and if you got implementation ideas that can expand and not go stale, post +21:30:45 Oh crap. We're in a meeting. +21:30:45 I won't host one next week. +21:31:06 orion: we're at __Misc__ now.. +21:31:25 sponge: yes. +21:31:32 number 2 misc--- ipv6 and it's implications on de-anoning +21:31:35 hottuna: Thank you. +21:31:50 concerns? +21:32:01 haw close are we to using ipv6 +21:32:08 how +21:32:12 what concerns are you having sponge? +21:32:27 ipv6 can link to who you are very easily +21:32:46 damn, overslept the meeting -.- +21:32:53 IPv6 thread: http://zzz.i2p/topics/109 +21:32:56 since the address space is larger? +21:32:59 Title: zzz.i2p: IPV6 TODO (at zzz.i2p) +21:33:03 yes +21:33:03 I was thinking +21:33:14 zzz: this is different, but related +21:33:17 ipv6 does not deanonymize? WHOIS _may_ be more accurate as _may_ be determining if a NAT is in place (Bob and Ryan are behind a NAT, you do not know which is which) -- with IPv6, you can perhaps know if it is Bob or Ryan. +21:33:24 IMO, it makes no practical difference to I2P. +21:33:27 i2p could get an ipv6 space +21:33:39 socks 5 udp would be awesome +21:33:42 farm that out to users via tunnel +21:33:45 o/ +21:33:48 Side note: i2pcpp will have full ipv6 support. +21:33:54 Apologies for being late. +21:33:57 dg: I agree. +21:34:06 awaiting sponge to list his concerns (post #66) +21:34:20 hottuna: Can we move on if sponge has nothing to add? +21:34:35 i feel it's a non issue +21:34:35 schedule? merge for 0.9.8, enable by default in 0.9.9 +21:34:38 so in short.... will i2p provide an ipv6 tunnel for persons of high concern? +21:34:53 hey str4d, you missed the i2p.exe discussion :( +21:35:04 should wee? +21:35:07 I don't think our threat model includes I2P being illegal to run. +21:35:31 If that was the case ipv4 would be problematic as well. +21:35:42 orion, I'm trying to keep our docs up to date w.r.t IPv6. The docs should match what's in my ipv6 branch now. +21:35:45 ht: in some countries (china?) it is +21:36:20 And who runs i2p is the only additional information that would be leaked. +21:36:39 the best way thru the GFW may be via IPv6, hard to see how it's a negative +21:38:09 last misc from me--- So sorry I have been missing all the previous meetings. Again, difficult for me to do this day of the week, and hour. I will be more active very soon on everything as well... the talking stick is for the next persion... +21:38:13 zzz: Thank you. 
+21:39:03 Meeh: you missed #726, but are requested to do some testing of the patches that will be merged by topiltzin (i think that is the summary) +21:39:15 str4d: #741 was tabled for next meeting +21:39:22 sponge: nice :) +21:39:29 I say bring up 741 now +21:39:32 Okay, anything else? +21:39:32 hottuna: noted. +21:39:39 he's here, why not +21:39:46 fine by me +21:39:46 hottuna: Yes, minor thing. +21:40:01 ok, go orion! +21:40:04 de-tablizing 741 ... :) +21:40:20 I was wondering if someone could get me my credentials for the press@i2p2.de email account. +21:40:27 As well as update the website. +21:40:46 orion: website is in mtn +21:40:56 update what part of the website? +21:41:03 And no credentials required to update website. +21:41:18 (Just create a mtn key and go) +21:41:25 str4d: email account +21:41:43 welterde handles that domain as far as I know. +21:41:46 Or, nevermind. The team.html page has already been updated. +21:41:46 you'll be sorely disappointed, as I don't think we've ever gotten a single email there, but welterde is the person to ask to get added. It's just a redirector to a list, there's no account. +21:42:02 So right now it's just the email account. +21:42:20 I Will speak to welterde, thank you. I yield my time. +21:42:30 excellent +21:42:38 __Ticket #741 - process renamer on windows__ +21:42:45 Okay, so briefly de-tablizing 741? +21:42:45 topiltzin, str4d +21:42:52 yes +21:42:58 :-) +21:43:05 Current situation: the process renamer works. +21:43:12 (When called by the Tanuki wrapper) +21:43:23 (or passed CLI arguments) +21:44:01 I've tested it on Win7. topiltzin has verified that the code has been run on pretty much everything except Win8. +21:44:12 So it needs testing there. +21:44:34 Does anyone have win8 access? +21:44:37 32/64? +21:44:52 * KillYourTV can +21:44:59 The one part that is not working currently is the internal defaults - the arguments that are used if no arguments are provided externally (i.e. wrapper or CLI). +21:45:02 (win 8, x64 and/or x86) +21:45:09 My daughter was going to upgrade to 8, but we found out it is really bad. +21:45:12 zzz: I was running 64-bit Win7 +21:45:30 (IIRC) +21:45:30 so KillYourTV, you're up for some testing? +21:45:37 always +21:45:44 :) +21:45:52 Thanks KillYourTV :) +21:46:11 two remaining points I can see: +21:46:11 * KillYourTV will set up some VMs +21:46:14 Testing just requires dropping the new i2p.exe into the install folder, and tweaking wrapper.config to use "i2p" instead of "java". +21:46:21 1. Icons - need them in different sizes, alpha channels, b.s. +21:46:36 2. Strings like license, description, etc. need reviewing +21:46:55 1. - I've set the VS file to refer to the icon in the installer/ dir in i2p.i2p. +21:47:22 So it should be using the same icon as the launch4j-based i2p.exe uses. +21:47:25 I've not noticed but is the proposed "renamer" already in i2p.i2p? +21:47:36 2. - Agreed. +21:47:36 re Icons: i don't think that any high quality/svg files exist +21:47:51 KillYourTV: yes - installer/c/i2pExe +21:48:10 if it doesnt work w/o arguments, isnt that a problem? +21:48:10 cheers, I can handle the rest then ^^ +21:48:28 zzz: yes it is. +21:48:35 then some things like control panel are going to look weird +21:48:43 That needs to be fixed if it is going to replace the launch4j-based i2p.exe +21:48:54 str4d: are you sure it's a problem? I thought you hardcoded some defaults +21:49:17 topiltzin: I did, but it just crashes and I couldn't work out why at the time. 
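For anyone reproducing the test setup described above ("dropping the new i2p.exe into the install folder, and tweaking wrapper.config to use 'i2p' instead of 'java'"): the log does not say which wrapper.config key is meant, but the standard Tanuki property for the launcher binary is wrapper.java.command, so the tweak is presumably something like the following (an assumption, not confirmed in the log):

    # wrapper.config -- hypothetical tweak; the property name is assumed,
    # since the log only says to use "i2p" instead of "java"
    #wrapper.java.command=java
    wrapper.java.command=i2p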
+21:49:29 hardcodeing can be a bad thing, Do a path search first. +21:49:47 But when I pulled out (what should have been) the exact same arguments and used them via the CLI, it worked fine.. +21:50:02 sponge: different defaults. +21:50:13 ahh +21:50:35 sponge: these are the settings that I2P is run with if nothing else is there (no wrapper.config). See installer/i2pstandalone.xml +21:50:38 str4d: in order KillYourTV to test you need to build the actual i2p.exe or have you commited that in mtn? +21:50:46 (and the doBuildExe target in build.xml) +21:50:49 str4d: you may have to do like I did for BOB, basically a double main() +21:50:53 topiltzin: it's in mtn +21:51:07 * KillYourTV already asked ^^ +21:51:14 topiltzin: needs to be built - I wasn't going to commit the binary until we were close to actually using i. +21:51:21 KillYourTV: I meant that the source is in mtn ^_^ +21:51:24 the first main inserts missing args, passes it to the actual main() +21:51:31 oh...heh +21:51:58 sponge: that's pretty much what is done - if args are passed they are used, otherwise default args are constructed. +21:52:05 so you got main() and _main() +21:52:08 ok so the i2p.exe is not in mtn? +21:52:08 topiltzin: what is the format of launch.properties? +21:52:27 topiltzin: correct. Just installer/c/i2pExe/i2p.c etc. +21:52:30 the first is just a cleanup +21:52:37 sponge: see installer/c/i2pExe/i2p.c for the code. +21:52:37 topiltzin: src yes, binary no +21:52:48 will look, thanks +21:53:11 I'll get back to you on why it is broken +21:53:27 topiltzin: there were also several commented-out methods that I couldn't work out their purpose. +21:54:04 that's fine, I can explain offline +21:54:15 but KillYourTV needs a binary to test, can you build one? +21:54:54 topiltzin: sure. +21:55:21 launch.properties - I believe one line per property, need to double-check +21:55:39 (unless you already have VS2008 KillYourTV - that's what it is built with) +21:56:05 which brings up another interesting __misc__ point: +21:56:08 topiltzin: I'm thinking that launch.properties could be like wrapper.config but for the standalone case. +21:56:23 yeah +21:56:42 (Because the current standalone i2p.exe is not adjustable at all) +21:58:33 now that the project is loaded with cash (because some mysterious person donated 1000 BTC when they were still cheap) we should have some software licenses for things like vmware, visual studio, etc. +21:59:21 visual studio I can get for free or one of you guys +21:59:24 I'm sure that KillYourTV has legally purchased his copies of Windows 8 :-D but technically it's the project that should be funding that +21:59:39 microsoft is advertising $450 win8 computers on tv (Asus? Acer?), we could just buy one of those +22:00:05 excellent idea zzz +22:00:16 (dreamspark copies, "for educational use") +22:00:27 tiger direct often has deals for 300-400 on low end laptops +22:00:27 If Microsoft offers student discounts, I can get them. +22:00:34 If you want to go that route. +22:00:37 hottuna yes please (re VS) +22:00:51 wait +22:01:01 is the gamer laptop we bought win. 8? +22:01:19 do we really need toys? couldnt the testing be done on a vm? +22:01:27 echelon had his own windows. +22:01:45 and I do my testing in clean VMs +22:01:52 str4d: I have vs around some place (it is very old) but I won't be using that. I'll simply review your code once pull and apply is finished here and advise you +22:02:14 sponge: thanks. +22:02:59 a vm is always better +22:02:59 I agree with hottuna regarding the VM. 
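The i2p.c review here and in the exchange that follows comes down to two classic C issues: the new argv array must receive the option pointers themselves (not a truncated copy of their characters), and the options must not be free()d until after the wrapped main() has used them. Below is a minimal sketch of that pattern, assuming read_options() yields an array of C strings; the names follow the chat, and the real installer/c/i2pExe/i2p.c may declare these differently (which is why sponge's suggested fix takes an address with &), so treat this as the general shape rather than the literal patch.

    #include <stdlib.h>

    /* Sketch: build an argv for the case where no arguments were supplied
     * externally (no wrapper, no CLI args). */
    static char **build_argv(char **argv, char **options, int n_opts)
    {
        /* argv[0] + n_opts default options + terminating NULL */
        char **new_argv = calloc(n_opts + 2, sizeof(char *));
        if (new_argv == NULL)
            return NULL;

        new_argv[0] = argv[0];               /* reuse the program name */
        for (int i = 1; i <= n_opts; i++)
            new_argv[i] = options[i - 1];    /* copy the pointer, not the chars */
        new_argv[n_opts + 1] = NULL;         /* execv-style NULL terminator */
        return new_argv;                     /* free new_argv and the option
                                                strings only after the wrapped
                                                main() is done with them */
    }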
+22:02:59 and we can pass around images for easier debugging etc. +22:02:59 alright. so are we happy with this topic/discussion? +22:02:59 str4d: no problem. I've head my head buried in C, C++ and ASM for the last month +22:03:02 a win8 netbook would be a hella lot cheaper than VS +22:03:52 zzz: What if I got a student copy of VS? +22:04:03 I was thinking of donating my student copy as well. +22:04:14 orion: if you get a student copy i2p cannot technically use it +22:04:21 My daughter could possibly get a student version too +22:04:27 s/technically/legally/ +22:04:31 topiltzin meant: orion: if you get a student copy i2p cannot legally use it +22:04:31 topiltzin: why not? +22:04:34 hottuna: yes over here. Two main action items: Fix the defaults (and provide a launch.properties); build an i2p.exe for KillYourTV to test. +22:04:37 It's for my education. +22:05:07 and not for a for-profit company/project +22:05:07 beause it is a student copy for orion's education - it means only he can use it +22:05:26 ok. in that case I cant provide VS. +22:05:49 what license does yours have? +22:05:58 and this stuff cant be built by mingw? +22:05:58 topiltzin: student +22:06:46 you can use it to build i2p.exe or other stuff for i2p, the only thing you can't do is give it to someone else +22:07:23 what about vs2008 express? Is that limited to 32bit only? +22:07:46 str4d: note! It is not good style to mix C++ comments in C code ;-) use /* */ +22:08:01 I suppose we need i2p.exe 64bit _and_ i2p.exe 32bit +22:08:32 I *think* 32-bit only is good enough +22:08:35 I also already see your problem +22:09:01 good enough = runs on both 64 and 32 bit windows +22:09:19 I'm not sure a 32bit i2p.exe can load the 64bit wrapper. The 32bit wrapper can't load the 64bit jvm +22:09:36 dunno though about this +22:10:48 str4d: i2p.c line 54, and the loop below -- you are not assiginging correctly... it should be '*new_argv[0]' not 'new_argv[0]' same for the loop below that. The final NULL should be OK +22:11:06 KillYourTV: how about a x86 which starts the x86 or x64 launcher? +22:11:44 str4d: Try that, and it should work for you +22:11:47 that's what I'm saying, I don't know if it can work. 32bit binaries _usually_ cannot call x64 binaries. +22:12:47 actually the first line may be OK, but the loop does need to be a * +22:13:26 read_options, if returning as a pointer, needs to copy the pointer +22:13:45 KillYourTV: trough cmd.exe it should work as last resort, tough thats a win problem +22:13:48 new_argv[i] = &(read_options[i-1]); +22:13:51 like so +22:14:57 sponge do you have access to a windows box? Can you help test this? +22:15:17 sponge: also post any comments on trac #741 +22:15:35 I have a win 7 laptop, but can't test today. I'm short on time, and had to budget time to be here +22:16:17 otherwise i would jump at it +22:16:52 point is that you have a pointer to an array of pointers +22:17:41 I can basically test any/all versions of Windows +22:17:44 you are not copying the pointer, your code is copying the first few chars, which will point to random crap and cause your crash +22:18:46 new_argv[0] = argv[0]; <-- that is okay +22:18:59 new_argv[i] = read_options[i-1]; <-- random crap +22:19:13 * hottuna is readying the meeting closing hammer +22:20:21 alright.. closing time +22:20:24 sponge: I'm pretty sure that section is still the same as it was for limewireExe +22:20:31 Before everyone goes, I've been thinking of "non profit 501(c)(3) status" for the Invisible Internet Project. 
Would this be the place to talk about that or somewhere else? +22:20:38 (Which *should* have been in a working state, according to topiltzin) +22:20:45 micster: yes +22:21:04 hottuna: we're done with #741? +22:21:22 i doubt we'll become done with it :P +22:21:29 str4d: problem 2 +22:21:33 free(read_options); +22:21:45 don't free them there +22:21:48 I saw a post in the forum about someone wanting to incorporate in Germany. I'm in the US and have an interest in pursuing this. +22:21:52 KillYourTV: re: 32/64, what currently happens with the launch4j-based i2p.exe? That starts a separate java.exe process; is it built separately for 32 and 64 bit? +22:21:55 sponge: I've gotta go. Could you take care of the rest of the meeting? +22:21:58 free them at the very end +22:22:09 I'm about to go too +22:22:15 it just needs a final baf, and it's done +22:22:18 darnit! +22:22:25 micster: Great! Sadly, timing's pretty bad. Post about it on zzz.i2p ("the forum") if you can? +22:22:28 sponge: I'll try your suggestion and report back. +22:22:31 I think it is done +22:22:38 Ok +22:22:41 (later though - afk now o/) +22:22:59 str4d: double check that it is a pointer +22:23:01 * hottuna baf's the meeting closing hammer +22:23:06 * hottuna **baf** +22:23:17 **BARF** :-) +22:23:35 summary posted at: http://zzz.i2p/topics/1397 +22:23:42 Title: zzz.i2p: Meeting [4th June] (at zzz.i2p) +22:23:50 :) +22:23:57 cool, I can now go run my errands +22:24:08 great meeting everyone! +22:24:19 micster: the meeting is now finishing up and everyone seems to have a lot they want to get across. You'll get more exposure and brain time if you post it there. +22:24:53 Ok, I'll make the post. Maybe it can be discussed in a future meeting. +22:25:01 Just wanted to see if I was in the right place. +22:26:52 lots of good discussion. thanks for making the time to particpate y'all +22:27:07 :) +22:28:54 micster, the correct thread for that is http://zzz.i2p/topics/1388 +22:28:58 Title: zzz.i2p: Official I2P group (at zzz.i2p) diff --git a/i2p2www/meetings/logs/224.rst b/i2p2www/meetings/logs/224.rst new file mode 100644 index 00000000..79146123 --- /dev/null +++ b/i2p2www/meetings/logs/224.rst @@ -0,0 +1,37 @@ +I2P dev meeting, June 4, 2013 @ 20:00 UTC +========================================= + +Quick recap +----------- + +* **Present:** + christoph2, + dg, + hottuna, + inscrutable, + KillYourTV, + Meeh, + orion, + psi, + sponge, + str4d, + topiltzin, + zzz + +* The next NetDB backend + * Fast key rotation is probably meaningless according to discussions with Christoph Egger. + * Regarding building a completely new overlay, that may be a difficult task. + +* Ticket #729 - properties location on osx + * zab/topiltzin will merge. Meeh's attention is requested for some testing. + +* Ticket #741 - process renamer on windows + * KillYourTV agreed to some Win8 x86/x64 testing. + * Defaults of the new i2p.exe are not set up/working yet. + * Icons for the I2P executables are not available in high quality/svg. -> Bad looks. + +* Misc? + * Sponge: Bridge API for UDP (BOB) RFC + * Sponge: IPv6 and de-anonymization since the address space is so large that each user and device is likely to have an address. + * Sponge: Will be around more Real Soon Now (tm). + * Orion: i2pcpp supports IPv6. diff --git a/i2p2www/meetings/logs/225.log b/i2p2www/meetings/logs/225.log new file mode 100644 index 00000000..466eb4fe --- /dev/null +++ b/i2p2www/meetings/logs/225.log @@ -0,0 +1,108 @@ +20:00:08 0) hi +20:00:23 1) RI verifies disabled in a point release?
+20:00:30 2) misc. topics led by Meeh +20:00:33 3) baffer by Meeh +20:00:36 ------------- +20:00:36 0) hi +20:00:51 1) RI verifies disabled in a point release? +20:01:02 welterde brought this up the other day +20:01:33 if I'm going to do it it has to be in the next few days, as I'm AFK ~ 13th - 29th +20:01:53 echelon is traveling but for the moment we'll assume we can get a hold of him and he can do the news +20:02:14 so welterde, please make your case for why we should do this +20:03:08 the attack outlined in the paper is quite an serious attack for your not-well used destinations, as the required statistics are not very large +20:04:14 does it attack the server dests, or the (client) users that connect to them? +20:04:21 and for long-lived destinations it's even more dangerous as you can keep the attack going for as long as it takes to get enough statistics +20:05:08 zzz: the client that connects to some dest... say irc link tunnels over dedicated destination would be a prime target (if you get hold of the destination somehow) +20:06:29 zzz: however.. there is an option to disable RI verifies in the advanced options.. maybe news update to users to disable it? +20:06:29 have you always considered it serious or have you changed your mind recently? +20:06:56 I thought I just added that option last week? +20:07:15 oh +20:07:18 you did, I made the same mistake. +20:07:34 thought you just changed the default value of that option.. ok.. not an option then +20:07:57 maybe I didnt explain it well in some post... +20:08:59 zzz: and in the paper they also didn't take timing into account.. I guess that can be used to further improve the attack +20:09:02 We've had a preprint of their paper for almost 5 months, since March 10. If this is a drop-everything problem, we've done an incredibly poor job of responding. +20:09:33 So I'm wondering if you have always thought it critical or have changed your mind recently, if so why? +20:10:33 well.. I was under quite a bit of stress until recently.. so didn't really take a look until now +20:11:30 zzz: buts it's really hard to say as we don't really have that much data on these things.. +20:11:48 what happened to that page on trac with our openitp responses, and our lack of security criteria... +20:12:11 If it is a drop-everything problem, waiting 1 1/2 more months is a problem too. +20:12:30 sure +20:12:40 but is it +20:13:12 is the problem the RI verifies or is it Sybil? If it's Sybil then we don't have any near-term fixes +20:13:27 zzz: it's the RI verifies +20:13:46 i.e., is there a large class of hostile-ff attacks +20:14:16 zzz: and an variant of the attack might be possible with RI lookup and then waiting for an connect as well.. but that attack would be magnitudes more difficult.. so I wouldn't worry about that one just yet +20:14:35 if an attacker takes over a portion of the keyspace, isn't there any number of things he could do? +20:15:20 zzz: given enough time the attacker doesn't have to occupy an large portion of the keyspace +20:15:23 I guess I always looked at this as a Sybil issue. Doesnt me I was right. +20:15:30 *mean +20:16:07 he only has to occupy the space surrounding the target LS +20:16:53 zzz: hmm.. what would be nice for stats.i2p or so would be an visualization of the ff over the keyspace.. (if there is no such thing yet) +20:18:50 ok thanks for making the case welterde. 
Let me now ask for others to jump here with their opinions +20:18:53 non-subtle attacks might be visible there then +20:19:00 * welterde looks for the openitp page you mentioned +20:19:22 str4d set it up but I don't see it linked on the home page any more +20:19:35 q: Could someone pull off the RI attack without full keyspace Sybil? +20:19:45 I think yes but ??? +20:20:05 http://trac.i2p2.i2p/wiki/OpenITPReview/Criteria +20:20:35 Vulnerability Response Process Maturity and Transparency +20:21:20 we aren't ever talking about full keyspace sybil here. You're targeting a particular slice +20:21:31 dg: he only has to capture most LS lookups.. and as many RI lookups as possible; the latter portion only depends on how much time he has for the attack +20:22:17 "most"? For the network? +20:22:20 it's just really bugging me that we could have done this months ago for no effort. +20:22:39 right. it looks shit if we do it now, really. +20:22:49 but I guess that's irrelevant +20:23:14 who else has an opinion, please speak up +20:23:43 dd if=/dev/null of=opinion.txt +20:24:13 last call. we doing this? +20:24:27 of course if someone was bored he could whip up an simulation.. that would certainly help ;) +20:25:09 maybe I'm just pissed at myself that I didn't think of just turning off the verifies. +20:25:32 zzz: don't worry about it. you aren't expected to cover everything always. +20:25:43 ok everybody with an opinion please enter yes to do a release this week or no for don't +20:26:06 (or i am here if you don't care one way or the other..) +20:26:58 If I don't see any votes we arent doing it +20:27:21 will the release contain *only* disabling RI verifies? +20:27:32 vs. whatever is in trunk now? +20:27:35 maybe we shouldn't have skipped the who is here phase of the meeting +20:27:54 I'm just not qualified enough. +20:27:57 I don't care who is here. I care who has an opinion. +20:28:24 zzz: well.. who isn't here doesn't have an opinion ;) +20:28:46 zzz: I guess we are talking more about an small release, right? +20:28:59 welterde: what do you mean +20:29:02 It would be just RI verifies + anything else tiny we decide to pluck from trunk +20:29:17 and probably called 0.9.7.1? +20:29:28 yeah.. that's what I had in mind as well +20:29:39 no knowledge of this topic therefore no opinion, if we do it i'll of course be able to do the uploads to the various places, etc. +20:29:58 for gods sake somebody vote. welterde at least +20:30:13 who else has read the UCSB paper? +20:30:16 oh I am for it if that's not clear ;) +20:30:41 I've read it.. +20:31:16 I'm anxious to test the other stuff in trunk so the more we decide to pluck the more "Yes" my vote becomes. No opinion strictly on RI verification. +20:31:54 str4d: your opinion? you were quite active in the discussion on the forum ;) +20:33:56 zzz: maybe we should do the vote on the paper thread.. so str4d and tuna (and the others in the thread who are not here) have a say as well.. +20:33:56 I would want to keep the "other stuff" list very short as I would be doing this very fast and then blowing out of town, unable to fix problems +20:33:59 tuna is almost completely afk for a while yet +20:34:51 a no would be better than silence +20:35:03 zzz: well.. or kytv could do the build.. +20:35:10 in theory kytv can do releases too, he's the other one with signing keys, yes +20:36:35 ok then lets do it. I'll put a thread up on zzz.i2p if you want to propose other stuff to go in, final decision in about 24 hours, and I'll do the build maybe thursday. 
Can somebody contact echelon? +20:36:53 anything else on this topic? +20:37:37 I don't think so. +20:38:23 http://zzz.i2p/topics/1443 +20:38:40 please review the 17K line diff from 0.9.7 and history.txt for other pluck candidates +20:38:47 2) Meeh's topics +20:38:50 take it away Meeh +20:54:33 zzz: the tag is "i2p-0.9.7" +20:54:36 not "0.9.7" +20:54:47 * topiltzin preparing his plucklist +20:55:26 same here +20:55:32 thx +20:55:47 zzz: pm ok? +20:57:06 only if it's of zero interest to anybody else +20:58:51 draft for email to zooko +20:58:55 http://pastethis.i2p/show/0bZ3iFeE9uABCORkfXV6/ +20:58:58 Title: Paste #0bZ3iFeE9uABCORkfXV6 | LodgeIt! (at pastethis.i2p) +20:59:10 I didn't include status or anything yet. I may be way off base. Feedback appreciated. +21:01:00 3) /me *baf*s the meeting closed for Meeh +21:03:29 dg that's a really great start. diff --git a/i2p2www/meetings/logs/225.rst b/i2p2www/meetings/logs/225.rst new file mode 100644 index 00000000..28338222 --- /dev/null +++ b/i2p2www/meetings/logs/225.rst @@ -0,0 +1,12 @@ +I2P dev meeting, August 6, 2013 @ 20:00 UTC +=========================================== + +Quick recap +----------- + +* **Present:** + dg, + kytv, + topiltzin, + welterde, + zzz diff --git a/i2p2www/meetings/logs/226.log b/i2p2www/meetings/logs/226.log new file mode 100644 index 00000000..bf26e2ad --- /dev/null +++ b/i2p2www/meetings/logs/226.log @@ -0,0 +1,264 @@ +20:00:31 0) hi +20:00:38 1) website revamp (str4d) +20:00:55 2) console home page request for http://open4you.i2p/ +20:01:03 Title: open4you.i2p (at open4you.i2p) +20:01:11 3) disable outproxy (topiltzin / dg) +20:01:20 4) netdb project update (hottuna) +20:01:23 0) hi +20:01:25 hi +20:01:30 hi +20:01:35 hi +20:01:42 more info and links at http://zzz.i2p/topics/1460 +20:01:55 let's try something new and limit each agenda item to 15 minutes +20:02:00 Title: zzz.i2p: Meeting Tues. Sept. 10, 8 PM UTC (at zzz.i2p) +20:02:03 1) website revamp (str4d) +20:02:07 go str4d +20:04:12 skipping 1) for now +20:04:17 2) console home page request for http://open4you.i2p/ +20:04:21 Title: open4you.i2p (at open4you.i2p) +20:04:28 is the open4you requester here? +20:05:53 skipping 2) for now +20:05:59 3) disable outproxy (topiltzin / dg) +20:06:05 go dg or topiltzin +20:06:05 \o +20:06:13 I'll take it first. +20:06:50 Hi all, I've been mulling this over for some time: I think the outproxy for HTTP (and possibly HTTPS) being included by default isn't aligned with our goals; +20:07:25 most of the time, when asked on IRC about outproxying, community members recommend Tor and not to touch the outproxy for many/any uses +20:07:38 "we leave that to Tor" is something that's thrown around a lot +20:08:22 the outproxy is centralized which is obviously a big issue, there's problems with Tor having thousands of exit nodes even, let alone us having just one +20:08:43 security issues aside, it's a usability problem. What if someone trying to use I2P thinks I2P is just really slow because google.com loads slow? Oh, and it's in German? Huh? +20:09:14 We haven't built in the code for outproxying as a 'real' measure for a reason: we don't want to do it and hidden services are our target +20:09:58 I feel we're putting users at risk by inserting the outproxy by default and (possibly) confusing their perception of what I2P is and what it does. +20:10:04 iniial pitch over +20:10:17 topiltzin, you have anything to add? 
+20:10:52 that's a good list; I can also see KillYourTV 's point that the functionality should not be removed +20:11:22 ok let's throw it open to comments from others +20:11:23 but I strongly agree with dg that it's better to steer users away from using i2p for clearnet surfing +20:12:17 Someone (Pseudonemo) has suggested an explanation page as to why clearnet websites will not load and possibly a Tor recommendation. +20:12:19 a plugin that outproxies via tor locally would be nice.. +20:12:45 welterde: funny you say this.. jtor was introduced by ioerror to zzz a few weeks back. +20:13:26 dg: yeah.. that's what I was thinking about.. should be much cleaner than having to rely on an external running component +20:13:52 So, thoughts? +20:13:54 Usually the only time I observe topic of I2P's in discussion is answering questions like "Why doesnt it work like I want it to?" and supplying alternatives like Meeh's outproxy or just using Tor. Having the option for the outproxy is a good thing, and having users knowing that there is such an option is also nice. +20:13:56 I'm against removing it - it's a limited feature, yes, but what it does it actually does pretty well. We won't improve the usability of it by disabling it. We'll just make things harder and more confusing. +20:14:45 I think overall I agree with zzz (since my entrance) +20:14:58 perhaps when we have a better solution (a tor plugin) then we could disable echelon;s by default +20:15:19 options are always good, and I use the outproxies a lot, I know of course not to trust outproxies communications +20:15:21 Why should we do something that we (mostly) acknowledge is something Tor is better for? It seems like we're letting down people by even trying, it's a false impression. A well designed explanation page could ease the confusion/hardness +20:15:46 I don't suggest we remove the capability, just the default +20:15:47 sounds like dg is mainly describing a documentation / education problem, that won't be fixed by disabling it. +20:16:24 it would not be fixed but would be avoided +20:16:31 I don't know why the user should be trusting a third party by installing I2P +20:16:36 i2ptunnel is scary and I'd rather not have to point people in there to turn it on +20:16:36 Some people do not use tor and will not use tor for various reasons. They can have use for an I2P outproxy. +20:17:12 kytv's proposal to have a warning page that makes things clear would be a proper solution, no? +20:17:16 dg you are right, EpicCoffee too +20:17:26 On rare occasions I use the outproxy, mostly to check out links sent in i2prc +20:17:42 I understand we can say "well, the user should read" or similar but.. should we let them shoot themselves in the foot so easily? +20:17:49 like me EpicCoffee +20:17:51 to look at it another way, while it's on by default on the i2p side, it always takes affirmative action by the user to enable it in the browser. So you could say it is not on by default now, if you consider the browser too. +20:18:09 If the link cannot be loaded through the outproxy, I am unlikely to load it through the clearnet. +20:18:32 it takes affirmative action to browse eepsites, it is still confusing. If someone is aware of the risks, they can add the outproxy. +20:18:37 *** trolly_ is now known as trolly +20:18:37 Or load the maybe-coming tor plugin +20:18:49 I'm sure I'm not alone in this. +20:18:49 lol +20:18:55 does eche|on have an opinion? +20:19:03 This isn't an attack on echelon either but should we be trusting a single person with all outproxy traffic? 
+20:19:12 Regardless of who it is, it's a single point and a single person +20:19:37 would you rather trust one person you somewhat know, or 3000 people you don't? +20:19:41 An assumption could easily be made that I2P is performing a tor like mechanism +20:19:57 3000 people I don't +20:20:10 coming up on the 15 minute mark. do we have any sort of consensus? anybody else that has a thought on this? +20:20:34 imho we shouldn't disable it until we have the tor plugin +20:20:37 I think dg is primarily arguing from the perspective of new users. They don't know eche|on. +20:20:51 +1 Pseudonemo +20:20:59 *I* know eche|on, as a result, I don't mind the outproxy much but yes. +20:21:04 welterde: I can agree on this compromise. +20:21:21 nobody should use an outproxy for anything sensitive. that should be a given lol. +20:21:22 Let's leave it there? +20:21:33 assuming a tor plugin is in the works at all.. which it isn't +20:21:54 ok, interesting topic, thanks for bringing it up dg. May be worth talking about again after a while, whether we have a tor plugin or not +20:21:59 it is an education problem so it would be best to solve it through education +20:22:16 zzz: no problem, thank you. +20:22:39 any volunteers to look at the education / documentation issue and propose some improvements? +20:22:42 topiltzin: I don't think I would like a tor plugin in i2p. My opinion on that is I don't like/trust tor. +20:22:55 EpicCoffee: moved topic, save for next time :) +20:23:16 last call for volunteers and then we are done with this topic +20:23:43 ok then. +20:24:04 skipping 4) as hottuna is not here +20:24:04 lets circle back to 1), is str4d here? +20:24:45 skipping 1) again +20:24:49 * EpicCoffee must be oblivious to the "list" +20:25:01 EpicCoffee: you weren't here for the start, will pm. +20:25:09 2) open4you.i2p, which we will discuss whether or not the requestor is here. +20:25:12 ah thank you dg +20:25:13 open4you are you here? +20:25:15 EpicCoffee: channel is logged real-time http://killyourtv.i2p/irclogs/%23i2p-dev.2013-09-10.log +20:25:54 the request is at the bottom of http://zzz.i2p/topics/1429 +20:26:03 Title: zzz.i2p: 0.9.8 Release Summary (at zzz.i2p) +20:26:44 as a refresher, since we haven't done this in a while, my personal guidelines are at http://zzz.i2p/topics/236 +20:26:49 Title: zzz.i2p: How to get my Eepsite added to the Router Console home page (at zzz.i2p) +20:27:09 has anybody used this site? Does anybody have any thoughts about putting it on the console? +20:27:30 If open4you doesn't turn up, can I propose one? +20:27:31 loading the guidelines now +20:27:36 I like that the operator is upfront about what he can and cannot guarantee +20:28:08 *** trolly is now known as trolly_ +20:28:14 but besides that (positive) impression there isn't much to say... +20:28:15 we are considering open4you only today, at least in item 2). If you like you can have a new item 5) dg +20:28:28 I believe there should be available hosting on i2p, however I'm not sure I would trust a host lol. I'm conflicted on the hosting topic so I'll step aside on this one +20:28:29 zzz: ok, thx +20:28:42 his TOS looks ok to me +20:29:03 it's a new category not currently on the console so that's a plus +20:29:03 i know of no clients from open4you.i2p so I can't vouch for it but if the owner can turn up another time, I'm fine with discussing it again +20:29:56 no vouches makes me suspicious +20:30:24 the recent FreedomHosting thing makes eepsite hosting interesting +20:31:13 I get the gist this is free, right? 
+20:31:19 do we have any questions that the requestor must answer for us to make a decision? or can we make a decision today? +20:31:33 I didnt see whether it was free or not. the faq is only in russian. +20:31:45 it's free +20:31:45 If it's free, why not? There's no risk of a scam and it's definitely of 'interest'. +20:31:49 "9 months work free web hosting in i2p." +20:31:58 sorry guys, I was under the impression it was bitcoin +20:32:19 still, we have no idea if it actually works +20:32:46 Should we bite the bullet and find a volunteer to test it at some point from now to the release? +20:32:55 If it doesn't work, we pull it +20:33:15 I'm inclined to say yes now. I don't know what one person testing it would prove. We can always pull it later. +20:33:33 well, testing it at least works. There's not much else we can test. +20:33:39 I would hope people are at least vaguely aware of the trust issues involved in hosting +20:33:41 yeah, I'm also a yes. +20:34:10 there's no reason it shouldn't work. For one, I dont see any evidence of an automated process. +20:34:12 Any other votes? +20:34:26 it seems to be email drivne +20:34:28 driven +20:34:43 last call for opinions / objections +20:34:57 by works I mean whether the operator will actually do what he says he will do +20:35:14 do we have any testimonies from users on open4you? +20:35:22 as opposed to set up an eepsite and abandon it +20:35:24 no +20:35:28 psi: none, that's the problem +20:35:31 topiltzin: if that happens, we can remove it. +20:35:39 the owner is at least somewhat active as he requested +20:35:48 I'm for it. +20:35:53 yeah, i'd say not to go ahead with endorsement , first we get a test user in +20:35:56 my guess is he has almost no business so far. Let's give him a trial period of a couple of I2P releases with it in the console, and if its bogus then we pull it +20:36:09 or... if we endorse put a big fat warning on it +20:36:10 zzz: +1 +20:36:27 hmm +20:36:33 zzz has a good point +20:36:42 but... still +20:36:49 psi: it is free +20:36:56 psi pls clarify are you objecting or not +20:36:56 I'll email him the next few days +20:36:58 oh? it is? +20:37:04 yes +20:37:08 i am voicing caution +20:37:13 no objection +20:37:16 I guess I might as well put zab.i2p back up +20:37:22 I thought it was bitcoin but it isn't +20:37:25 It's free so.. I don't care much about testimonies yet +20:37:30 :) +20:37:40 topiltzin: You for or against? +20:37:52 abstaining +20:38:00 sounds like we have no objections and it's approved? if I'm wrong please speak up +20:38:01 I will test his hosting the next few days though +20:38:05 2 for, 0 against +20:38:12 abstain +20:38:19 abstain +20:38:26 im curious how files are transferred to open4you hosting account +20:38:47 EpicCoffee: ftp (huh?) or via a control panel it seems. +20:39:07 sftp? +20:39:18 ok I'll email him to get a logo and check the stuff into the console. Any of you so inclined, please sign up and report back +20:39:34 if it's bogus or doesnt work, we can always pull it just before the release. +20:39:35 "1 GB of available space, php5, mysql, ftp access, 3 tunnel \ 2 hop" +20:39:40 +1 +20:39:48 psi: doubt it.. +20:40:01 and that ends item 2) +20:40:18 no sign of hottuna or str4d so we're on to 5) dg's request +20:40:20 dg go +20:40:29 although it should work better over i2p (you know.. passive/active mode and so for ftp..) +20:41:36 I propose http://salt.i2p/ (not my site, efkt's) for inclusion in the router console. 
salt.i2p contains; a helpful, large wiki of eepsites and guides; xmpp; an IRC channel which is probably most active one on IRC2p +20:41:51 Title: salted (at salt.i2p) +20:42:00 It provides a lot of eepsite 'starting points' and is SFW. +20:42:02 ill say +1 for salt +20:42:07 I like the salt wiki +20:42:16 yep, #salt pwnz +1 +20:42:18 salt has grown to be a great resource for me and others in the community +20:42:28 the channel is #salt ftr +20:42:52 we really can only take requests from the eepsite owner. Could you ask him to apply following the guidelines on http://zzz.i2p/topics/236 ? then we can consider it for 0.9.9 +20:42:54 +1.. pretty much what that old wiki used to be now.. +20:43:06 Title: zzz.i2p: How to get my Eepsite added to the Router Console home page (at zzz.i2p) +20:43:33 I'm not familiar with the site +20:43:55 I don't think we want to start adding sites w/o permission +20:44:14 He was active not long ago, he may shoot up in a second +20:44:23 I agree with not adding w/o permission +20:44:34 figured it was worth a shot though +20:44:43 paging efkt +20:44:49 You have my permission as long as everyone's happy with it. I created the site with some of I2P's possible policies and ideas in mind about what content is appropriate for I2P. Im open to discussion if you don't like some of what might be there (Just saying) +20:44:53 efkt is the owner of salt? I presume such based on observations. +20:44:59 Yes, EpicCoffee. +20:45:10 Thus the disclaimer on the front page, explicit policies about gore/pr0n/etc +20:45:43 The wiki is not open to edit unless we end up knowing you, so it is unlikely to be vandalized. +20:46:03 * EpicCoffee also points out the salt xmpp +20:46:17 the XMPP is popular too and comes with guides. :) +20:46:50 yeah salt is pretty damn great.. has a real community feel to it +20:46:59 if you want to do it right now please answer the following questions. If it's too much to do now we can discuss in about 6 weeks before the next release +20:47:00 so long as users of it realize the admins can see what is said if not encrypted (I think the wiki article on it highly encourages otr) +20:47:06 We have enough XMPP servers floating around that in the future it might not be a bad idea to list a few in the console. Most of us running servers have S2S happening. +20:47:07 - An email address +20:47:07 - The URL to link to +20:47:07 - The URL of your English terms of service if available or necessary +20:47:07 - If the site is not in English, a brief description of the site in English +20:47:07 - (Optional) A URL to a transparent png icon to display. Size must be 32x32 (subject to change). If no icon is provided the icon will be "toopie". We will copy this icon into the router console source and serve it locally. +20:47:10 - If an icon is provided, the license of the image. +20:47:12 - (Optional) A one or two-word label in English. If not provided we will use example.i2p +20:47:14 - (Optional) a few words or a sentence in English for a popup (tooltip) +20:48:19 efkt@mail.i2p +20:48:23 site: http://salt.i2p +20:48:27 Title: salted (at salt.i2p) +20:48:38 site terms of service - Theres a disclaimer on the front page: http://salt.i2p +20:48:43 Title: salted (at salt.i2p) +20:49:15 I dont have a 32x32 icon to provide speedily but it would be the same as our favicon, a black star with transparent or white background +20:49:47 label name can be simply "salt" +20:49:59 efkt I can make a quick 32x32 icon if you want. 
well as quick as i2p will allow lol +20:50:25 we can give you a week for the icon :) +20:50:32 how long has the site been up? +20:50:34 EpicCoffee: If you like. Its just like the favicon - the black "anarchist star", you see the same kind of shape all over google images. It takes only a moment really +20:50:40 zzz: august 2012, IIRC +20:51:21 http://salt.i2p/wiki/index.php/Main_Page#Tutorials +20:51:21 Honestly, I'm very surprised I get so many users unable to reach the site as its not part of the default addressbook (or the XMPP) - Yet we get a lot of newcomers being directed there through IRC. Sorry to step off topic. +20:51:35 Title: Salted Wiki (at salt.i2p) +20:51:41 zzz: Oh, and the blurb "Cryptomunitions and tutorials' +20:51:46 Yeah, it's a problem (in addition to the annoying addressbook-not-found bug) when introducing users. +20:52:20 anybody on the console home page is automatically added to the default address book +20:52:42 Oh. That will be helpful for anyone who is new to I2P coming to IRC2P to explore. +20:52:52 one headache solved :) +20:52:59 1/2. +20:53:05 no xmpp.salt.i2p ;) +20:53:32 efkt to me your terms of service are insufficient for a wiki that we would recommend, as it describes only what you are not responsible for. It does not say what type of content if any is disallowed. +20:54:03 If anything goes then I cannot support the request. +20:54:04 zzz: Even though noone is allowed to edit the wiki, at all, unless I have personally vetted them and given them an account? +20:54:19 efkt: I believe he's saying the actual rules are unclear. pr0n, etc. +20:54:41 Hrm. +20:54:53 there are no rules stated at all. +20:54:56 32X32 blackstar.jpg, base64 encoded: http://pastethis.i2p/show/5437/ +20:54:57 Title: Paste #5437 | LodgeIt! (at pastethis.i2p) +20:55:00 wrt allowed content +20:55:54 I can easily add something. It's not a problem, only mulling over what "extra" things to add - Content such as Gore, Rape, Torture, Snuff, or weird sex will never be allowed on the site +20:56:23 so, family friendly - safe for work etc? +20:56:53 I don't think we should ask you to draft a content ToS in the next 30 seconds. Want to think about it and come back in a few weeks? +20:57:06 At this time no I cannot say salt is family friendly or safe for work. The onionland page explicitly links to drug onions. +20:57:43 zzz: Yes, I'd like that at least - Id rather work with everyone and produce something desirable that's useful to I2P +20:59:22 EpicCoffee: any image hosts on i2p? +20:59:24 my philosophy is that anything similar to postman's tracker rules are acceptable. +20:59:24 I don't want to tell you how to run your site or how to write your rules. You do what you like and then we'll decide if we want it in the console. +20:59:24 but you can survey the links we have now and get a feel +21:00:36 we're past 15 minutes so can we ask you to come back in a few weeks? Any site that has that much support from the folks here I think would be a good addition to the console, if you can work out the ToS stuff. +21:01:36 I'm happy to work out a ToS, and to talk about anything on the site the community feels doesn't belong. Thanks zzz. Yes that's fine +21:02:13 great. glad to hear about a new site I wasn't familiar with. I'll check it out. 
email me or post on zzz.i2p when you pull it all together +21:02:19 and that's the end of 5) +21:02:25 last call for str4d for 1) +21:02:36 hottuna still not here for 4) +21:02:47 * zzz warms up the baffer +21:03:50 * zzz *bafs* the meeting closed +21:07:16 thanks everybody, good meeting diff --git a/i2p2www/meetings/logs/226.rst b/i2p2www/meetings/logs/226.rst new file mode 100644 index 00000000..4febe3d8 --- /dev/null +++ b/i2p2www/meetings/logs/226.rst @@ -0,0 +1,17 @@ +I2P dev meeting, September 10, 2013 @ 20:00 UTC +=============================================== + +Quick recap +----------- + +* **Present:** + dbowie, + dg, + efkt, + EpicCoffee, + Pseudonemo, + psi, + topiltzin, + trolly, + welterde, + zzz diff --git a/i2p2www/meetings/logs/227.log b/i2p2www/meetings/logs/227.log new file mode 100644 index 00000000..c5fc3f0a --- /dev/null +++ b/i2p2www/meetings/logs/227.log @@ -0,0 +1,384 @@ +20:09:33 Meeting time. Who is here? +20:09:53 * psi is here +20:10:04 * dg here +20:11:34 * topiltzin . +20:11:51 hottuna, zzz, welterde, kytv: ping +20:12:17 * orion is here +20:13:01 * str4d loads meeting agenda +20:14:01 I can't reach zzz.i2p. Can anyone else get to http://zzz.i2p/topics/1480 ? +20:14:35 Got it. +20:14:43 1) Threat model +20:14:44 1a) Discuss merits of DREAD classification scheme (and choose another if necessary). +20:14:44 1b) Discuss threat model (and update if needed). +20:14:44 1c) Apply DREAD (or other scheme) to attack vectors in threat model. +20:14:44 2) Website revamp - check over in preparation for launch. +20:14:53 3) Roadmapping. +20:15:22 4) Docs discussion. +20:15:41 We already coverered 0) Say hi ;-P +20:15:42 1) Threat model +20:15:53 1a) Discuss merits of DREAD classification scheme (and choose another if necessary). +20:17:07 As I said in the forum post, I think that one of the things we can do to improve how other perceive I2P is to improve and clarify the threat model. +20:17:29 Right now, it is a wall of text, and difficult for users (and non-motivated devs) to find the main concerns. +20:17:45 It's hard to rank it also. +20:17:47 Understand urgency, etc. +20:18:03 And without any proper risk modelling, we really have no idea if we are focusing on the right aspects. +20:18:13 It would be great to get a short version of the threat model first and build off that +20:18:23 dg: exactly. +20:18:59 I did some research, and https://www.owasp.org/index.php/Threat_Risk_Modeling has a good threat risk modeling "layout", which is used by e.g. Cryptocat for their threat model. +20:19:04 Title: Threat Risk Modeling - OWASP (at www.owasp.org) +20:19:53 The DREAD scheme that they describe is not completely effective at identifying risk correctly, according to feedback mentioned in a subsequent post by the designer of the model - https://blogs.msdn.com/b/david_leblanc/archive/2007/08/13/dreadful.aspx +20:20:49 I propose that we use the modified DREAD model that he gives in the above post, to model the severity and priority of our attack vectors. +20:20:50 Discuss! +20:21:13 Give me some time to review the models? :) +20:21:40 dg: you were supposed to have done that already, I linked to it in the forum post... +20:21:44 :P +20:21:50 sorry +20:22:24 (but I didn't actually ask people to do so, my bad) +20:23:08 DREAD tl;dr - they rank a threat on five 1-10 scales, add the results and divide by 5. 
+20:23:12 Damage Potential +20:23:29 Reproducibility +20:23:29 Exploitability +20:23:29 Affected Users +20:23:30 Discoverability +20:24:12 modified DREAD tl;dr - same five parameters, but a 1-3 (low, med, high) scale and a "weighted" calculation. +20:25:09 I'm giving it a brief read; I obviously don't know all the details but any structured system is better. +20:25:18 The modified DREAD model makes better sense to me than the original. +20:26:06 I have a lot of respect for OWASP too. :P +20:26:10 "If we look at the five components, we see that none of these are highly correlated - one of them does not imply the other. This means we have independent factors, which is one of the strongest criteria for a solid model. Thus our task is to figure out how to properly weight the inputs. In WSC, we told you to rate them from 1-10, add them up, and divide by 5. If we apply some obvious tests, we find that a damage of 1, and all other factors 10 (a well known nuisance +20:26:10 , e.g., pop-ups) gets weighted the same as a discoverability of 1 and everything else 10 (hard to sort out, but causes the heat death of the universe). This is an obvious malfunction." +20:27:10 dg: so do I. They have many other potentially-useful models and docs there. +20:27:31 Anyone else have comments? +20:29:50 If no-one else has comments yet, then we will move on to the next topic while you think. +20:30:05 no comments +20:31:03 1b) Discuss threat model (and update if needed). +20:31:17 http://vekw35szhzysfq7cwsly37coegsnb4rrsggy5k4wtasa6c34gy5a.b32.i2p/en/docs/how/threat-model +20:31:18 * psi starts skimming threat model +20:31:39 Title: I2P's Threat Model - I2P (at vekw35szhzysfq7cwsly37coegsnb4rrsggy5k4wtasa6c34gy5a.b32.i2p) +20:31:47 I notice a rating? +20:31:50 Is this new? +20:32:04 dg: I added the modified DREAD system. +20:32:12 (in anticipation of no one having objections) +20:32:31 (but not in anticipation of no comments at all :-P ) +20:32:53 The ratings are invalid. +20:33:03 It doesn't seem to match- +20:33:05 yeah +20:33:09 (this is what I want to change in this meeting) +20:33:25 While we are discussing the threat model itself, please think about possible ratings (for the next topic) +20:33:28 The design looks good so with actual values, I'd like it. We should order in value of severity too. +20:34:48 Our threat model page does not follow the "standard" threat model layout (e.g. OWASP page) +20:35:04 Identify Security Objectives +20:35:05 Survey the Application +20:35:05 Decompose it +20:35:05 Identify Threats +20:35:05 Identify Vulnerabilities +20:35:08 we're going to duscuss the values of these rating now... or later? +20:35:50 psi: next topic. Right now we are discussing the threat model itself - we can't rate threats if they are out-of-date. +20:35:58 right +20:36:17 (And FYI meeting will end at 10PM UTC) +20:36:29 (At least, I will be leaving then) +20:37:18 The threat model page does not clearly identify our security objectives. +20:37:21 Where is everyone? +20:37:29 We can't operate with 3 people. +20:37:54 topiltzin, hottuna, zzz, welterde, kytv: ping +20:37:55 there is more to "formalizing" the model than just rating each element +20:37:56 I think it is worth considering the methods outlined in todays guardian articles. The NSA tried to target the dev process +20:38:16 zzz: I know, but we have to start somewhere. +20:38:18 in particular, the major objection to our model is that we don't clearly specify what is in and what is out +20:38:40 What affects us and what doesn't? 
+20:38:43 which is a step that would need to happen before rating, should we care to address the critics +20:39:23 zzz: that is what we are doing now. +20:39:23 The threat model page does not clearly identify our security objectives. +20:39:29 the major point of a threat model is to specify what's NOT in it, e.g. the NSA. Projects use that to wave their hands and say "not our problem, not in our threat model" +20:39:44 we haven't done that. +20:40:07 . +20:40:10 Right. So let's do that. +20:40:29 If we make a formal model and omit the NSA, we can then stop working on protocol obfuscation, and perhaps even stronger crypto. +20:40:42 or, we could call that a copout. +20:41:18 From the start, it's clear that Tor can't save you from a GPA. Do we make this and other caveats clear? +20:41:26 and do we protect against NSA? +20:41:59 Global adversaries (that can monitor the entire internet) are out by nature of the onion routing design. +20:42:18 NSA, as big as it is, is not a global adversary. +20:42:37 the NSA as it is does have an extensive reach +20:42:38 Most of the current model is aspirational, as we are too small to realistically counter may of the items atm +20:42:50 Would we protect against GPA with some of the things in our roadmap? ;) +20:42:52 str4d: perhaps but they do work with others +20:43:01 the traditional terminology is "state-level" adversary, e.g. NSA +20:43:03 GPA? +20:43:11 equinox: likely. +20:43:13 zzz: thanks. +20:43:18 Global Passive Adversary +20:43:56 so if you want to make a strict model and exclude state-level, and use it to guide dev, then that would e.g. tell us not to work on obfuscation +20:44:47 It's a difficult enough task to maintain anonymity, let alone do obfuscation. +20:45:43 critics love formal threat models... does having one only enable the trolls, or would it actually help us promote and dev? +20:45:53 We have always stated that I2P does not do obfuscation (but not explicitly in the threat model) +20:46:19 That is a fair point. +20:46:28 a threat model is good for focus +20:46:34 The trolls have enough if they want to troll. Fuck that. +20:46:41 trolls are always around, I wouldn't take those into account +20:46:43 (sorry to jump in) +20:46:51 My goal with this meeting was not to have a strict threat model that we must absolutely follow to the letter. +20:47:02 Even if we wanted to have that, it would not be possible in a single meeting. +20:47:25 No problem. Nice to see you, Mathiasdm. +20:47:28 A formal threat model helps us to define what we're trying to protect against either +20:47:37 I've been around for almost a year and I'm still not sure exactly what. +20:47:40 The website page we call the "threat model" is a giant WoT and difficult to grep. That is really what I want to fix. +20:48:20 I want users to be able to look at it and quickly understand what we are trying to do. +20:48:50 We know the state agencies and actors on behalf of the state will only increase their scope as time goes on (if they are left unchecked). I think it is best to plan for that eventuallity rather than reacting to it. +20:49:16 Because misinformation and misunderstanding have been a problem with I2P for a long time. +20:50:28 I think the page is pretty good. Although perhaps it needs another page that's a summary. +20:51:12 The threat risk modelling (with DREAD) is something that is easy to do, and easy to remove if we decide that it doesn't give us valid information. +20:51:57 zzz: it is good for someone who is prepared to take the time to read it. 
It is not good for skimmers. +20:52:36 As the post I linked above says: "Warning! Do NOT apply this system, or any other system, without THINKING about it. This system may or may not help you arrive at the right conclusion, and if it does not, consider worth what you paid to get it, which is zero." +20:53:26 imho you have 3 orthogonal goals for the single page: 1) simplifying for the masses, 2) formalizing, and 3) risk modelling +20:54:38 1) and 3) are linked - having the ratings enables the masses to skim, find the "important" ones to them, and read. +20:54:49 But I agree that 2) is orthogonal (and also linked to 3) ) +20:56:04 If having a formal threat model becomes a blocker to other things, then we will need to pursue it. But when I originally said "formalize", I should have said "clarify". +20:57:43 Quick poll: does anyone here think that going through and applying DREAD to the attack vectors on our "threat model" page is useful or a good idea? +20:58:28 If yes, let's move to next topic and do so, then we can discuss the result. If no, let's forget about it and move on. +20:58:44 yes-as-long-as-its-someone-else-doing-it +20:58:46 What's the alternative? +20:59:09 hahaha +20:59:21 being honest :) +20:59:37 or depressing. :) +21:02:00 It isn't a bad idea but, I'm not sure that is the be-all-end-all solution for the threat model. +21:02:06 hmm + hottuna: I don't intend it as such, but I think it is a useful step. And no one else was suggesting or doing anything :-P + it depends if there are more people helping + if it's just 1 person no way + if there are collaborators, possibly + psi: I wanted to do it in-meeting right now, while we had more than one person. + "formalizing" is important to some - OpenITP, critics, reviewers, auditors, funders, others in our field, etc. + would it really be enough and structured well enough to just do it now in this meeting? + im not very familiar with the whole DREAD process though. + hottuna: we go through each attack vector, and rate the five categories as low, medium or high. That's all. + i am not familiar with DREAD as well + I chose that one because it was very simple to apply. + ah + (The five categories I outlined just above the index on the threat model page) + let's try an example one + each known attack vector? + psi, sure + I intentionally did everything beforehand to make it simple because I knew that getting anyone here to agree to do this would be hard :P + Okay, "timing attacks" + sure. + Damage Potential: If a threat exploit occurs, how much damage will be caused? + If it is used to identify a user, then that user is deanonymized -> high? + statistical exploits based on timing and packet sizes have been employed against tor to successfully find out which site was being visited + with very high success ratios (~90% if I remember correctly) + (use e.g. https://www.owasp.org/index.php/Threat_Risk_Modeling#DREAD to get an idea of scales - it already has three levels described) + Reliability: How reliable is the attack? - low? med? It is generally network-load-dependent. +21:12:28 to be able to time what exactly? +21:13:26 anything in general? +21:14:11 okay +21:14:27 I don't know. +21:14:47 But the descriptions seems messgae oriented. +21:14:52 (use e.g. https://www.owasp.org/index.php/Threat_Risk_Modeling#DREAD to get an idea of scales - it already has three levels described) +21:14:54 Title: Threat Risk Modeling - OWASP (at www.owasp.org) +21:14:56 Reliability: How reliable is the attack? - low? med? 
It is generally network-load-dependent. +21:15:33 psi: that's a good point - the "Timing attacks" section should probably be split into message-delivery attacks and message-content attacks +21:15:36 damage potential: 5? +21:15:51 Assume message-delivery for now. +21:15:55 " Complete system or data destruction " means the box explodes i assume? +21:16:08 as far as reliability goes, statistical models have been proven reliable in the case of tor.. +21:18:00 hottuna: we are using a 1-3 scale +21:19:08 the 1-10 scale described in the OWASP is harder to justify. +21:19:08 "What's the difference between discoverability of 6 and 7? Who the heck knows?" +21:19:08 Use the OWASP scale as an indicator of how to assign low/med/high +21:19:11 psi: In our case, I would say that "high" is complete correlation between a particular user and their activity. +21:19:13 timing i'd say 5 or 6 +21:19:13 (for dammage) +21:19:14 (for Damage) +21:19:17 https://blogs.msdn.com/b/david_leblanc/archive/2007/08/13/dreadful.aspx explains the categories possibly better. +21:20:00 i see +21:20:16 but the damage would be revealing some sort of information, which may be bad.. theoretically it could reveal that I'm running a certain application or talking to a certain destination +21:20:20 is that a 5-6? +21:20:34 Exploitability: What is needed to exploit this threat? - med? The attacker needs to monitor several locations along the possible path. +21:20:36 low? +21:20:49 it depends on the attacker +21:20:55 and it also depends on the network size +21:21:34 Exploitability is requirements before launching the attack. Reliability is how well it works once triggered. +21:21:48 ah +21:21:49 psi: yes, so these ratings will change over time. +21:22:05 (And this is an example of a limitation of the model, and a big flaw in the original DREAD) +21:22:06 exploitability would be med +21:22:18 Exploitability is only used to calculate priority, not severity. +21:22:25 just running a stock i2p router would be not enough +21:22:54 psi: right, so not high. +21:23:15 But not low because it doesn't need advanced computing power etc. +21:23:20 Affected Users: How many users will be affected? +21:23:27 You would have to be a part of a tunnel, and then just have a look at the message profile. If you're the ibgw for a service, you might be able to separate out a few users from the rest. Or at least cluster them inte different user groups +21:23:40 into* +21:24:23 mid may be a hit much for exploitability +21:24:29 bit* +21:24:36 mid-low +21:24:40 in the ibg case, I'd say it's pretty easy, but you wouldnt get a ton of information +21:24:45 ibgw* +21:25:06 psi: mid or low. It will only affect the priority score. +21:25:48 As far as eploitability goes, I think it's very doable. Especially in comparison to other exploits. +21:25:55 Discoverability: How easy is it to discover this threat? - mid? It requires at least some knowledge of how I2P works. +21:25:59 hottuna: agreeed +21:26:10 "Something that's highly discoverable is publicly known, or very similar to something that is publicly known. Low discoverability is that it takes intimate knowledge of the internal workings of your app to sort out." +21:26:22 mid +21:26:51 We would never know about the attack since it's passive +21:26:55 hottuna: exactly. The classification partly depends on what is chosen for other attacks. It's all relative. +21:27:26 str4d, are you noting some sort of value based on what's being said? +21:27:44 hottuna: yes. +21:29:02 good. 
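The ratings being collected above follow the five DREAD categories on the low/medium/high scale proposed earlier in the meeting, and a Severity and Priority figure is reported for each attack once its five ratings are agreed. The log does not spell out the exact aggregation behind those two figures, so the sketch below only illustrates the general shape of such a tally, using a plain average over the five categories as an assumed stand-in for the real formula; the category names, the three-level scale, and the timing-attack ratings are taken from the discussion itself.

# Illustrative DREAD-style tally on the low/medium/high scale used in the meeting.
# The aggregation (a plain average) is an assumption for illustration only; the log
# does not state how the Severity/Priority figures below were actually derived.

LEVELS = {"low": 1, "medium": 2, "high": 3}

def dread_average(damage, reliability, exploitability, affected_users, discoverability):
    """Return the average rating across the five DREAD categories (1.0 to 3.0)."""
    ratings = [damage, reliability, exploitability, affected_users, discoverability]
    return sum(LEVELS[r] for r in ratings) / float(len(ratings))

# The "timing attacks" ratings discussed above:
timing = dread_average("medium", "medium", "medium", "high", "medium")
print("Timing attacks, average DREAD rating: %.1f of 3" % timing)  # prints 2.2 of 3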
+21:29:02 D: low +21:29:19 hmm +21:29:29 Affected users: High (all who actually do something) +21:29:37 Here's what I think we agreed on, and what it calculates: +21:29:37 Damage Potential: medium +21:29:37 Reliability: medium +21:29:37 Exploitability: medium +21:29:51 Affected Users: high +21:29:52 Discoverability: medium +21:29:53 Severity: 4/5 +21:29:54 Priority: 5/9 +21:30:23 timing attacks are pretty bad but they don't seem practical +21:30:29 at least, at the moment +21:30:41 Does that seem like a sensible result? Are the levels I set what we actually decided on? +21:30:58 I dont agree with discoverability. +21:31:01 And we should do at least one other attack vector, to get a sense of how this will compare them. +21:31:09 A passively logging node would never be discovered. +21:31:17 hottuna: you think it should be high? +21:31:17 Sure. +21:31:29 hottuna: wrong "discoverability". +21:31:47 whatever undiscoverable translates into +21:31:53 This is a defensive model. This is discoverability of the vulnerability by the attacker. +21:32:00 the resources used to launch an attack would be rather obvious unless they pwnd all the boxes +21:32:12 oh. I see. +21:32:18 Timing attacks are specific and maybe not as applicable to us anyway.. +21:32:25 Oh, in that case I agree. +21:33:28 to do a timing attack would require either a birds eye view or ownership of many nodes (how many? idk) +21:33:38 Severity is how bad we think the attack is, Priority is the order it thinks we should focus on. +21:33:55 Oh. +21:33:55 not sure if a bird's eye view would be enough too +21:33:57 Yeah, 4/5. +21:34:10 Let's leave that classification for now, and do another one for comparison. +21:34:30 reflecting on 4/5 IF they can do timing attacks then pretty much everything low latency is affect +21:34:33 affected* +21:34:54 priority... not sure 5/9 is appropriate +21:35:15 "Tagging attacks" should be easy to classify. +21:35:32 psi: we won't know what priority means until we have more classified. Classification is an iterative process. +21:35:38 okay +21:35:48 So, tagging attacks. +21:36:15 tagging messages? tagging routers? +21:36:48 Messages +21:36:59 (kinda) +21:37:07 Determining what path a message follows. +21:37:17 Damage potential: mid? +21:37:30 mid agreed +21:37:38 low in a sense +21:37:43 Damage potential: lo +21:37:47 low-mid +21:37:58 Tagging (if possible) is only going to reveal info within a particular. tunnel +21:37:58 it depends on the situation +21:38:01 Reliability: low. +21:38:01 yea +21:38:08 Or... +21:38:10 Hmm. +21:38:41 on what scope would the tagging be measured at? +21:38:58 if they were used in a situation where they could identify tunnel participants, they woulkd work every time, right? +21:39:00 Exploitability and discoverability are low - it should be impossible to tag messages themselves, and collusion requires exact placement of routers. +21:39:20 E:low +21:39:21 psi: a message going between two endpoints (a client or server). +21:39:23 D: low +21:39:39 i agree LOW +21:39:45 E and D +21:39:50 hottuna: exactly. If a tagging attack was discovered, it would work every time. +21:40:13 so, R: high? +21:40:21 But such discovery should be impossible because everything is signed. +21:40:51 But it depends on the tagging attack. +21:40:56 Message tagging: high. +21:40:57 if they have your keys then they can sign too +21:41:06 Collusion tagging: mid. +21:41:07 str4d, sure, but discoverability is another metric +21:41:13 * str4d says high for now. 
+21:41:28 * hottuna is sattisfied +21:41:44 Affected users: only users with malicious nodes in their tunnels are affected. +21:42:02 low +21:42:16 A: most likely low +21:42:26 Okay: +21:42:26 Damage Potential: low +21:42:27 Reliability: high +21:42:27 Exploitability: low +21:42:27 Affected Users: low +21:42:27 Discoverability: low +21:42:28 Severity: 2/5 +21:42:29 Priority: 2/9 +21:42:52 looks good +21:42:59 sounds good +21:43:22 feels good +21:43:57 onto an actual threat? +21:44:28 Shall we quickly go through the remaining meeting topics, and then come back to this? +21:44:37 ok +21:44:56 * str4d culls 4) Docs discussion, it will take too long. +21:45:12 2) Website revamp - check over in preparation for launch. +21:45:35 the site revamp is applying better CSS or is there more? +21:45:48 Apart from this classification process (or removing the classifications), what else needs doing before welterde "launches" the site revamp? +21:46:12 I dont know. +21:46:21 psi: "better" CSS, but a lot of structural and layout changes. +21:46:32 I think structurally, everything is ready. +21:46:50 How automatic is the translation update process? +21:46:50 Completely. +21:47:06 How frequent is it? +21:47:28 Whenever I update it. +21:47:45 Ok. +21:47:48 So far, whenever I have seen string changes I run the scripts to extract and update the translation strings. +21:47:50 i need to jet ill bbl in 30 minutes +21:47:56 I suppose that is good enough. +21:48:01 * str4d will be gone by then. +21:48:30 psi: you're welcome to continue the DREAD discussion then :) +21:48:44 oh, str4d: the giant download button on the front page doesnt seem to auto update to the latest version +21:48:45 There are known CSS problems in IE 7 and 8 IIRC +21:49:00 hottuna: that is another bug that I need to talk with welterde about. +21:49:09 ok. good. +21:49:25 Whenever a .py file changes, a script is meant to restart the server (and whenever translations change, it recompiles them) +21:49:49 But for some reason, changes to .py files are not being detected on welterde's server... +21:49:49 (They were before) +21:50:24 Okay, if there is nothing else, then I +21:50:43 'm happy with the revamp and once the .py bug is fixed, it can go live. +21:50:52 Alright! +21:51:11 (IE 7/8 CSS will be mitigated when I get a chance, but I don't consider it a blocker) +21:51:23 Sounds reasonable. +21:51:42 "live" == welterde will make it live at https://geti2p.net (the URL we decided on several meetings ago), but leave www.i2p2.de as-is. +21:51:52 Title: I2P Anonymous Network - I2P (at geti2p.net) +21:52:00 Why will i2p2.de be left as it is? +21:52:03 Then I will run tests, check Google etc. are happy with it. +21:52:30 hottuna: in case something catastrophic happens and we need to revert. +21:52:42 ok, so it's just temporary +21:52:51 Only when everything is absolutely checked and ready, will we 301 redirect i2p2.de to geti2p.net +21:53:15 that makes sense +21:53:23 Because 301 is a permanent move, and will cause search engines to update their links. +21:54:08 The legacy redirection code uses 302 redirects for now, but will be changed to 301 once everything is set (so that we don't lose pagerank from old links) +21:54:28 Okay, moving on: +21:54:28 3) Roadmapping. +21:54:42 hottuna: your turn. +21:55:44 You have about ten minutes of my time (maybe more for anyone else who is still here) +21:55:45 roadmap? All I know is that I've been having a little more time as of late, and I've been getting back into looking at the DHT code. 
Especially the reply handling code. +21:56:08 I don't really have anything else to add. +21:56:48 The current roadmap for 0.9: +21:56:48 Include some seed data in the distribution so a central reseed location isn't required? +21:56:48 Reachability Mapping / handle peers partially reachable / enhanced restricted routes +21:56:49 Improve help pages and website +21:56:49 More translations +21:56:56 SSU disconnect message +21:56:57 Iterative floodfill lookups +21:57:13 I have no idea where we are on some of that, or when it was last updated. +21:57:54 The floodfill lookups are iterative as far as I understand them. +21:57:59 1.0 - 3.0 were last updated in 2008. +21:58:14 0.9 was added in 2010. +21:58:14 restricted routes is unlikely +21:58:37 I'll have to go in a minute or two +21:58:42 I think proper evaluation of the roadmap needs another meeting, with more attendance. +21:59:01 Agreed. +21:59:14 hottuna: good to hear you are getting back into the DHT code. +21:59:29 Deferring until later. +21:59:33 And the actual threat model should be looked after. +21:59:43 Okay. +21:59:47 Could we have a long meeting next time for that? +22:00:35 hottuna: I had hoped 2 hours would be enough, but we spent at least an hour debating whether it was even worth doing >_< +22:00:36 I've gotta leave, but thanks for the meeting str4d. You're a natural! +22:01:19 We don't have time to return to 1c), so: +22:01:23 str4d *baf*s the meeting closed diff --git a/i2p2www/meetings/logs/227.rst b/i2p2www/meetings/logs/227.rst new file mode 100644 index 00000000..39adeb06 --- /dev/null +++ b/i2p2www/meetings/logs/227.rst @@ -0,0 +1,16 @@ +I2P dev meeting, October 4, 2013 @ 20:00 UTC +=============================================== + +Quick recap +----------- + +* **Present:** + dg, + equinox, + hottuna, + Mathiasdm, + orion, + psi, + str4d + topiltzin, + zzz diff --git a/i2p2www/meetings/logs/228.log b/i2p2www/meetings/logs/228.log new file mode 100644 index 00000000..b31a19e5 --- /dev/null +++ b/i2p2www/meetings/logs/228.log @@ -0,0 +1,232 @@ +20:00:54 http://zzz.i2p/topics/1490 - 15 minutes max per item, 5 minutes for brief items +20:00:54 0) hi +20:00:54 1) http://salt.i2p console home page request (carried over from Sept. 10 meeting) +20:00:55 2) http://no.i2p services request (Meeh) +20:00:55 2a) Add to default i2ptunnel jump list +20:00:55 2b) Add to recommended addressbook subscriptions on FAQ +20:00:57 2c) Add to registration help text on eepsite help +20:00:59 3) Additional backup release signer - Need new su3 keys, HH hasn't added his yet, do we need another person? +20:01:02 4) Console home page icons needed http://zzz.i2p/topics/1492 (brief) +20:01:04 5) Website revamp (str4d) (brief) +20:01:08 6) Trac update - thanks to Meeh and kytv (brief) +20:01:09 Title: zzz.i2p: Meeting Tues. Oct. 22 8 PM UTC (at zzz.i2p) +20:01:10 7) Allow marketplaces on eepsite registration services? (zab) +20:01:13 0) hi +20:01:14 1) http://salt.i2p console home page request (carried over from Sept. 10 meeting) +20:01:18 hi +20:01:18 Title: salted (at salt.i2p) +20:01:20 efkt_: +20:01:25 efkt_, are you here? +20:01:25 +1 for salt +20:01:36 Title: no.i2p registration service (at no.i2p) +20:01:37 Hi everyone. Yes I'm here. +20:01:41 Title: zzz.i2p: Console Home Page Icons (at zzz.i2p) +20:01:44 Title: salted (at salt.i2p) +20:02:20 +1 for salt +20:02:38 +1 for salt +20:02:43 I only have one question - where does the name come from,does it mean "salt" or does it stand for something? should it be translated? 
+20:03:00 Salt's a great community resource and efkt_ is extremely welcoming. I don't have a bad word to say about him :) +20:04:26 zzz: It really doesn't stand for anything. Unfortunately some security-thing startup recently opted to use "Salt" as well. The idea behind the name was: NaCl crypto, salted hashes as the "thematic choice" +20:05:07 Just having fun is all. +20:05:38 It leads to a lot of questions "what the hell is salt?" Why salt? +20:07:44 As for translation, I don't see that it would be useful to call it something other than what is used for the short url +20:07:47 so would we translate it in the console or not? +20:07:57 +1 for salt +20:08:51 "take everything you read on I2P with a pinch of salt" +20:09:09 hah +20:09:19 Haha. RN has ceaseless salt jokes. +20:11:36 ok. I noticed the icon you submitted doesn't have a transparent part. It might look better if it did. Or maybe not. May depend on the theme +20:11:36 if you're happy with the icon it's fine I guess +20:11:36 hearing no objections I think that's it for 1). I'll check it in. +20:11:36 moving on to 2) no.i2p - is Meeh here? +20:11:42 Perhaps it should be translated. +20:17:33 in the short term, only generating a public key, storing your private key somewhere safe, and checking it in +20:17:53 if me and kytv get hit by a bus, then you would be the one building and signing the releaase +20:18:07 which is basically running 'ant release' +20:18:08 I can do it also then. I won't be able to take responsibility for writing code though. +20:18:33 writing release notes, distributing the files, starting torrents, writing the news are all related activities +20:18:34 So, please look both ways before crossing! +20:19:00 basically you'd be a packager / release manager +20:19:23 yeah, I can do this. +20:19:41 I think str4d would be well-suited +20:20:40 dg have you done much with monotone? do you have checkin privs now? +20:21:14 zzz: I've not had checkin privs to i2p.i2p and I'm not a mtn pro. I get it enough to use it but advanced trickery is beyond me. +20:21:22 If str4d is more appropriate, that's fine. +20:21:50 other acks/nacks on str4d ? +20:29:18 ffs, I'll relay: +20:30:12 http://pastethis.i2p/show/5965 +20:30:13 Title: Paste #5965 | LodgeIt! (at pastethis.i2p) +20:30:14 bah +20:30:32 welcome back +20:30:43 ok +20:30:44 repost +20:30:50 hearing no objection let's welcome str4d as our newest backup release signer +20:30:55 str4d, kytv or I can help you generate your keys and check them in +20:31:03 excellent. I'm always worried about the bus. +20:31:06 4) Console home page icons needed http://zzz.i2p/topics/1492 (brief) +20:31:12 just wanted to mention the icons thing here, to highlight postman badger sponge eche|on kytv et al +20:31:20 Title: zzz.i2p: Console Home Page Icons (at zzz.i2p) +20:31:41 pick or design an icon or we will pick one for you +20:32:01 you have a couple weeks +20:32:17 thanks to str4d for his detailed post on what is in fux +20:32:32 anybody else have anything to say about this? +20:32:49 * kytv is indifferent +20:33:11 ok moving on +20:33:11 it'd be nice to have a prettier set of icons. I like the fux set. that's all. +20:33:12 5) Website revamp (str4d) (brief) +20:33:33 str4d, what's the status and what's blocking progress? +20:33:57 Status: .py files are not updating for some reason (they were) +20:34:22 When a .py file updates, the webserver needs to be restarted, but the script to do that has stopped working. +20:34:46 Other than that, not much. 
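The restart mechanism under discussion, a helper script on the server that restarts the webserver whenever a .py file changes (and recompiles translations when they change), is not included in this patch, and its real implementation is not shown anywhere in the log. Purely as a sketch of the idea, and not the script actually running on welterde's server, a minimal polling watcher could look like the following; the watched directory and the restart command are placeholders.

# Minimal sketch of a .py-change watcher: poll the source tree and run a restart
# command when any .py file's modification time changes. Illustrative only; the
# real script and its restart command are not part of this patch.
import os
import subprocess
import time

WATCH_DIR = "i2p2www"                         # package directory from this patch
RESTART_CMD = ["/path/to/restart-webserver"]  # placeholder command

def py_mtimes(root):
    """Map every .py file under root to its last-modified time."""
    mtimes = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if name.endswith(".py"):
                path = os.path.join(dirpath, name)
                mtimes[path] = os.path.getmtime(path)
    return mtimes

def watch(interval=5):
    seen = py_mtimes(WATCH_DIR)
    while True:
        time.sleep(interval)
        current = py_mtimes(WATCH_DIR)
        if current != seen:     # something was added, removed, or modified
            seen = current
            subprocess.call(RESTART_CMD)

if __name__ == "__main__":
    watch()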
+20:35:28 CSS issues were shown on IE, it looks like on the frontpage the BG image is not displaying, but I can't test. +20:35:28 you need welterde_ or just time? +20:35:33 revamp site has issues with IE{7,8}. IE9 is untested. I don't know if we care about that. +20:35:57 IE10 works fine so I'm inclined to not worry about the older versions +20:36:05 On non-frontpage pages, IE{7,8} is terrible. +20:36:49 But that is not really a blocker. +20:37:02 wrt content, is everyone happy with the front page as-is? +20:37:13 ok. just would love to see us get past the finish line on this. On Sept. 1st on tx, you announced it would go live in a week :) +20:37:20 yes, str4d +20:37:53 zzz: yea, shortly after that I discovered the .py problem and ran out of time to fix. +20:38:20 ok thx for the update str4d. anything else on 5) ? +20:38:22 If .py -> restart is not working, then welterde_ (and every mirror operator) need to manually restart every time. +20:38:33 A few minor points: +20:38:55 When updating for a release, there are two locations to change: +20:39:16 i2p2www/__init__.py - CURRENT_I2P_VERSION +20:39:38 and i2p2www/pages/downloads/list.html - the hashes at the top. +20:39:50 maybe I can cut you short, no use training me until I need it, as I will ask again... +20:40:07 zzz: fair enough. But that ^ is it. +20:40:30 tl;dr - as soon as the .py problem is fixed, it is safe for live. +20:40:31 ok, I'll copypasta it, might lose it, might not. +20:41:01 (mine's updating/restarting FWIW (http://geti2pj2fl72u4m2.onion/ / http://kfri3jwfsi2uy7j3pbzykbbaoqoj4siyhf3hozln3wxcrkvvityq.b32.i2p/)) +20:41:04 Title: I2P Anonymous Network (at geti2pj2fl72u4m2.onion) +20:41:16 ok then let's ask welterde_ to help out here +20:41:49 ok thx for the update str4d. anything else on 5) ? +20:42:06 It's *much* simpler than current :-P +20:42:13 kytv: thanks - which script? +20:42:16 (but I'm done with 5) +20:42:25 bah. I got my sed script :) +20:42:25 Oh, one other point: +20:42:34 6) Trac update - thanks to Meeh and kytv (brief) +20:42:40 I want us to think about the /research page +20:42:42 (yw) +20:43:11 Just putting the thought out. +20:43:32 str4d: it's a modification of the one in i2p.www.revamp that I made for future appliance. I can pastebin it later. +20:43:35 I just put this on the agenda to thank kytv and his helpers Meeh and welterde_ for moving, hosting, upgrading, managing, etc and getting it back up +20:43:54 * dg thanks +20:44:18 kytv++ +20:44:20 it's an important resource that got hammered by spam and then disabled by me and then broken by kytv, but we won't mention that part since we're thanking him... +20:44:41 bwahaha +20:44:52 * str4d had plans to help, until kytv sabotaged those plans and took the glory for himself ;-P +20:45:17 oh yeah sorry str4d you did do quite a lot too in the early stages +20:45:42 ok enough of that, back to the beatings +20:45:47 7) Allow marketplaces on eepsite registration services? (zab) +20:46:18 I have 3 of them in the stats.i2p queue, everybody piling in after the silk road thing +20:46:49 Enforcing this is not trivial - we only know if the person registering makes it obvious in the domain or the about description. 
+20:46:51 I don't think anybody will target $registrar for saying yes +20:46:53 we had several silkroad* registrations last year, I approved them at the time, none were around for long +20:46:57 sorry I'm late, but here now +20:47:15 Meeh will be back to you shortly, we are on 7) +20:47:21 np +20:47:52 zab what would you like to say about this +20:48:00 ^^ topiltzin +20:48:00 In the interest of full disclosure, I have significant investment in bitcoin. So I'm naturally supportive of anything that promotes use of bitcoin +20:48:00 enforcing is never a sure thing +20:48:26 in fact, part of the reason I'm working on i2p is to prepare for a possible future where anonymous marketplaces cannot use tor anymore +20:48:42 +20:49:03 I could reject either on 1) its illegal 2) brings attention we dont need or 3) i2p really isnt safe enough for markets +20:49:13 but all those are shaky reasons +20:49:18 If 3), I don't know what we are safe for +20:49:21 But on the assumption that we can discover which domains are for marketplaces: +20:49:22 topiltzin ^ +20:49:35 I don't agree with ever saying "Hey, $group, come to us! We're like, safe and things!" but a little confidence is good too +20:49:49 as regards public perception, policies can help, even if they aren't perfectly enforcible, imo +20:50:01 3) is something the operators of the marketplaces need to decide for themselves +20:50:45 I agree with you there topiltzin, the operators choose +20:50:46 if 2) will happen it will happen regardless of addressbook policy +20:50:49 fyi the three I'm sitting on are silkroadreloaded.i2p, market.i2p, and freemart.i2p - you can jump to all thru stats.i2p +20:51:02 still unable to figure out why restart is not working? +20:51:35 (just arrived) +20:52:00 re 1) - that's the interesting one. I suggest the following policy: if a market place advertises $ILLEGAL_ACTIVITY then it does not indexed +20:52:08 welterde_: I haven't had time to investigate it. +20:52:18 but a market that simply says "anonymous marketplace" should not be banned automatically +20:52:28 ic +20:52:32 +1 on re 1) +20:52:34 most things here are anonymous +20:52:40 fwiw I think its reasonable to not outright ban all marketplaces and use a case-by-case basis, disallowing support for those who wish to use stats.i2p for drugs, assassination, arms, etc focusing on what is unsavory and not neccessarily what is illegal. +20:52:45 I think topiltzin's suggestion is a reasonable compromise, fwiw +20:52:59 stats.i2p is a voluntary service, not an "official" addressbook. +20:53:14 I find it hard to get too worked up about drugs. If they're advertising CP or contract killings that's another story. +20:53:27 * dg nods +20:53:35 Mmm. +20:53:43 str4d: it's still linked on the router console, which would be considered a tacit endorsement by many +20:53:51 agreed +20:54:00 IMHO the standard sort of T&C we require for routerconsole would be fine. +20:54:13 Pseudonemo: A link to a site that doesnt' really link to but can be contacted for info on a site people MAY not like? no +20:54:32 fair enough +20:54:43 I thnk I'll treat markets like forums and torrent sites, i.e. post a TOS and then I'll decide +20:54:48 (but that does not mean that a marketplace would be accepted for routerconsole, that is a separate decision) +20:56:05 that's fine. It would help if you publish some guidelines on what will NOT get indexed +20:56:12 thanks guys for your thoughts. this is more my problem than a project issue and I needed some advice. 
+20:57:07 if you feel uneasy about it, you are within your rights not to allow them zzz +20:57:24 but i think it's ok as long as it's not crossing any moral lines +20:57:54 it's all on http://stats.i2p/i2p/addkey.html - I guess I would just add 'markets' to the line about forums, trackers, file + image hosts... +20:57:54 ok enough on 7) +20:57:54 back around to: +20:57:54 2) http://no.i2p services request (Meeh) +20:57:54 2a) Add to default i2ptunnel jump list +20:57:54 2b) Add to recommended addressbook subscriptions on FAQ +20:57:56 2c) Add to registration help text on eepsite help +20:58:08 Title: Add a Key (at stats.i2p) +20:58:13 take it away Meeh what you got for us? +20:58:24 Title: no.i2p registration service (at no.i2p) +20:59:06 well, I've setup a registration service with a TOS somewhat the same as stats.i2p, to help "decentralize" the naming service in I2P if needed +20:59:16 ping Meeh +20:59:36 doesn't my messages get trough? +21:01:45 I hear ya +21:01:45 ok good, I checked freenode so iRelay does too +21:01:45 got it, lag +21:01:45 yeah we really need more of everything and my registration service really stands out by itself +21:01:45 just checked, I set it up in January of 2008 after orion.i2p vanished (snif) +21:02:08 yupp, so that's why I added it, to help "decentralize" it if needed +21:02:27 slow's inr.i2p service is great but his anything-goes policy isn't compatible with ours +21:02:40 other than that I haven't thought much on it, so if people want to contribute with ideas, go ahead +21:02:51 if you can work together on his software base, but have something managed, that's great +21:04:56 fwiw, inr.i2p is a thing. +21:05:24 anybody reviewed Meeh's TOS or have any thoughts? +21:05:24 I looked briefly, looked like copy-pasta of yours +21:05:24 yes, I will publish my changes. I've updated it to django 1.5 and added export-alive.txt "view" support +21:05:24 +1 on no.i2p based on nothing but general Meeh awesomeness :) +21:05:24 topiltzin: yes, it was copy paste :P +21:05:24 well, Meeh actually you have two pastas. one of slow's on no.i2p and one of mine on no.i2p/tos/ +21:05:24 I wrote some myself, and copied most of it, to make it as close to the official rules as possible, since I guessed zzz followed them closely +21:05:24 yea, a mix +21:05:24 can change it ofc +21:05:26 +1 for no.i2p. Giving people more jump links and subscriptions is a plus. If there will be more sites registered with no.i2p I think it would be helpful to inform existing userbase about such a change. +21:05:45 I didn't remove the policies that was "acceptable" from the original source +21:05:46 is it confusing to have overlapping stuff in two places? +21:05:51 actually 3 with /postkey/ +21:06:21 ok, I should fix that. but question is then, which is the best? +21:06:30 or, the most agreeable +21:06:37 which I can work more on +21:06:40 speaking from experience you have to shout the TOS loud and clear +21:07:17 fyi I'm getting about 75/month and rejecting about 3-5/month +21:07:54 any objections to 2a) or 2b) or 2c) ? +21:08:16 if not Meeh please do all the checkins yourself +21:08:23 ok, I get about 1-2 a week, but often they double register +21:08:26 both with you and me +21:08:32 for example silkroadreloaded +21:09:02 anybody have anything else while I warm up the baffer? +21:10:21 zzz: dg: kytv: topiltzin: str4d: Thank you. It means a lot to have support. +21:10:56 Maybe we should talk about the roadmap next time and our plans. 
+21:10:56 hosting for our downloads, unless there's already a plan, needs to be fleshed out. +21:10:56 ok. Then we say add to trunk? (no.i2p) +21:10:56 seems like we're on a new topic already :P +21:10:56 +1 for no.i2p (yes, add to trunk) +21:11:05 oh, I'll note that I wrote down some rather obvious meeting policies on http://zzz.i2p/topics/1491 to make it clear anybody can schedule a meeting for anything at any time +21:11:20 Title: zzz.i2p: Project Meetings (at zzz.i2p) +21:11:25 efkt_: No problem. You're great! +21:11:38 yeah, downloads and roadmap may be good for a future meeting +21:11:53 nothing for now from me +21:11:59 * zzz emphatically ***BAFS*** the meeting closed +21:12:12 :) diff --git a/i2p2www/meetings/logs/228.rst b/i2p2www/meetings/logs/228.rst new file mode 100644 index 00000000..a5e448ad --- /dev/null +++ b/i2p2www/meetings/logs/228.rst @@ -0,0 +1,16 @@ +I2P dev meeting, October 22, 2013 @ 20:00 UTC +============================================= + +Quick recap +----------- + +* **Present:** + dg, + efkt, + kytv, + Meeh, + Pseudonemo, + str4d + topiltzin, + welterde, + zzz diff --git a/i2p2www/meetings/views.py b/i2p2www/meetings/views.py index 1f3581c0..791dcb5f 100644 --- a/i2p2www/meetings/views.py +++ b/i2p2www/meetings/views.py @@ -29,8 +29,8 @@ def meetings_show(id, log=False, rst=False): Either display the raw IRC .log or render as html and include .rst as header if it exists """ # generate file name for the raw meeting file(and header) - lname = str(id) + '.log' - hname = str(id) + '.rst' + lname = '%03d.log' % id + hname = '%03d.rst' % id lfile = safe_join(MEETINGS_DIR, lname) hfile = safe_join(MEETINGS_DIR, hname) diff --git a/i2p2www/pages/blog/post.html b/i2p2www/pages/blog/post.html index 8f2ca980..4bc64e71 100644 --- a/i2p2www/pages/blog/post.html +++ b/i2p2www/pages/blog/post.html @@ -10,4 +10,8 @@ {% autoescape false %} {{ body }} {% endautoescape %} + +

    +Flattr this +

    {% endblock %} diff --git a/i2p2www/pages/downloads/debian.html b/i2p2www/pages/downloads/debian.html index f306a05b..e1e688b3 100644 --- a/i2p2www/pages/downloads/debian.html +++ b/i2p2www/pages/downloads/debian.html @@ -4,25 +4,24 @@

    {{ _('Debian I2P Packages') }}

    {% trans -%} -The packages hosted on the I2P Launchpad site +The Debian packages have been tested and should work on x86/x86_64 platforms running: {%- endtrans %} -{% trans trac=i2pconv('trac.i2p2.i2p') -%} +{% trans trac='http://trac.i2p2.de/newticket?component=package/debian&owner=killyourtv&cc=killyourtv@mail.i2p' -%} The I2P packages may work on systems not listed above. Please report any issues -with these packages on Trac at -http://{{ trac }}. +with these packages on Trac at +https://trac.i2p2.de. {%- endtrans %}
    • {% trans %}Option 1: Recent versions of Ubuntu and its derivatives (Try this if you're not using Debian){% endtrans %}
    • {% trans %}Option 2: Debian (including Debian-derivatives){% endtrans %}
    • -
    • {% trans %}Option 3: Non-Linux (and possibly Non-x86 architectures){% endtrans %}

    {{ _('Instructions for Ubuntu Precise Pangolin (and newer) and derivatives like Linux Mint & Trisquel') }}

    @@ -79,69 +78,56 @@ After the installation process completes you can move on to the next part of starting I2P and configuring it for your system. {%- endtrans %} -

    {{ _('Instructions for Debian Lenny and newer') }}

    +

    {{ _('Instructions for Debian') }}

    -

    ({% trans %}For Squeeze you can follow the instructions here.{% endtrans %})

    +Currently supported architectures include amd64, i386, armel, armhf (for Raspbian), kfreebsd-amd64, kfreebsd-i386, and powerpc. -{% trans -%} -The steps below should be performed with root access (i.e., switching +

    {% trans -%} +Note: The steps below should be performed with root access (i.e., switching user to root with "su" or by prefixing each command with "sudo"). -{%- endtrans %} +{%- endtrans %}

      -
    1. - {% trans %}Add the GPG key that signs the repository with the following command:{% endtrans %}
      -     apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EB2CC88B
      -{% trans %}You'll have output like the following if the command was successful:{% endtrans %}
      -     +
    2. {% trans file='/etc/apt/sources.list.d/i2p.list' -%} +Add lines like the following to {{ file }}.{% endtrans %}
      +For Squeeze:
      +
      +    deb http://deb.i2p2.no/ squeeze main
      +    deb-src http://deb.i2p2.no/ squeeze main
      +
      +
      +For Wheezy (stable):
      +
      +    deb http://deb.i2p2.no/ stable main
      +    deb-src http://deb.i2p2.no/ stable main
      +
      +
      +For Testing or Unstable:
      +
      +     deb http://deb.i2p2.no/ unstable main
      +     deb-src http://deb.i2p2.no/ unstable main
      +
    3. -
    4. - {% trans %}Add the following entries to /etc/apt/sources.list.d/i2p.list{% endtrans %}
      -     deb http://ppa.launchpad.net/i2p-maintainers/i2p/ubuntu raring main
      -    deb-src http://ppa.launchpad.net/i2p-maintainers/i2p/ubuntu raring main

      + +
    5. {% trans repokey=url_for('static', filename='debian-repo.pub') -%} +Download the key used to sign the repository and add it to apt: +{%- endtrans %}
      +    apt-key add debian-repo.pub
    6. +
    7. - {% trans %}Notify your package manager of the new PPA by entering{% endtrans %}
      + {% trans %}Notify your package manager of the new repository by entering{% endtrans %}
          apt-get update
      {% trans -%} This command will retrieve the latest list of software from every -repository enabled on your system, including the I2P PPA added in step +repository enabled on your system, including the I2P repository added in step 1. {%- endtrans %}
    8. -
    9. - {% trans %}You are now ready to install I2P!{% endtrans %}
      -     apt-get install i2p
    10. -
    - -{% trans -%} -After the installation process completes you can move on to the next part of starting I2P and configuring it for your system. -{%- endtrans %} - -

    {{ _('Instructions for Non-Linux / Non-x86') }}

    - -{% trans -%} -The steps below should be performed with root access (i.e., switching -user to root with "su" or by prefixing each command with "sudo"). +
  • {% trans -%} +You are now ready to install I2P! Installing the i2p-keyring +package will ensure that you receive updates to the repository's GPG key. {%- endtrans %}
    -
      -
    1. - {% trans ppa='http://ppa.launchpad.net/i2p-maintainers/i2p/ubuntu/pool/main/i/i2p/', -kytv='http://'+i2pconv('killyourtv.i2p')+'/debian/' -%} -Download the i2p-router package from the PPA. -Alternatively, there are unofficial packages available here for armel, armhf, and kfreebsd. - {%- endtrans %} -
    2. -
    3. - {% trans -%} - Make sure that you have Java installed. Running apt-get install default-jre should be sufficient.{% endtrans %}
      -{% trans %}You are now ready to install I2P!{% endtrans %}
      -     dpkg -i i2p-router_*.deb -
    4. -
    5. - {% trans jbigi=site_url('misc/jbigi') -%} - It is recommended to compile jbigi for your system to achieve better performance. Instructions are available here. - {%- endtrans %} -
    6. +     apt-get install i2p i2p-keyring
    {% trans -%} @@ -150,7 +136,6 @@ After the installation process completes you can move on to the next part of {{ _('Post-install work') }} - {% trans -%} Using these I2P packages the I2P router can be started in the following three ways: diff --git a/i2p2www/pages/downloads/list.html b/i2p2www/pages/downloads/list.html index 6f7e5510..f3e09fd3 100644 --- a/i2p2www/pages/downloads/list.html +++ b/i2p2www/pages/downloads/list.html @@ -1,19 +1,29 @@ {% extends "global/layout.html" %} {% block title %}{{ _('Download') }}{% endblock %} +{% set i2pinstall_windows_sig = '97dd4326ad8afdea0e78ffcb053b23793dfa79d89458be3fe3a1ed62a2d988e9' %} +{% set i2pinstall_jar_sig = '5028910d3fb9747a6724e39f8eccb6d9ebe0530ef017102c372871badfbf6d9f' %} +{% set i2p_android_sig = 'a6120e19186246458a01e498eeb9e5afcb3901b9c058244410bda85d5a474113' %} +{% set i2psource_sig = 'cbbf25dea50a717c3376eb7af226f0b2a653e0372db8782ef37aa8b3d275436c' %} +{% set i2pupdate_sig = '533b0ce2d9e1bfc8762ba17eef3572ae7355ed8f21d5d3557b718a14b05794f2' %} {% block content %}

    {{ _('Download I2P') }}

    {{ _('Dependency') }}

    {% trans java='http://java.com/download/', openjdk='http://openjdk.java.net/install/', -icedtea='http://icedtea.classpath.org/wiki/Main_Page' %} +icedtea='http://icedtea.classpath.org/wiki/Main_Page', +oracle8='https://jdk8.java.net/download.html', +ibmsdk7='http://www.ibm.com/developerworks/java/jdk/linux/download.html', +detectjre='http://java.com/en/download/installed.jsp?detect=jre&try=1' %} Java Runtime 1.5 or higher. -(Oracle Java Version 6/7, -OpenJDK 6/7, or -IcedTea6/7 - recommended) +(Oracle Java Version 7, +OpenJDK 7, or +IcedTea7 + recommended, +except Raspberry Pi: Oracle 8 Early Access, +PowerPC: IBM SDK 7)
    -Determine your installed Java version here +Determine your installed Java version here or type java -version at your command prompt. {% endtrans %}

    @@ -26,8 +36,7 @@ or type java -version at your command prompt. i2pinstall_{{ ver() }}_windows.exe ({{ def_mirror.org }}) ({{ _('select alternate link') }})
    - (SHA256 -bf7d11f0a36acff9cd51ad3ef89d66975b0b0de344ca72719a5576159ec965d1 + (SHA256 {{ i2pinstall_windows_sig }} sig)
    @@ -44,8 +53,7 @@ or type java -version at your command prompt. i2pinstall_{{ ver() }}.jar ({{ def_mirror.org }}) ({{ _('select alternate link') }})
    - (SHA256 -2dd5b67920723dd94202a408de31671b1e9543fcd5611bbe79385cc14f93b371 + (SHA256 {{ i2pinstall_jar_sig }} sig)
    @@ -72,8 +80,7 @@ or type java -version at your command prompt. i2pinstall_{{ ver() }}.jar ({{ def_mirror.org }}) ({{ _('select alternate link') }})
    - (SHA256 -2dd5b67920723dd94202a408de31671b1e9543fcd5611bbe79385cc14f93b371 + (SHA256 {{ i2pinstall_jar_sig }} sig)
    @@ -97,31 +104,29 @@ or type java -version at your command prompt.

    Android

    - i2p-android-0.9.1-0_b1-API8.apk + i2p-android-0.9.7.1-0_b4-API8.apk
    - (SHA256 -66ed388e8038529dd3a18dcf2f18bd627a75156c98d094e92499fdb124bf4948 + (SHA256 {{ i2p_android_sig }} )
    - {% trans spongeurl='http://'+i2pconv('sponge.i2p') -%} - The Android version of I2P is being actively developed. - The latest dev build is version 0.9.3-5_b2-API8, and is available from - sponge's eepsite - - scan or click the QR code. + {% trans devbuild='0.9.7.1-23_b2-API8', str4durl='http://'+i2pconv('str4d.i2p')+'/android/' -%} + The above file is the current release build. + The latest dev build is version {{ devbuild }}, and is available from + str4d's eepsite. {%- endtrans %}
    {% trans -%} The release and dev versions of the I2P APK are not compatible, as they - are signed by zzz and sponge respectively. Uninstall one before installing + are signed by zzz and str4d respectively. Uninstall one before installing the other. {%- endtrans %}
    @@ -134,14 +139,13 @@ or type java -version at your command prompt. i2psource_{{ ver() }}.tar.bz2 ({{ def_mirror.org }}) ({{ _('select alternate link') }})
    - (SHA256 -fd2aa881d68b3164c43de9d91dcb04a93a36228d416582ada14ae40031436d18 + (SHA256 {{ i2psource_sig }} sig)
    {% trans monotoneurl=site_url('get-involved/guides/new-developers'), - gitrepo='http://git.repo.i2p/w/i2p.i2p.git', + gitrepo='http://'+i2pconv('git.repo.i2p')+'/w/i2p.i2p.git', github='https://github.com/i2p/i2p.i2p', i2pversion=ver() -%} Alternately, you can fetch the source from monotone @@ -201,7 +205,7 @@ as the default settings of 96 KBps down / 40 KBps up are fairly slow. If you want to reach eepsites via your browser, have a look on the browser proxy setup page for an easy howto. {%- endtrans %}

    -

    {{ _('Updates from earlier releases:') }}

    +

    {{ _('Updates from earlier releases:') }}

    {% trans -%} Both automatic and manual upgrades are available for the release. @@ -264,8 +268,7 @@ receive the release. i2pupdate_{{ ver() }}.zip ({{ def_mirror.org }}) ({{ _('select alternate link') }})

    - (SHA256 -f322de7a9e3e89411d072d983a6577494981da488bb08f839c521ed340d77b92 + (SHA256 {{ i2pupdate_sig }} sig)
    diff --git a/i2p2www/pages/downloads/mirrors b/i2p2www/pages/downloads/mirrors index 84ca04ef..3016abd7 100644 --- a/i2p2www/pages/downloads/mirrors +++ b/i2p2www/pages/downloads/mirrors @@ -1,7 +1,6 @@ {"protocol": "http", "domain": "i2p.googlecode.com", "path": "/files/%(file)s", "org": "Google Code", "org_url": "http://code.google.com", "country": "us"} {"protocol": "https", "domain": "i2p.googlecode.com", "path": "/files/%(file)s", "org": "Google Code", "org_url": "https://code.google.com", "country": "us"} {"protocol": "https", "domain": "launchpad.net", "path": "/i2p/trunk/%(version)s/+download/%(file)s", "org": "Launchpad", "org_url": "https://launchpad.net", "country": "us"} -{"protocol": "http", "domain": "golden.mtveurope.org", "path": "/~yang/i2p_mirror/%(file)s", "org": "VServer.si", "org_url": "http://www.vserver.si", "country": "lu"} -{"protocol": "http", "domain": "a.mirror.geti2p.net", "path": "/releases/current/%(file)s", "org": "welterde", "country": "de"} -{"protocol": "http", "domain": "download.i2p2.no", "path": "/releases/current/%(file)s", "org": "meeh", "country": "no"} +{"protocol": "http", "domain": "a.mirror.geti2p.net", "path": "/releases/%(version)s/%(file)s", "org": "welterde", "country": "de"} +{"protocol": "http", "domain": "download.i2p2.no", "path": "/releases/%(version)s/%(file)s", "org": "meeh", "country": "no"} {"protocol": "https", "domain": "googledrive.com", "path": "/host/0B4jHEq5G7_EPWV9UeERwdGplZXc/%(version)s/%(file)s", "org": "Google Drive", "country": "us"} diff --git a/i2p2www/pages/global/footer.html b/i2p2www/pages/global/footer.html index f6f49979..657b1134 100644 --- a/i2p2www/pages/global/footer.html +++ b/i2p2www/pages/global/footer.html @@ -1,14 +1,14 @@ diff --git a/i2p2www/pages/global/lang.html b/i2p2www/pages/global/lang.html index 314f7be4..e5f3d8d2 100644 --- a/i2p2www/pages/global/lang.html +++ b/i2p2www/pages/global/lang.html @@ -1,7 +1,4 @@
      -
    • -
    • -
    • -
    • -
    • -
    +{% for lang in supported_langs -%} +
  • +{% endfor %} diff --git a/i2p2www/pages/global/macros b/i2p2www/pages/global/macros index 0f468f7d..89a748f1 100644 --- a/i2p2www/pages/global/macros +++ b/i2p2www/pages/global/macros @@ -6,6 +6,7 @@ {%- endif -%} {%- elif request.endpoint == 'blog_post' -%}{{ url_for('blog_post', lang=lang, slug=slug) }} {%- elif request.endpoint == 'meetings_show' -%}{{ url_for('meetings_show', lang=lang, id=id) }} +{%- elif request.endpoint == 'downloads_debian' -%}{{ url_for('downloads_debian', lang=lang) }} {%- elif request.endpoint == 'downloads_select' -%}{{ url_for('downloads_select', lang=lang, version=version, file=file) }} {%- elif request.endpoint == 'downloads_redirect' -%}{{ url_for('site_show', lang=lang) }} {%- elif request.endpoint -%}{{ url_for(request.endpoint, lang=lang) }} diff --git a/i2p2www/pages/global/nav.html b/i2p2www/pages/global/nav.html index 9dfa3751..c352bb29 100644 --- a/i2p2www/pages/global/nav.html +++ b/i2p2www/pages/global/nav.html @@ -29,40 +29,6 @@
  • -
  • - -
  • -
  • - -
  • -
  • - -
  • -
  • - -
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • @@ -122,6 +119,7 @@
  • +
  • +
  • diff --git a/i2p2www/pages/include/monotonerc.html b/i2p2www/pages/include/monotonerc.html index 7162b37b..2806b551 100644 --- a/i2p2www/pages/include/monotonerc.html +++ b/i2p2www/pages/include/monotonerc.html @@ -48,6 +48,8 @@ function get_revision_cert_trust(signers, id, name, val) "690f278ff6c6157cbaf23b0d602b6d6dcf368313", -- complication@mail.i2p "eb4ac08d5ddbb2bd73889f86c1211424025a6f07", -- dev@robertfoss.se "aae785027c240ebbb0a883fd8ebcf8d6ecee4104", -- dev@welterde.de + "86478595288d1b96b58f0c8cd8a8971bc430f8fd", -- dg2@mail.i2p + "5f75b8f0769770edc3267c21ddc9a00ddae31394", -- digit@mail.i2p "4ebaace9973913416af92ee8d0fb93d64753df4c", -- dream@mail.i2p "7e498ae94c9c322404adfc61b16bed388095906b", -- duck@mail.i2p "56c0064a8638fe180ed2f2726d6e2e404c788d3d", -- echelon@mail.i2p diff --git a/i2p2www/pages/meetings/show.html b/i2p2www/pages/meetings/show.html index c65da553..6605c51d 100644 --- a/i2p2www/pages/meetings/show.html +++ b/i2p2www/pages/meetings/show.html @@ -8,9 +8,8 @@ {% endautoescape %}

    {{ _('Full IRC Log') }}

    -
    +{% highlight lang='irc' %}
     {{ log|escape }}
    -{# TODO: pygments #}
    -
    +{% endhighlight %}
    {% endblock %} diff --git a/i2p2www/pages/papers/anonbib.bib b/i2p2www/pages/papers/anonbib.bib new file mode 100644 index 00000000..68ea60f1 --- /dev/null +++ b/i2p2www/pages/papers/anonbib.bib @@ -0,0 +1,205 @@ +% +% +% Magic fields: +% +% www_tags -- used to control which page groups the paper appears in. +% This is a space-separated list. +% www_section -- the topic used under 'topics.html' +% www_{ps|pdf|ps_gz|txt|html|abstract}_url -- link for text/abstract of +% an entry. +% www_important -- set for important entries +% www_remarks -- annotation for an entry + +%% List of sections +@string{comm = "Anonymous communication"} +@string{traffic = "Traffic analysis"} +@string{pub = "Anonymous publication"} +@string{proofs = "Provable shuffles"} +@string{methods = "Formal methods"} +@string{nym = "Pseudonymity"} +@string{pir = "Private Information Retrieval"} +@string{economics = "Economics"} +@string{censorship = "Communications Censorship"} +@string{credentials = "E-Cash / Anonymous Credentials"} +@string{misc = "Misc"} + +# +# Proposed new sections: application privacy, data anonymization, ... +# + +@string{lncs = "Lecture Notes in Computer Science"} + +@inproceedings{egger2013:practical-attacks, + title = {Practical Attacks Against the I2P Network}, + author = {Christoph Egger and Johannes Schlumberger and Christopher Kruegel and Giovanni Vigna}, + booktitle = {Proceedings of the 16th International Symposium on Research in Attacks, Intrusions and Defenses (RAID 2013)}, + year = {2013}, + month = {October}, + www_pdf_url = {http://wwwcip.informatik.uni-erlangen.de/~spjsschl/i2p.pdf}, + www_section = traffic, +} + +%% hal-00744922, version 1 +%% http://hal.inria.fr/hal-00744922 +@inproceedings{timpanaro:hal-00744922, + hal_id = {hal-00744922}, + url = {http://hal.inria.fr/hal-00744922}, + title = {{Improving Content Availability in the I2P Anonymous File-Sharing Environment}}, + author = {Timpanaro, Juan Pablo and Chrisment, Isabelle and Festor, Olivier}, + abstract = {{Anonymous communication has gained more and more interest from Internet users as privacy and anonymity problems have emerged. Dedicated anonymous networks such as Freenet and I2P allow anonymous file-sharing among users. However, one major problem with anonymous file-sharing networks is that the available content is highly reduced, mostly with outdated files, and non-anonymous networks, such as the BitTorrent network, are still the major source of content: we show that in a 30-days period, 21648 new torrents were introduced in the BitTorrent community, whilst only 236 were introduced in the anonymous I2P network, for four different categories of content. Therefore, how can a user of these anonymous networks access this varied and non-anonymous content without compromising its anonymity? In this paper, we improve content availability in an anonymous environment by proposing the first internetwork model allowing anonymous users to access and share content in large public communities while remaining anonymous. We show that our approach can efficiently interconnect I2P users and public BitTorrent swarms without affecting their anonymity nor their performance. 
Our model is fully implemented and freely usable.}}, + language = {English}, + affiliation = {MADYNES - INRIA Nancy - Grand Est / LORIA}, + booktitle = {{Proceedings of the 4th International Symposium on Cyberspace Safety and Security}}, + publisher = {Springer}, + pages = {77--92}, + address = {Melbourne, Australia}, + volume = {4}, + audience = {international }, + doi = {10.1007/978-3-642-35362-8 }, + year = {2012}, + month = Dec, + www_pdf_url = {http://hal.inria.fr/hal-00744922/PDF/Improving\_Content\_Availability\_in\_the\_I2P\_0AAnonymous\_File-Sharing\_Environment\_0A.pdf}, + www_section = comm, +} + +%% hal-00744919, version 1 +%% http://hal.inria.fr/hal-00744919 +@inproceedings{timpanaro:hal-00744919, + hal_id = {hal-00744919}, + url = {http://hal.inria.fr/hal-00744919}, + title = {{A Bird's Eye View on the I2P Anonymous File-sharing Environment}}, + author = {Timpanaro, Juan Pablo and Chrisment, Isabelle and Festor, Olivier}, + abstract = {{Anonymous communications have been gaining more and more interest from Internet users as privacy and anonymity problems have emerged. Among anonymous enabled services, anonymous file-sharing is one of the most active one and is increasingly growing. Large scale monitoring on these systems allows us to grasp how they behave, which type of data is shared among users, the overall behaviour in the system. But does large scale monitoring jeopardize the system anonymity? In this work we present the first large scale monitoring architecture and experiments on the I2P network, a low-latency message-oriented anonymous network. We characterize the file-sharing environment within I2P, and evaluate if this monitoring affects the anonymity provided by the network. We show that most activities within the network are file-sharing oriented, along with anonymous web-hosting. We assess the wide geographical location of nodes and network popularity. We also demonstrate that group-based profiling is feasible on this particular network.}}, + keywords = {Large scale monitoring, I2P, Security risks, Anonymous file-sharing}, + language = {English}, + affiliation = {MADYNES - INRIA Nancy - Grand Est / LORIA}, + booktitle = {{Proceedings of the 6th International Conference on Network and System Security}}, + address = {Wu Yi Shan, China}, + audience = {international }, + year = {2012}, + month = Nov, + www_pdf_url = {http://hal.inria.fr/hal-00744919/PDF/A\_Birda\_s\_Eye\_View\_on\_the\_I2P\_Anonymous\_0AFile-sharing\_Environment\_0A.pdf}, + www_section = traffic, +} + +%% hal-00744902, version 1 +%% http://hal.inria.fr/hal-00744902 +@inproceedings{timpanaro:hal-00744902, + hal_id = {hal-00744902}, + url = {http://hal.inria.fr/hal-00744902}, + title = {{I2P's Usage Characterization}}, + author = {Timpanaro, Juan Pablo and Chrisment, Isabelle and Festor, Olivier}, + abstract = {{We present the first monitoring study aiming to characterize the usage of the I2P network, a low-latency anonymous network based on garlic routing. We design a distributed monitoring architecture for the I2P network and show through three one-week measurement experiments the ability of the system to identify a significant number of all running applications, among web servers and file-sharing clients. 
Additionally, we identify 37\% of published I2P applications, which turn out to be unreachable after their publication on the I2P distributed database.}}, + language = {English}, + affiliation = {MADYNES - INRIA Nancy - Grand Est / LORIA}, + booktitle = {{Proceedings of the 4th International Workshop on Traffic Monitoring and Analysis (TMA 2012)}}, + address = {Vienne, Austria}, + audience = {international }, + year = {2012}, + month = Mar, + www_pdf = {http://hal.inria.fr/hal-00744902/PDF/TMA2012-LNCS.pdf}, + www_section = traffic, +} + +@misc{ehlert2011:usability-comparison-i2p-tor, + title = {I2P Usability vs. Tor Usability A Bandwidth and Latency Comparison}, + author = {Mathias Ehlert}, + howpublished = {Seminar, Humboldt University of Berlin}, + year = {2011}, + month = {November}, + www_pdf_url = {http://userpage.fu-berlin.de/~semu/docs/2011_seminar_ehlert_i2p.pdf}, + www_section = comm, +} + +%% inria-00632259, version 1 +%% http://hal.inria.fr/inria-00632259 +@misc{timpanaro:inria-00632259, + hal_id = {inria-00632259}, + url = {http://hal.inria.fr/inria-00632259}, + title = {{Monitoring the I2P network}}, + author = {Timpanaro, Juan Pablo and Chrisment, Isabelle and Festor, Olivier}, + howpublished = {Preprint}, + abstract = {{We present the first monitoring study aiming to characterize the usage of the I2P network, a low-latency anonymous network based on garlic routing. We design a distributed monitoring architecture for the I2P network and we show through a one week long experiment that we are able to identify 32\% of all running applications, among web servers and file- sharing clients. Additionally, we identify 37\% of published I2P applications, which turn out to be unreachable after its publication on the I2P distributed database.}}, + language = {English}, + affiliation = {MADYNES - INRIA Lorraine - LORIA}, + year = {2011}, + month = Oct, + www_pdf_url = {http://hal.inria.fr/inria-00632259/PDF/TMA2012-LNCS.pdf}, + www_section = traffic, +} + +@inproceedings{pets2011-i2p, + title = {Privacy Implications of Performance-Based Peer Selection by Onion Routers: A Real-World Case Study using I2P}, + author = {Michael Herrmann and Christian Grothoff}, + booktitle = {Proceedings of the 11th Privacy Enhancing Technologies Symposium (PETS 2011)}, + year = {2011}, + month = {July}, + location = {Waterloo, Canada}, + www_tags = {selected}, + www_pdf_url = {http://freehaven.net/anonbib/papers/pets2011/p9-herrmann.pdf}, + www_section = traffic, +} + +@mastersthesis{hermann2011-mthesis, + title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A Real-World Case Study using I2P}, + author = {Michael Hermann}, + school = {TU-Munich}, + year = {2011}, + month = {March}, + www_section = traffic, +} + +@inproceedings{BlackHat2011-crenshaw, + url = {http://www.irongeek.com/i.php?page=security/darknets-i2p-identifying-hidden-servers}, + title = {Darknets and hidden servers: Identifying the true IP/network identity of I2P service hosts}, + author = {Adrian Crenshaw}, + booktitle = {Proceedings of Black Hat 2011}, + year = {2011}, + month = {January}, + location = {Washington, DC}, + www_pdf_url = {http://www.irongeek.com/downloads/Identifying%20the%20true%20IP%20of%20I2P%20service%20hosts.pdf}, + www_section = traffic, +} + +@mastersthesis{delmer-mthesis, + title = {L'{\'e}mergence au sein d'internet de communaut{\'e}s virtuelles et anonymes, Freenet et i2p}, + author = {Laurie Delmer}, + school = {Universit{\'e} catholique de Louvain - D{\'e}partement des sciences 
politiques et sociales}, + year = {2009}, + note = {Title : The rise in internet virtual and anonymous communities, Freenet and I2P. School: Catholic University of Leuven - Department of Political and Social Science}, + www_section = comm, +} + +@inproceedings{petcon2009-schomburg, + title = {Anonymity Techniques - Usability Tests of Major Anonymity Networks}, + author = {Jens Schomburg}, + booktitle = {Proceedings of PET-CON 2009.1}, + location = {Dresden, Germany}, + year = {2009}, + month = {March}, + pages = {49--58}, + www_pdf_url = {https://people.torproject.org/~karsten/petcon-proceedings-2009.1.pdf}, + www_section = comm, +} + +@inproceedings{petcon2009-zzz, + title = {Peer Profiling and Selection in the I2P Anonymous Network}, + author = {zzz (Pseudonym) and Lars Schimmer}, + booktitle = {Proceedings of PET-CON 2009.1}, + location = {Dresden, Germany}, + year = {2009}, + month = {March}, + pages = {59--70}, + www_pdf_url = {/_static/pdf/I2P-PET-CON-2009.1.pdf}, + www_section = comm, +} + +@misc{jrandom2003, + title = {Invisible Internet Project (I2P) Project Overview}, + author = {jrandom (Pseudonym)}, + howpublished = {Design document}, + year = {2003}, + month = Aug, + www_pdf_url = {/_static/pdf/i2p_philosophy.pdf}, + www_section = comm, +} diff --git a/i2p2www/pages/papers/anonbib.cfg b/i2p2www/pages/papers/anonbib.cfg new file mode 100644 index 00000000..be14ab8e --- /dev/null +++ b/i2p2www/pages/papers/anonbib.cfg @@ -0,0 +1,163 @@ +# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info. + +# Our input filename. +MASTER_BIB = "./anonbib.bib" + +# Where do we put generated HTML? +OUTPUT_DIR = "." + +# Where do we put cached papers (relative to OUTPUT_DIR) +CACHE_DIR = "cache" + +# Where do we cache citations papers (relative to OUTPUT_DIR) +CITE_CACHE_DIR = "cite_cache" + +# Are there subsections for cached papers? This is useful for putting +# different Apache permission on different directories. +CACHE_SECTIONS = [ ] + +# Only include entries that have this key. This is one way to +# generate multiple bibliographies from the same source. Currently +# deprecated in favor of tags. +# +#example: REQUIRE_KEY = "www_selected" +# +REQUIRE_KEY = None + +# Timeout when downloading from a server while caching, in seconds. +DOWNLOAD_CONNECT_TIMEOUT = 15 + +# Template files. +TEMPLATE_FILE = "./_template_.html" +BIBTEX_TEMPLATE_FILE = "./_template_bibtex.html" + +# Map from author name regex to author homepage. 
+AUTHOR_URLS = { + 'Ross.*Anderson' : 'http://www.cl.cam.ac.uk/users/rja14/', + 'Alessandro.*Acquisti' : 'http://www.heinz.cmu.edu/~acquisti/index.html', + 'Agrawal' : 'http://www.research.ibm.com/people/a/agrawal/', + 'Adam.*Back' : 'http://www.cypherspace.org/~adam/', + 'Berthold' : 'http://page.inf.fu-berlin.de/~berthold/', + 'Borisov' : 'http://hatswitch.org/~nikita/', + 'Bettati' : 'http://faculty.cs.tamu.edu/bettati/', + 'Miguel.*Castro' : 'http://research.microsoft.com/users/mcastro/', + 'Chaum' : 'http://www.chaum.com/', + 'J.*Claessens' : 'http://www.esat.kuleuven.be/~joclaess/', + 'R.*Clayton' : 'http://www.cl.cam.ac.uk/~rnc1/', + 'Wei Dai' : 'http://www.eskimo.com/~weidai/', + 'Danezis' : 'http://homes.esat.kuleuven.be/~gdanezis/', + 'Claudia.*az' : 'http://www.esat.kuleuven.be/~cdiaz/', + 'Dingledine' : 'http://www.freehaven.net/~arma/cv.html', + 'Desmedt' : 'http://www.cs.fsu.edu/~desmedt/', + 'Douceur' : 'http://research.microsoft.com/~johndo/', + 'N.*Hopper' : 'http://www-users.cs.umn.edu/~hopper/', + 'Michael.*Freedman' : 'http://www.scs.cs.nyu.edu/~mfreed/', + 'Gergely' : 'http://www.planeforge.com/home/tgm', + 'Ian.*Goldberg' : 'http://www.cs.uwaterloo.ca/~iang/', + 'Christian.*Grothoff' : 'http://grothoff.org/christian/', + 'D.*Hopwood' : 'http://www.users.zetnet.co.uk/hopwood/', + 'Jakobsson' : 'http://www2.parc.com/csl/members/mjakobss/markus-jakobsson.htm', + 'Juels' : 'http://www.rsasecurity.com/rsalabs/staff/bios/ajuels/', + 'Kaashoek' : 'http://pdos.csail.mit.edu/~kaashoek/', + 'K.*Kurosawa' : 'http://kuro.cis.ibaraki.ac.jp/~kurosawa/', + 'H.*Langos' : 'http://www.wh9.tu-dresden.de/~heinrich/', + 'B.*Liskov' : 'http://www.pmg.lcs.mit.edu/barbara_liskov.html', + 'Mathewson' : 'http://www.wangafu.net/~nickm/', + 'Mazières' : 'http://www.scs.cs.nyu.edu/~dm/', + 'B.*Möller' : ('http://www.informatik.tu-darmstadt.de/TI/' + 'Mitarbeiter/moeller.html'), + 'U.*Möller' : 'http://www.ulfm.de/', + 'D.*Molnar' : 'http://www.cs.berkeley.edu/~dmolnar/', + 'R.*Morris' : 'http://www.pdos.lcs.mit.edu/~rtm/', + 'S.*Murdoch' : 'http://www.cl.cam.ac.uk/users/sjm217/', + 'A.*Pashalidis' : 'http://www.xrtc.com/', + 'A.*Pfitzmann' : 'http://dud.inf.tu-dresden.de/~pfitza/', + 'B.*Pfitzmann' : 'http://www.zurich.ibm.com/~bpf/', + 'B.*Preneel' : 'http://www.esat.kuleuven.be/~preneel/', + 'Daniel.*Simon' : 'http://research.microsoft.com/crypto/dansimon/me.htm', + 'Rackoff' : 'http://www.cs.toronto.edu/DCS/People/Faculty/rackoff.html', + 'Jean F' : 'http://www.geocities.com/j_f_raymond/', + 'M.*Rennhard' : 'http://www.tik.ee.ethz.ch/~rennhard/', + 'M.*Reiter' : 'http://www.ece.cmu.edu/~reiter/', + 'Rivest' : 'http://theory.lcs.mit.edu/~rivest/', + 'Avi.*Rubin' : 'http://avirubin.com/', + 'Sassaman' : 'http://homes.esat.kuleuven.be/~lsassama/', + 'Serjantov' : 'http://home.arachsys.com/~aas/', + 'S.*Seys' : 'http://www.esat.kuleuven.be/~sseys/', + 'Shoup' : 'http://www.shoup.net/', + 'Syverson' : 'http://www.syverson.org/', + 'Tsudik' : 'http://www.ics.uci.edu/~gts/c.html', + 'M.*Waidner' : 'http://www.zurich.ibm.com/~wmi/', + 'David.*Wagner' : 'http://www.cs.berkeley.edu/~daw/', + 'M.*Waldman' : 'http://cs1.cs.nyu.edu/~waldman/', + 'B.*Waters' : 'http://www.cs.utexas.edu/~bwaters/', + 'Chenxi.*Wang' : 'http://www.ece.cmu.edu/~chenxi/', + 'M.*Wright' : 'http://ranger.uta.edu/~mwright/', + 'B.*Levine' : 'http://prisms.cs.umass.edu/brian/', + 'T.*Benjamin' : 'http://www.cs.umass.edu/~tshb/', + 'B.*Defend' : 'http://www.cs.umass.edu/~defend/', + 'K.*Fu' : 'http://www.cs.umass.edu/~kevinfu/', + 
'J.*Camenisch' : 'http://www.zurich.ibm.com/~jca/', + 'S.*Hohenberger' : 'http://www.cs.jhu.edu/~susan/', + 'M.*Kohlweiss' : 'http://homes.esat.kuleuven.be/~mkohlwei/', + 'A.*Lysyanskaya' : 'http://www.cs.brown.edu/~anna/', + 'M.*Meyerovich' : 'http://www.cs.brown.edu/~mira/', + 'P.*Zieli.*ski' : 'http://www.cl.cam.ac.uk/~pz215/', + 'S.*Zander' : 'http://caia.swin.edu.au/cv/szander/' + } + +# List of paterns for author names _not_ to do an initial-tolerant +# match on when building section list. E.g., if "J\\. Smith" is in +# this list, he won't be folded into "John Smith". +NO_COLLAPSE_AUTHORS = [ + +] + +# Map from LaTeX-style name of author to collapse to canonical name. +COLLAPSE_AUTHORS = { + "Nicholas Mathewson": "Nick Mathewson", + } + +# Map from author pattern to collation key. +# This keeps 'Zero Knowledge Systems' from getting alphabetized as "Systems, +# Zero Knowledge." +ALPHABETIZE_AUTHOR_AS = { + "Zero.*Knowledge.*Systems": "Zero Knowledge Systems", + "Carlos.*Aguilar.*Melchor": "Aguilar Melchor Carlos", + } + +# Map of strings to initialize BibTeX parsing with. +INITIAL_STRINGS = { + # SECTIONS + 'sec_mix' : "Mix Networks: Design", + 'sec_mixattacks' : "Mix Networks: Attacks", + 'sec_stream' : "Stream-based anonymity", + 'sec_traffic' : "Traffic analysis", + 'sec_pub' : "Anonymous publication", + 'sec_pir' : "Private Information Retrieval", + 'sec_nym' : "Pseudonymity" +} + +# Don't put in any entries of this type. +OMIT_ENTRIES = ("proceedings", "journal") + +# List of all recognized values for www_tags. +ALL_TAGS = ("selected", ) + +# Titles of page, by tag. +TAG_TITLES = { "": "I2P Bibliography", + "selected": "Selected I2P Papers" + } + +# As TAG_TITLES, but shorter. +TAG_SHORT_TITLES = { "": "I2P Bibliography", + "selected": "Selected I2P Papers", + } + +# Directories where tag pages get generated. +TAG_DIRECTORIES = { '': "full", + "selected": "" } + +# Make cached stuff group-writable. Make sure that your cache directories +# are sticky! +CACHE_UMASK = 002 diff --git a/i2p2www/pages/papers/bibtex.html b/i2p2www/pages/papers/bibtex.html new file mode 100644 index 00000000..7ba1750e --- /dev/null +++ b/i2p2www/pages/papers/bibtex.html @@ -0,0 +1,17 @@ +{% extends "global/layout.html" %} +{% block title %}{{ bib.title }}: BibTeX{% endblock %} +{% block headextra %} + +{% endblock %} +{% block content %} + +{% for entry in bib.entries %} + +{% endfor %} +
    +{{ entry.key }} +
    +{{ entry.format(90,8,1) }}
    +
    +
    +{% endblock %} diff --git a/i2p2www/pages/papers/list.html b/i2p2www/pages/papers/list.html new file mode 100644 index 00000000..833e15d1 --- /dev/null +++ b/i2p2www/pages/papers/list.html @@ -0,0 +1,98 @@ +{% extends "global/layout.html" %} +{% block title %}{{ bib.tag_short_titles[bib.tag] }}{% endblock %} +{% block headextra %} + +{% endblock %} +{% block content %} +

    {{ bib.tag_titles[bib.tag] }}

    + +

    +{%- if bib.tag|length %} +{%- endif %}{{ bib.tag_titles[''] }} +{%- if bib.tag|length %}{% endif %} +{%- for tag in bib.tags %} |  +{%- if tag != bib.tag %} +{%- endif %}{{ bib.tag_titles[tag] }} +{%- if tag != bib.tag %}{% endif %} +{%- endfor -%} +

    + +

    +{%- if bib.field != 'topic' %}{% if bib.tag -%} + +{%- else -%} +{% endif %} +{%- endif %}{{ _('By topic') }}{%- if bib.field != 'topic' -%}{% endif -%} + |  +{%- if bib.field != 'date' -%}{% if bib.tag -%} + +{%- else -%} +{% endif %} +{%- endif %}{{ _('By date') }}{%- if bib.field != 'date' -%}{% endif -%} + |  +{%- if bib.field != 'author' -%}{% if bib.tag -%} + +{%- else -%} +{% endif %} +{%- endif %}{{ _('By author') }}{%- if bib.field != 'author' -%}{% endif -%} +

    + + + + + + + + + + + +
    + + + + + + + + +

    Publications by {{ bib.field }}

    + +
      +{% for section in bib.sections %} +
    • {{ section.name }}

      +
        +{% for entry in section.entries %} +{{ entry.to_html()|safe }} +{% endfor %} +
    • +{% endfor %} +
    + +

    + +

    {% trans email="press@i2p2.de"|safe, +bibtex = get_url('papers_bibtex', tag=bib.tag) if bib.tag else get_url('papers_bibtex'), +citeseer = 'http://citeseer.ist.psu.edu/online-nature01/' -%} +Please send new or corrected entries to +.
    +If you can, please format them as BibTeX; see our +BibTeX source page for examples.
    +Remember to include URLs if possible: +offline papers are less useful. +{%- endtrans %}

    + +

    {% trans anonbib="https://gitweb.torproject.org/anonbib.git" -%} +The source code for this page was adapted from +Free Haven's anonymity bibliography. +{%- endtrans %}

+{% endblock %} diff --git a/i2p2www/pages/site/about/browser-config.html index e3b56988..488c6d12 100644 --- a/i2p2www/pages/site/about/browser-config.html +++ b/i2p2www/pages/site/about/browser-config.html @@ -80,9 +80,9 @@ Instead, it is meant to be used as an internal network. services are run by private volunteers and could be shut down or unreachable at any time. {%- endtrans %}

    -

    {% trans -%} -By default, I2P comes with two outproxies configured: false.i2p -(an HTTP-only proxy) and outproxyng.h2ik.i2p (an HTTPS proxy +

{% trans http='false.i2p', https='outproxy-tor.meeh.i2p' -%} +By default, I2P comes with two outproxies configured: {{ http }} +(an HTTP-only proxy) and {{ https }} (an HTTPS proxy routed through Tor). {%- endtrans %}

    {% trans -%} @@ -91,12 +91,11 @@ that only set amount of accesses are allowed per client. Once the limit is reached, the client is blocked out for a timeframe of 1min/1h/1 day. Be respectful and do not overload these services with too many requests! {%- endtrans %}

    -

    {% trans tpb=i2pconv('tpb.i2p') -%} +

    {% trans -%} Filtering is active on these outproxies (for example, mibbit and torrent -tracker access is blocked). Note that even though the pirate bay is blocked -they host an official eepsite at {{ tpb }}. Eepsites +tracker access is blocked). Eepsites that are accessible via .i2p addresses are also not allowed via the outproxies. -As a convenience, False.i2p blocks ad servers. +As a convenience, false.i2p blocks ad servers. {%- endtrans %}

    {% trans -%} Tor is good application to use as an diff --git a/i2p2www/pages/site/about/hall-of-fame.html b/i2p2www/pages/site/about/hall-of-fame.html index be1615c3..161db01a 100644 --- a/i2p2www/pages/site/about/hall-of-fame.html +++ b/i2p2www/pages/site/about/hall-of-fame.html @@ -1,13 +1,13 @@ {% extends "global/layout.html" %} {% block title %}{{ _('Hall Of Fame') }}{% endblock %} {% block content %} - +

    {% trans %}I2P's Hall of Fame{% endtrans %}

    -{% trans date='2013-05-01' -%} +{% trans date='2013-08-25' -%} Current balance: as of {{ date }} {%- endtrans %}
    {{ _('General fund') }}: -{% trans euroval='28078,57', btcval='626,04640057' %}{{ euroval }} € and {{ btcval }} BTC{% endtrans %}
    +{% trans euroval='28829,12', btcval='607,49839100' %}{{ euroval }} € and {{ btcval }} BTC{% endtrans %}

    {{ _('Datastorage bounty') }}: {% trans euroval='145.0', btcval='2' %}{{ euroval }} € and {{ btcval }} BTC{% endtrans %}
    {{ _('Native IPv6 I2P') }}: @@ -15,9 +15,9 @@ Current balance: as of {{ date }} {{ _('I2PHex bounty') }}: {% trans euroval='100.0' %}{{ euroval }} €{% endtrans %}
    {{ _('I2P in debian mirrors') }}: -{% trans euroval='123.0' %}{{ euroval }} €{% endtrans %}
    +{% trans euroval='148.0' %}{{ euroval }} €{% endtrans %}

    {{ _('Bitcoin client for I2P') }}: -{% trans btcval='50.34' %}{{ btcval }} BTC{% endtrans %}
    +{% trans euroval='10', btcval='50.34' %}{{ euroval }} € and {{ btcval }} BTC{% endtrans %}
    {{ _('Unit Tests for I2P router') }}: {% trans euroval='2305' %}{{ euroval }} €{% endtrans %}
    {{ _('Bounty Robert') }}: 20
    @@ -113,6 +113,7 @@ with your name or nick (and optionally homepage) so we can list you here. Apr, 2013SSL cert i2p2.de2009,70 €General fund Apr, 2013anonymous4 €General fund Apr, 2013anonymous10 €General fund + Apr, 2013anonymous10 €I2PBTC native client Apr, 2013anonymous1 BTCGeneral fund Apr, 201329C3 contest25 BTCGeneral fund Apr, 201329C3 contest5 BTCGeneral fund @@ -128,6 +129,41 @@ with your name or nick (and optionally homepage) so we can list you here. May, 2013anonymous0.3 BTCGeneral fund May, 2013anonymous10 €Bounty CCR Microtic Board + May, 2013anonymous20 €General fund + May, 2013anonymous30 €General fund + May, 2013anonymous3.50 €General fund + May, 2013anonymous0.0008 BTCGeneral fund + May, 2013anonymous0.0991 BTCGeneral fund + + June, 2013anonymous10 €General fund + June, 2013anonymous20 €General fund + June, 2013anonymous30 €General fund + June, 2013DHT bounty325 €DHT bounty + June, 2013anonymous1 €General fund + June, 2013anonymous0.0000067 BTCGeneral fund + June, 2013anonymous0.01 BTCGeneral fund + June, 2013anonymous0.031 BTCGeneral fund + June, 2013anonymous0.25 BTCGeneral fund + + July, 2013first-leon1 €General fund + July, 2013anonymous5 €General fund + July, 2013anonymous2 €General fund + July, 2013anonymous20 €General fund + July, 2013anonymous0.05 €General fund + July, 2013anonymous30 €General fund + July, 2013JULLIAN David15 €General fund + July, 2013anonymous0.00567899 BTCGeneral fund + July, 2013anonymous0.00942744 BTCGeneral fund + July, 2013anonymous0.01807730 BTCGeneral fund + July, 2013fraud loss9 BTCGeneral fund + + Aug, 2013anonymous18 €General fund + Aug, 2013anonymous30 €General fund + Aug, 2013sell BTC820 €10 BTCGeneral fund + Aug, 2013anonymous20 €General fund + Aug, 2013anonymous25 €Bounty deb package + Aug, 2013anonymous0.0279 BTCGeneral fund +

    diff --git a/i2p2www/pages/site/about/media.html b/i2p2www/pages/site/about/media.html index 641326da..17b7a7b6 100644 --- a/i2p2www/pages/site/about/media.html +++ b/i2p2www/pages/site/about/media.html @@ -193,12 +193,10 @@ November 2007. {%- endtrans %}
  • {% trans link='http://www.gulli.com/news/i2p-an-anonymous-network-2009-03-09/', -german='http://www.gulli.com/news/i2p-anonymes-netzwerk-im-2009-03-09/', -russian='http://translated.by/you/i2p-an-anonymous-network-interrogated/into-ru/' -%} +german='http://www.gulli.com/news/i2p-anonymes-netzwerk-im-2009-03-09/' -%} zzz interviewed by gulli.com March 2009 German translation -Russian translation {%- endtrans %}
  • {% trans mp3='http://www.isdpodcast.com/podcasts/infosec-daily-podcast-episode-454.mp3' -%} diff --git a/i2p2www/pages/site/about/performance/history.html b/i2p2www/pages/site/about/performance/history.html index 59f92ba9..896de288 100644 --- a/i2p2www/pages/site/about/performance/history.html +++ b/i2p2www/pages/site/about/performance/history.html @@ -24,7 +24,7 @@ Rather than try to tune this method, we'll call out to ugha and duck are working on the C/JNI glue code, and the existing java code is already deployed with hooks for that whenever its ready. Preliminary results look fantastic - running the router with the native GMP modPow is providing over -a 800% speedup in encryption performance, and the load was cut in half. This +a 800% speedup in encryption performance, and the load was cut in half. This was just on one user's machine, and things are nowhere near ready for packaging and deployment, yet. {%- endtrans %}

    @@ -54,7 +54,7 @@ have to do the network database lookup.

    {% trans -%} For unpublished LeaseSets such as "shared clients", this is the only way to get the LeaseSet to Bob. Unfortunately this bundling every time adds -almost 100% overhead to a high-bandwidth connection, and much more to +almost 100% overhead to a high-bandwidth connection, and much more to a connection with smaller messages. {%- endtrans %}

    {% trans -%} @@ -71,7 +71,7 @@ going through the full (expensive) Diffie-Hellman handshaking to negotiate a private session key. This means that if someone's clock is really wrong, or their NAT/firewall/etc is improperly configured (or they're just running an incompatible version of the router), they're going to consistently (though not -constantly, thanks to the shitlist) cause a futile expensive cryptographic +constantly, thanks to the banlist) cause a futile expensive cryptographic operation on all the peers they know about. While we will want to keep some verification/validation within the encryption boundary, we'll want to update the protocol to do some of it first, so that we can reject them cleanly @@ -140,7 +140,7 @@ message based, with the router providing delivery guarantees by garlic wrapping an "ACK" message in with the payload, so once the data gets to the target, the ACK message is forwarded back to us [through tunnels, of course]). {%- endtrans %}

    -

    {% trans link='http://dev.i2p.net/pipermail/i2p/2004-March/000167.html' -%} +

    {% trans link='http://web.archive.org/web/20070607220008/http://dev.i2p.net/pipermail/i2p/2004-March/000167.html' -%} As I've said, having I2PTunnel (and the ministreaming lib) go this route was the best thing that could be done, but more efficient mechanisms are available. When we rip out the diff --git a/i2p2www/pages/site/about/team.html b/i2p2www/pages/site/about/team.html index 95e1cd31..27fcaa43 100644 --- a/i2p2www/pages/site/about/team.html +++ b/i2p2www/pages/site/about/team.html @@ -11,7 +11,7 @@ network. - + @@ -71,6 +71,16 @@ network. + + + + + + + + + + @@ -82,12 +92,12 @@ network. - + - + @@ -103,7 +113,7 @@ network. - + @@ -174,38 +184,11 @@ network. - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -230,7 +213,7 @@ network. - + @@ -353,6 +336,33 @@ network. + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/i2p2www/pages/site/comparison/index.html b/i2p2www/pages/site/comparison/index.html index f87651a5..e473eb94 100644 --- a/i2p2www/pages/site/comparison/index.html +++ b/i2p2www/pages/site/comparison/index.html @@ -29,9 +29,9 @@ The following are discussed on the other networks
  • Haystack
  • -

    {% trans trac=i2pconv('trac.i2p2.i2p') -%} +

    {% trans trac='https://trac.i2p2.de/report/1' -%} The content of this page is subject to update, discussion and dispute, and we welcome comments and additions. -You may contribute an analysis by entering a new ticket on Trac. +You may contribute an analysis by entering a new ticket on Trac. {%- endtrans %}

    {% endblock %} diff --git a/i2p2www/pages/site/comparison/other-networks.html b/i2p2www/pages/site/comparison/other-networks.html index 61373ac3..34d60e3a 100644 --- a/i2p2www/pages/site/comparison/other-networks.html +++ b/i2p2www/pages/site/comparison/other-networks.html @@ -13,12 +13,12 @@ The following networks are discussed on this page.
  • Haystack
  • -

    {% trans comparison=site_url('comparison'), trac=i2pconv('trac.i2p2.i2p') -%} +

    {% trans comparison=site_url('comparison'), trac='https://trac.i2p2.de/report/1' -%} Most of the following sections are fairly old, and may not be accurate. For an overview of available comparisons, see the main network comparisons page. You may contribute an analysis by entering a -new ticket on {{ trac }}. +new ticket on Trac. {%- endtrans %}

    @@ -150,13 +150,11 @@ comparison of Tarzan, Crowds, Onion Routing (OR), and I2P:

    [Mixminion] [Mixmaster] -

    {% trans %} +

{% trans syndie='http://syndie.i2p2.de/' %} Mixminion and Mixmaster are networks to support anonymous email against a very powerful adversary. High-latency messaging applications running on top of I2P -(for example -Syndie or -I2PBote) +(for example Syndie or I2PBote) may perhaps prove adequate to meet the threat model of those adversaries, while running in parallel alongside the needs of low latency users, to provide a significantly larger anonymity set. @@ -219,25 +217,25 @@ particular issues can be addressed. {%- endtrans %}

    Haystack

    -

    {% trans docs=site_url('docs') -%} +

    {% trans torpost='http://blog.torproject.org/blog/ten-things-look-circumvention-tool', +docs=site_url('docs') -%} This was a closed-source network targeted at Iranian users. -Tor did a -good writeup on what to look for in a circumvention tool. +Tor did a good writeup on what to look for in a circumvention tool. Suffice it to say that being closed source and publicly targeting a specific country are not good ideas. I2P is, of course, open source. However, that source, and our technical documentation, need much more review. {%- endtrans %}

    {{ _('Paid VPN Services') }}

    -

    {% trans trac=i2pconv('trac.i2p2.i2p') -%} +

    {% trans trac='https://trac.i2p2.de/report/1' -%} You may contribute an analysis by entering a -new ticket on {{ trac }}. +new ticket on Trac. {%- endtrans %}

    {{ _('Others') }}

    -

    {% trans trac=i2pconv('trac.i2p2.i2p') -%} +

    {% trans trac='https://trac.i2p2.de/report/1' -%} You may contribute an analysis by entering a -new ticket on {{ trac }}. +new ticket on Trac. {%- endtrans %}

    diff --git a/i2p2www/pages/site/comparison/tor.html b/i2p2www/pages/site/comparison/tor.html index e1c15fd8..4b972f33 100644 --- a/i2p2www/pages/site/comparison/tor.html +++ b/i2p2www/pages/site/comparison/tor.html @@ -3,7 +3,7 @@ {% block content %}

    Tor / Onion Routing

    -[Tor] +[Tor] [Onion Routing]

    {% trans netdb=site_url('docs/how/network-database'), peerselection=site_url('docs/how/peer-selection') -%} Tor and Onion Routing are both anonymizing proxy networks, diff --git a/i2p2www/pages/site/contact.html b/i2p2www/pages/site/contact.html index e8c5502f..a85af313 100644 --- a/i2p2www/pages/site/contact.html +++ b/i2p2www/pages/site/contact.html @@ -1,6 +1,12 @@ {% extends "global/layout.html" %} {% block title %}{{ _('Contact') }}{% endblock %} {% block content %} +

    {{ _('Email') }}

    + +

    IRC

    {% trans -%} Our primary IRC network is the Irc2P network within I2P; a default tunnel to this network is set up with new router installs. diff --git a/i2p2www/pages/site/docs/api/i2pcontrol.html b/i2p2www/pages/site/docs/api/i2pcontrol.html index d3f50466..0faeecbe 100644 --- a/i2p2www/pages/site/docs/api/i2pcontrol.html +++ b/i2p2www/pages/site/docs/api/i2pcontrol.html @@ -2,7 +2,7 @@ {% block title %}I2PControl API{% endblock %} {% block content %}

    {% trans %}I2PControl - Remote Control Service{% endtrans %}

    -

    {% trans itoopie='http://itoopie.net/' -%} +

{% trans itoopie='http://'+i2pconv('itoopie.i2p')+'/' -%} +I2P enables a JSONRPC2 interface via the plugin I2PControl. The aim of the interface is to provide a simple way to interface with a running I2P node. A client, itoopie, has been developed in parallel. The JSONRPC2 implementation for the client as well as the plugin is provided by the java libraries JSON-RPC 2.0. @@ -191,6 +191,7 @@ Parameters are only provided in a named way (maps).
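As a reading aid, here is a minimal sketch of the JSON-RPC 2.0 envelope such a client might exchange with the plugin; the method name, parameter names and any port are illustrative assumptions rather than details taken from this page:

    import json

    request = {
        "jsonrpc": "2.0",
        "id": 42,
        "method": "Echo",              # hypothetical method name, for illustration only
        "params": {"Echo": "hello"},   # parameters are always passed by name (a map)
    }
    print(json.dumps(request))

    # a failure reply carries a standard JSON-RPC 2.0 error object,
    # using the error codes listed below
    error_reply = {"jsonrpc": "2.0", "id": 42,
                   "error": {"code": -32601, "message": "Method not found."}}
    print(json.dumps(error_reply))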

  • -32700 – {% trans %}JSON parse error.{% endtrans %}
  • -32600 – {% trans %}Invalid request.{% endtrans %}
  • -32601 – {% trans %}Method not found.{% endtrans %}
  • +
  • -32602 – {% trans %}Invalid parameters.{% endtrans %}
  • -32603 – {% trans %}Internal error.{% endtrans %}
    • {% trans %}I2PControl specific error codes.{% endtrans %} diff --git a/i2p2www/pages/site/docs/api/i2ptunnel.html b/i2p2www/pages/site/docs/api/i2ptunnel.html index 608ab170..f06536cd 100644 --- a/i2p2www/pages/site/docs/api/i2ptunnel.html +++ b/i2p2www/pages/site/docs/api/i2ptunnel.html @@ -40,8 +40,8 @@ A HTTP proxy used for browsing I2P and the regular internet anonymously through Browsing internet through I2P uses a random proxy specified by the "Outproxies:" option. {%- endtrans %}
    • Irc2P - localhost:6668 - {% trans %}An IRC tunnel to the default anonymous IRC network, Irc2P.{% endtrans %}
    • -
    • mtn.i2p2.i2p - localhost:8998 - {% trans -%} -The anonymous monotone +
    • mtn.i2p2.i2p - localhost:8998 - {% trans monotone='http://en.wikipedia.org/wiki/Monotone_%28software%29' -%} +The anonymous monotone sourcecode repository for I2P {%- endtrans %}
    • smtp.postman.i2p - localhost:7659 - {% trans postman=i2pconv('hq.postman.i2p') -%} diff --git a/i2p2www/pages/site/docs/api/streaming.html b/i2p2www/pages/site/docs/api/streaming.html index a7c60e2a..d15c51c8 100644 --- a/i2p2www/pages/site/docs/api/streaming.html +++ b/i2p2www/pages/site/docs/api/streaming.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}Streaming Library{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}November 2012{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.3{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

      {% trans %}Overview{% endtrans %}

      @@ -134,10 +134,26 @@ Use the access list as a blacklist for incoming connections. As of release {{ release }}. {%- endtrans %}
    + + + + - + + + + + + + + + + + @@ -357,11 +413,37 @@ CLOSE packets may contain data as well. {%- endtrans %}

    +

    Ping / Pong

    +

    {% trans -%} +There is no ping function at the I2CP layer (equivalent to ICMP echo) or in datagrams. +This function is provided in streaming. +Pings and pongs may not be combined with a standard streaming packet; +if the ECHO option is set, then +most other flags, options, ackThrough, sequenceNum, NACKs, payload, etc. are ignored. +{%- endtrans %}

    + +

    {% trans -%} +A ping packet must have the ECHO, SIGNATURE_INCLUDED, and FROM_INCLUDED flags set. +The sendStreamId must be greater than zero, and the receiveStreamId is ignored. +The sendStreamId may or may not correspond to an existing connection. +{%- endtrans %}

    + +

    {% trans -%} +A pong packet must have the ECHO flag set. +The sendStreamId must be zero, and the receiveStreamId is the sendStreamId from the ping. +The pong packet does not include any payload that was contained in the ping. +{%- endtrans %}

    + +

    {% trans -%} +Streaming may be configured to disable sending pongs with the configuration i2p.streaming.answerPings=false. +{%- endtrans %}

    + +
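A small sketch, in Python rather than the router's Java, of the ping/pong validation rules described in the paragraphs above; the flag names mirror the prose, and their mapping to actual bit positions is deliberately left out:

    def is_valid_ping(flags, send_stream_id):
        # a ping must carry ECHO, SIGNATURE_INCLUDED and FROM_INCLUDED,
        # and use a nonzero sendStreamId
        required = {"ECHO", "SIGNATURE_INCLUDED", "FROM_INCLUDED"}
        return required <= set(flags) and send_stream_id > 0

    def is_valid_pong(flags, send_stream_id, receive_stream_id, ping_send_stream_id):
        # a pong carries ECHO, a zero sendStreamId, and echoes the ping's
        # sendStreamId back as its receiveStreamId; any ping payload is dropped
        return ("ECHO" in flags
                and send_stream_id == 0
                and receive_stream_id == ping_send_stream_id)

    print(is_valid_ping({"ECHO", "SIGNATURE_INCLUDED", "FROM_INCLUDED"}, 1234))  # True
    print(is_valid_pong({"ECHO"}, 0, 1234, 1234))                                # True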

    {% trans %}Control Block Sharing{% endtrans %}

    {% trans -%} The streaming lib supports "TCP" Control Block sharing. -This shares two important streaming lib parameters -(window size and round trip time) +This shares three important streaming lib parameters +(window size, round trip time, round trip time variance) across connections to the same remote peer. This is used for "temporal" sharing at connection open/close time, not "ensemble" sharing during a connection (See @@ -370,7 +452,14 @@ There is a separate share per ConnectionManager (i.e. per local Destination) so that there is no information leakage to other Destinations on the same router. The share data for a given peer expires after a few minutes. -{%- endtrans %}

    +The following Control Block Sharing parameters can be set per router: +{%- endtrans %} +
      +
    • RTT_DAMPENING = 0.75
    • +
    • RTTDEV_DAMPENING = 0.75
    • +
    • WINDOW_DAMPENING = 0.75
    • +
    +
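One plausible way a dampening factor like those listed above could be applied when a closing connection's values are folded into the per-peer share; the blending rule below is an assumption for illustration, not a statement about the actual TCB-share implementation:

    RTT_DAMPENING = 0.75
    RTTDEV_DAMPENING = 0.75
    WINDOW_DAMPENING = 0.75

    def dampen(shared_value, closing_value, factor):
        # keep `factor` of the existing shared estimate, blend in the rest
        return factor * shared_value + (1.0 - factor) * closing_value

    shared = {"rtt": 300.0, "rttdev": 40.0, "window": 8.0}    # per-peer share
    closing = {"rtt": 180.0, "rttdev": 25.0, "window": 12.0}  # connection just closed

    shared["rtt"] = dampen(shared["rtt"], closing["rtt"], RTT_DAMPENING)
    shared["rttdev"] = dampen(shared["rttdev"], closing["rttdev"], RTTDEV_DAMPENING)
    shared["window"] = dampen(shared["window"], closing["window"], WINDOW_DAMPENING)
    print(shared)  # values offered to the next connection to the same peer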

    {% trans %}Other Parameters{% endtrans %}

    {% trans -%} @@ -381,10 +470,12 @@ The following parameters are hardcoded, but may be of interest for analysis:

  • MAX_RESEND_DELAY = 45*1000 (maximum RTO)
  • MIN_WINDOW_SIZE = 1
  • TREND_COUNT = 3 -
  • RTT_DAMPENING = 0.875
  • MIN_MESSAGE_SIZE = 512 (minimum MTU)
  • INBOUND_BUFFER_SIZE = maxMessageSize * (maxWindowSize + 2) -
  • INITIAL_TIMEOUT = 1.5 * initialRTT +
  • INITIAL_TIMEOUT (valid only before RTT is sampled) = 9000 +
  • "alpha" ( RTT dampening factor as per RFC 6298 ) = 0.125
  • +
  • "beta" ( RTTDEV dampening factor as per RFC 6298 ) = 0.25
  • +
  • "K" ( RTDEV multiplier as per RFC 6298 ) = 4
  • PASSIVE_FLUSH_DELAY = 250
  • Maximum RTT estimate: 60*1000 diff --git a/i2p2www/pages/site/docs/applications/bittorrent.html b/i2p2www/pages/site/docs/applications/bittorrent.html index 79320856..86744fa0 100644 --- a/i2p2www/pages/site/docs/applications/bittorrent.html +++ b/i2p2www/pages/site/docs/applications/bittorrent.html @@ -108,7 +108,7 @@ The peers key may be absent, or the peers value may be zero-length.
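Looking back at the streaming timer parameters just listed, a short sketch of the RFC 6298 smoothing those alpha, beta and K values plug into; the initialisation and clamping details here are assumptions, not taken from the streaming lib:

    ALPHA, BETA, K = 0.125, 0.25, 4          # the RFC 6298 factors listed above
    MAX_RTO = 45 * 1000                      # MAX_RESEND_DELAY, the maximum RTO (ms)
    INITIAL_TIMEOUT = 9000                   # used before any RTT sample exists

    def update_rto(srtt, rttvar, sample):
        # standard RFC 6298 smoothing; exact clamping in the streaming lib may differ
        if srtt is None:
            srtt, rttvar = sample, sample / 2.0
        else:
            rttvar = (1 - BETA) * rttvar + BETA * abs(srtt - sample)
            srtt = (1 - ALPHA) * srtt + ALPHA * sample
        return srtt, rttvar, min(MAX_RTO, srtt + K * rttvar)

    srtt = rttvar = None
    rto = INITIAL_TIMEOUT
    for sample_ms in (400, 350, 900):        # measured round trips in ms
        srtt, rttvar, rto = update_rto(srtt, rttvar, sample_ms)
    print(srtt, rttvar, rto)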

    {% trans -%} While compact response support is optional for both clients and trackers, it is highly -recommended as it reduces the nominal response size by over 90%. +recommended as it reduces the nominal response size by over 90%. {%- endtrans %}

    diff --git a/i2p2www/pages/site/docs/applications/supported.html b/i2p2www/pages/site/docs/applications/supported.html index f454e644..02f01c06 100644 --- a/i2p2www/pages/site/docs/applications/supported.html +++ b/i2p2www/pages/site/docs/applications/supported.html @@ -82,10 +82,10 @@ -

    {% trans trac=i2pconv('trac.i2p2.i2p') -%} +

    {% trans trac='https://trac.i2p2.de/report/1' -%} This is intended to be a comprehensive listing of applications used with I2P. If you know of something that's missing please submit a ticket on -Trac, and be sure to select the +Trac, and be sure to select the “www” component in the submission form. {%- endtrans %}

    @@ -310,6 +310,19 @@ email service by default. {%- endtrans %} [{{ _('bundled') }}]

  • + +
  • +

    Sylpheed Claws, Thunderbird, other MUAs — +{% trans reviews='http://'+i2pconv('hq.postman.i2p')+'/?page_id=9', +smtp='http://'+i2pconv('hq.postman.i2p')+'/?page_id=10', +pop3='http://'+i2pconv('hq.postman.i2p')+'/?page_id=11' -%} +Can be configured to use Postman's email service. See +this comparison of MUAs, +and configuration settings for +SMTP and POP3. +{%- endtrans %} + [{{ _('standalone') }}]

    +
  • {% trans %}File Sharing{% endtrans %}

    @@ -584,7 +597,7 @@ Source code available.
  • -

    {{ i2pconv('perv.i2p') }} — +

    {{ i2pconv('identiguy.i2p') }} — {% trans %}Dynamically updated eepsite index.{% endtrans %} [{{ _('service') }}]

  • @@ -632,6 +645,22 @@ Gateways allowing users on the public Internet to access eepsites. i2p.to — {% trans tino=i2pconv('tino.i2p') -%} tino's inproxy on the public Internet. +{%- endtrans %} + [{{ _('service') }}] + + +
  • + i2p.us — +{% trans -%} +Another inproxy on the public Internet. +{%- endtrans %} + [{{ _('service') }}] +
  • + +
  • + i2p.me — +{% trans -%} +Another inproxy on the public Internet. {%- endtrans %} [{{ _('service') }}]
  • @@ -645,7 +674,7 @@ Gateways allowing I2P users to access content hosted on the public Internet.
    • - {{ i2pconv('false.i2p') }} — + false.i2p — {% trans %}Publicly advertised outproxy running Squid, located in Germany.{% endtrans %} [{{ _('service') }}]
    • diff --git a/i2p2www/pages/site/docs/discussions/ntcp.html b/i2p2www/pages/site/docs/discussions/ntcp.html index 50409695..f5bf132e 100644 --- a/i2p2www/pages/site/docs/discussions/ntcp.html +++ b/i2p2www/pages/site/docs/discussions/ntcp.html @@ -144,10 +144,10 @@ Posted to new Syndie, 2007-03-27

      On the whole, I'm open to experimenting with this, though remember why NTCP is there in the first place - SSU failed in a congestion collapse. NTCP "just -works", and while 2-10% retransmission rates can be handled in normal -single-hop networks, that gives us a 40% retransmission rate with 2 hop +works", and while 2-10% retransmission rates can be handled in normal +single-hop networks, that gives us a 40% retransmission rate with 2 hop tunnels. If you loop in some of the measured SSU retransmission rates we saw -back before NTCP was implemented (10-30+%), that gives us an 83% retransmission +back before NTCP was implemented (10-30+%), that gives us an 83% retransmission rate. Perhaps those rates were caused by the low 10 second timeout, but increasing that much would bite us (remember, multiply by 5 and you've got half the journey). @@ -333,7 +333,7 @@ stream timeout. TCP in practice has retransmission timeouts orders of magnitude less, though yes, can get to 60s on links running through exposed wires or satellite transmissions ;) If we increase the streaming lib retransmission timeout to e.g. 75 seconds, we could go get a beer before a web page loads -(especially assuming less than a 98% reliable transport). That's one reason we +(especially assuming less than a 98% reliable transport). That's one reason we prefer NTCP.
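One way to read the 40% and 83% figures in the post above, assuming roughly five underlying links per direction ("multiply by 5 and you've got half the journey") and independent per-link losses; this reading is an inference, not something stated in the post:

    def end_to_end_loss(per_link_loss, links=5):
        # probability that at least one of the links drops the message
        return 1 - (1 - per_link_loss) ** links

    print(round(end_to_end_loss(0.10), 2))   # ~0.41, roughly the quoted 40%
    print(round(end_to_end_loss(0.30), 2))   # ~0.83, matching the quoted 83%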

      @@ -399,7 +399,7 @@ more often on NTCP, but maybe that's a hint on why NTCP performs worse. Posted to new Syndie, 2007-03-31

      measured SSU retransmission rates we saw back before NTCP was implemented -(10-30+%) +(10-30+%)

      Can the router itself measure this? If so, could a transport be selected based @@ -463,7 +463,7 @@ while still somewhat bothering to carry TCP streams. On that background, a small diversity of transports (as many as needed, but not more) appears sensible in either case. Which should be the main transport, depends on their performance-wise. I've seen nasty stuff on my line when I -tried to use its full capacity with UDP. Packet losses on the level of 35%. +tried to use its full capacity with UDP. Packet losses on the level of 35%.

      We could definitely try playing with UDP versus TCP priorities, but I'd urge @@ -502,7 +502,7 @@ build messages). On that background, a small diversity of transports (as many as needed, but not more) appears sensible in either case. Which should be the main transport, depends on their performance-wise. I've seen nasty stuff on my line when I -tried to use its full capacity with UDP. Packet losses on the level of 35%. +tried to use its full capacity with UDP. Packet losses on the level of 35%.

      diff --git a/i2p2www/pages/site/docs/discussions/tunnel.html b/i2p2www/pages/site/docs/discussions/tunnel.html index e8a3b5ce..0a172038 100644 --- a/i2p2www/pages/site/docs/discussions/tunnel.html +++ b/i2p2www/pages/site/docs/discussions/tunnel.html @@ -35,7 +35,7 @@ None of these are currently implemented.

      These padding strategies can be used on a variety of levels, addressing the exposure of message size information to different adversaries. After gathering -and reviewing some statistics +and reviewing some statistics from the 0.4 network, as well as exploring the anonymity tradeoffs, we're starting with a fixed tunnel message size of 1024 bytes. Within this however, the fragmented messages themselves are not padded by the tunnel at all (though for end to end @@ -165,7 +165,7 @@ vulnerability to predecessor attacks. While the endpoints and gateways of those tunnels will be randomly distributed across the network (perhaps even including the tunnel creator in that set), another alternative is to use the tunnel pathways themselves to pass along the request and response, as is done -in Tor. This, however, may lead to leaks +in Tor. This, however, may lead to leaks during tunnel creation, allowing peers to discover how many hops there are later on in the tunnel by monitoring the timing or packet count as diff --git a/i2p2www/pages/site/docs/how/cryptography.html b/i2p2www/pages/site/docs/how/cryptography.html index c0bb0602..19a0add8 100644 --- a/i2p2www/pages/site/docs/how/cryptography.html +++ b/i2p2www/pages/site/docs/how/cryptography.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}Low-level Cryptography Details{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}March 2012{% endtrans %}{% endblock %} -{% block accuratefor %}0.8.13{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

      {% trans -%} This page specifies the low-level details of the cryptography in I2P. @@ -47,7 +47,7 @@ AES encrypted payload using that key and IV.

      {% trans -%} The unencrypted ElGamal contains: {%- endtrans %}

      -
      +{% highlight lang='dataspec' %}
          +----+----+----+----+----+----+----+----+
          |nonz|           H(data)                |
          +----+                                  +
      @@ -58,9 +58,9 @@ The unencrypted ElGamal contains:
          |                                       |
          +    +----+----+----+----+----+----+----+
          |    |  data...
      -   +----+----+----+--//                   
      +   +----+----+----+-//
      +{% endhighlight %}
       
      -

      {% trans -%} The H(data) is the SHA256 of the data that is encrypted in the ElGamal block, and is preceded by a nonzero byte. @@ -77,10 +77,10 @@ Total length: typically 255 bytes. The encrypted ElGamal contains: {%- endtrans %}

      -
      +{% highlight lang='dataspec' %}
          +----+----+----+----+----+----+----+----+
          |  zero padding...       |              |
      -   +----+----+----+--// ----+              +
      +   +----+----+----+-//-+----+              +
          |                                       |
          +                                       +
          |       ElG encrypted part 1            |
      @@ -88,7 +88,7 @@ The encrypted ElGamal contains:
          |                                       |
          +    +----+----+----+----+----+----+----+
          |    |   zero padding...      |         |
      -   +----+----+----+----+--// ----+         +
      +   +----+----+----+----+-//-+----+         +
          |                                       |
          +                                       +
          |       ElG encrypted part 2            |
      @@ -97,8 +97,8 @@ The encrypted ElGamal contains:
          +         +----+----+----+----+----+----+
          |         +
          +----+----+
      +{% endhighlight %}
       
      -

      {% trans -%} Each encrypted part is prepended with zeros to a size of exactly 257 bytes. Total length: 514 bytes. @@ -108,7 +108,7 @@ This is encoded as two 256-byte encrypted parts, and there is a single byte of zero padding before each part at this layer. {%- endtrans %}
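For orientation, a sketch of assembling the unencrypted block described above (one nonzero byte, the SHA-256 of the data, then the data) and of framing two already-encrypted parts into the 514-byte result; the ElGamal exponentiation itself is omitted, and the ciphertext halves below are random stand-ins:

    import hashlib
    import os

    def build_unencrypted_block(data):
        nonzero = b"\xff"                     # any nonzero byte
        return nonzero + hashlib.sha256(data).digest() + data

    def frame_encrypted_parts(part1, part2):
        # each ElGamal ciphertext part is left-padded with zeros to exactly 257 bytes
        framed = part1.rjust(257, b"\x00") + part2.rjust(257, b"\x00")
        assert len(framed) == 514
        return framed

    data = os.urandom(222)
    print(len(build_unencrypted_block(data)))                             # 255 bytes
    print(len(frame_encrypted_parts(os.urandom(256), os.urandom(256))))   # 514 bytes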

      -

      {% trans url='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/ElGamalEngine.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4' -%} +

      {% trans url='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/ElGamalEngine.java' -%} See the ElGamal code. {%- endtrans %}

      @@ -139,19 +139,21 @@ or as a hexadecimal value: Using 2 as the generator. {%- endtrans %}

      -

      {% trans %}Short Exponent{% endtrans %}

      -

      {% trans commonstructures=site_url('docs/spec/common-structures'), -pdf='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.5952&rep=rep1&type=pdf', -benchmarks=site_url('misc/benchmarks'), -oldbenchmarks='http://www.eskimo.com/~weidai/benchmarks.html' -%} +

      {% trans %}Short Exponent{% endtrans %}

      +

      {% trans commonstructures=site_url('docs/spec/common-structures') -%} While the standard exponent size is 2048 bits (256 bytes) and the I2P PrivateKey -is a full 256 bytes, +is a full 256 bytes, in some cases we use the short exponent size of 226 bits (28.25 bytes). +{%- endtrans %}

      + +

      {% trans pdf='http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.5952&rep=rep1&type=pdf', +benchmarks=site_url('misc/benchmarks'), +oldbenchmarks='http://www.eskimo.com/~weidai/benchmarks.html' -%} This should be safe for use with the Oakley primes, per On Diffie-Hellman Key Agreement with Short Exponents - van Oorschot, Weiner at EuroCrypt 96, and crypto++'s benchmarks. -Benchmarks originally at {{ oldbenchmarks }} (now dead), +Benchmarks originally at this link, now dead, rescued from the wayback machine, dated Apr 23, 2008. {%- endtrans %}

      @@ -164,6 +166,13 @@ Also, Koshiba & Kurosawa: Short Exponent Diffie-Hellman The remainder of the PrivateKey is padded with zeroes. {%- endtrans %}

      +

      {% trans -%} +Prior to release 0.9.8, all routers used the short exponent. +As of release 0.9.8, 64-bit x86 routers use a full 2048-bit exponent. +Other routers continue to use the short exponent due to concerns about processor load. +The transition to a longer exponent for these platforms is a topic for further study. +{%- endtrans %}

      +
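A toy sketch of Diffie-Hellman with a 226-bit private exponent as described above. A small 64-bit prime stands in for the real 2048-bit modulus so the example stays short; generator 2 matches the text:

    import random

    def short_exponent_keypair(p, g, exponent_bits=226):
        x = random.getrandbits(exponent_bits) | 1   # ~226-bit private exponent, nonzero
        return x, pow(g, x, p)                      # (private, public = g^x mod p)

    p = 0xFFFFFFFFFFFFFFC5   # stand-in prime (2**64 - 59); NOT the real 2048-bit modulus
    g = 2
    a_priv, a_pub = short_exponent_keypair(p, g)
    b_priv, b_pub = short_exponent_keypair(p, g)
    # both sides derive the same shared secret g^(ab) mod p
    assert pow(b_pub, a_priv, p) == pow(a_pub, b_priv, p)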

      {% trans %}Obsolescence{% endtrans %}

      {% trans -%} The vulnerability of the network to an ElGamal attack and the impact of transitioning to a longer bit length is to be studied. @@ -198,9 +207,9 @@ For encryption of periodic tunnel test mes

{% trans rfc2313='http://tools.ietf.org/html/rfc2313', -code1='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/CryptixAESEngine.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4', -code2='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/CryptixRijndael_Algorithm.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4', -code3='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/ElGamalAESEngine.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4' -%} +code1='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/CryptixAESEngine.java', +code2='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/CryptixRijndael_Algorithm.java', +code3='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/ElGamalAESEngine.java' -%} We use AES with 256 bit keys and 128 bit blocks in CBC mode. The padding used is specified in IETF RFC-2313 (PKCS#5 1.5, section 8.1 (for block type 02)). In this case, padding consists of pseudorandomly generated octets to match 16 byte blocks. @@ -277,7 +286,7 @@ It may be quite difficult to make any change backward-compatible.
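Returning to the padding rule in the AES paragraph above, a small sketch of filling a plaintext out to a 16-byte block boundary with pseudorandom octets; whether a full extra block is added when the length is already a multiple of 16 is not specified here and is left out:

    import os

    def pad_to_block(data, block=16):
        # fill with pseudorandom octets up to the next 16-byte boundary
        remainder = len(data) % block
        if remainder:
            data += os.urandom(block - remainder)
        return data

    payload = b"example payload"          # 15 bytes
    print(len(pad_to_block(payload)))     # 16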

    DSA

    -

    {% trans code='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/DSAEngine.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4' -%} +

    {% trans code='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/DSAEngine.java' -%} Signatures are generated and verified with 1024 bit DSA (L=1024, N=160), as implemented in [DSAEngine]. DSA was chosen because it is much faster for signatures than ElGamal. @@ -326,12 +335,12 @@ DSA was chosen because it is much faster for signatures than ElGamal.

    - C1F4D27D 40093B42 9E962D72 23824E0B BC47E7C8 32A39236
    - FC683AF8 48895810 75FF9082 ED32353D 4374D730 1CDA1D23
    - C431F469 8599DDA0 2451824F F3697525 93647CC3 DDC197DE
    - 985E43D1 36CDCFC6 BD5409CD 2F450821 142A5E6F 8EB1C3AB
    - 5D0484B8 129FCF17 BCE4F7F3 3321C3CB 3DBB14A9 05E7B2B3
    - E93BE470 8CBCC82
    + 0C1F4D27 D40093B4 29E962D7 223824E0 BBC47E7C 832A3923
    + 6FC683AF 84889581 075FF908 2ED32353 D4374D73 01CDA1D2
    + 3C431F46 98599DDA 02451824 FF369752 593647CC 3DDC197D
    + E985E43D 136CDCFC 6BD5409C D2F45082 1142A5E6 F8EB1C3A
    + B5D0484B 8129FCF1 7BCE4F7F 33321C3C B3DBB14A 905E7B2B
    + 3E93BE47 08CBCC82
     

    {% trans commonstructures=site_url('docs/spec/common-structures') -%} @@ -374,7 +383,7 @@ It may be quite difficult to make any change backward-compatible.

    SHA256

    -

    {% trans code='http://'+i2pconv('trac.i2p2.i2p')+'/browser/core/java/src/net/i2p/crypto/SHA256Generator.java?rev=85a542c53d910dffbf34cdcefb8a2faeee96adc4' -%} +

    {% trans code='https://github.com/i2p/i2p.i2p/tree/master/core/java/src/net/i2p/crypto/SHA256Generator.java' -%} Hashes within I2P are plain old SHA256, as implemented in [SHA256Generator] {%- endtrans %}

    diff --git a/i2p2www/pages/site/docs/how/elgamal-aes.html b/i2p2www/pages/site/docs/how/elgamal-aes.html index ec84bc47..9c44a9c3 100644 --- a/i2p2www/pages/site/docs/how/elgamal-aes.html +++ b/i2p2www/pages/site/docs/how/elgamal-aes.html @@ -103,7 +103,7 @@ and an encrypted AES block.

    {% trans -%} The encrypted message contains: {%- endtrans %}

    -
    +{% highlight lang='dataspec' %}
        +----+----+----+----+----+----+----+----+
        |                                       |
        +                                       +
    @@ -121,8 +121,8 @@ The encrypted message contains:
        +         +----+----+----+----+----+----+
        |         +
        +----+----+
    +{% endhighlight %}
     
    -

    {% trans %}ElGamal Block{% endtrans %}

    {% trans -%} @@ -132,7 +132,7 @@ The encrypted ElGamal Block is always 514 bytes long.

    {% trans -%} The unencrypted ElGamal data is 222 bytes long, containing: {%- endtrans %}

    -
    +{% highlight lang='dataspec' %}
        +----+----+----+----+----+----+----+----+
        |                                       |
        +                                       +
    @@ -159,7 +159,7 @@ The unencrypted ElGamal data is 222 bytes long, containing:
        +                             +----+----+
        |                             |
        +----+----+----+----+----+----+
    -
    +{% endhighlight %}

    {% trans commonstructures=site_url('docs/spec/common-structures') -%} The 32-byte diff --git a/i2p2www/pages/site/docs/how/network-database.html b/i2p2www/pages/site/docs/how/network-database.html index c9c9dad6..6eb6b83b 100644 --- a/i2p2www/pages/site/docs/how/network-database.html +++ b/i2p2www/pages/site/docs/how/network-database.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}The Network Database{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}June 2013{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}{% trans %}October 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    {% trans %}Overview{% endtrans %}

    @@ -43,6 +43,20 @@ to be present:
    • caps ({% trans %}Capabilities flags - used to indicate floodfill participation, approximate bandwidth, and perceived reachability{% endtrans %}) +
        +
      • B: {% trans %}SSU Testing{% endtrans %}
      • +
      • C: {% trans %}SSU Introducer{% endtrans %}
      • +
      • f: {% trans %}Floodfill{% endtrans %}
      • +
      • H: {% trans %}Hidden{% endtrans %}
      • +
      • K: {% trans amount='12KBps' %}Under {{amount }} shared bandwidth{% endtrans %}
      • +
      • L: {% trans amount='12 - 32KBps' %}{{ amount }} shared bandwidth{% endtrans %} ({% trans %}default{% endtrans %})
      • +
      • M: {% trans amount='32 - 64KBps' %}{{ amount }} shared bandwidth{% endtrans %}
      • +
      • N: {% trans amount='64 - 128KBps' %}{{ amount }} shared bandwidth{% endtrans %}
      • +
      • O: {% trans amount='128KBps' %}Over {{ amount }} shared bandwidth{% endtrans %}
      • +
      • R: {% trans %}Reachable{% endtrans %}
      • +
      • U: {% trans %}Unreachable{% endtrans %}
      • +
      +"Shared bandwidth" == (share %) * min(in bw, out bw)
    • coreVersion ({% trans %}The core library version, always the same as the router version{% endtrans %}) @@ -279,7 +293,7 @@ automatically enabled. {%- endtrans %}
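Referring back to the caps letters and the shared-bandwidth formula above, a rough sketch of deriving a router's bandwidth class; the handling of exact boundary values is an assumption:

    def shared_bandwidth(share_percent, in_kbps, out_kbps):
        # "Shared bandwidth" == (share %) * min(in bw, out bw)
        return (share_percent / 100.0) * min(in_kbps, out_kbps)

    def bandwidth_cap(shared_kbps):
        if shared_kbps < 12:   return "K"
        if shared_kbps < 32:   return "L"   # the default class
        if shared_kbps < 64:   return "M"
        if shared_kbps < 128:  return "N"
        return "O"

    # e.g. an 80% share of a 100 KBps down / 50 KBps up line -> 40 KBps shared -> "M"
    print(bandwidth_cap(shared_bandwidth(80, 100, 50)))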

      {% trans -%} -With the current rules for automatic opt-in, approximately 6% of +With the current rules for automatic opt-in, approximately 6% of the routers in the network are floodfill routers. {%- endtrans %}

      @@ -398,11 +412,11 @@ This message is sent back to one of the client's inbound tunnels.

      {% trans %}Flooding{% endtrans %}

      -

      {% trans -%} +

      {% trans floodsize=3 -%} After a floodfill router receives a DatabaseStoreMessage containing a valid RouterInfo or LeaseSet which is newer than that previously stored in its local NetDb, it "floods" it. -To flood a NetDb entry, it looks up several (currently 4) floodfill routers closest to the routing key +To flood a NetDb entry, it looks up several (currently {{ floodsize }}) floodfill routers closest to the routing key of the NetDb entry. (The routing key is the SHA256 Hash of the RouterIdentity or Destination with the date (yyyyMMdd) appended.) By flooding to those closest to the key, not closest to itself, the floodfill ensures that the storage gets to the right place, even if the storing router did not have good knowledge of the @@ -489,6 +503,13 @@ Given the current size of the network, a router has
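A short sketch of the daily routing key just described: SHA-256 over the 32-byte NetDb key with the UTC date in yyyyMMdd form appended; the identity hash below is a stand-in:

    import binascii
    import hashlib
    from datetime import datetime

    def routing_key(netdb_key, when=None):
        # netdb_key is the 32-byte SHA-256 hash identifying the RouterInfo/LeaseSet;
        # the UTC date (yyyyMMdd) is appended before hashing again
        when = when or datetime.utcnow()
        return hashlib.sha256(netdb_key + when.strftime("%Y%m%d").encode()).digest()

    netdb_key = hashlib.sha256(b"example router identity").digest()  # stand-in hash
    key = routing_key(netdb_key)
    # the entry is then flooded to the few floodfill routers (currently 3,
    # per the text above) whose hashes are closest to this key
    print(binascii.hexlify(key))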

      {% trans %}RouterInfo Storage Verification{% endtrans %}

      +

      {% trans egger2013='http://wwwcip.informatik.uni-erlangen.de/~spjsschl/i2p.pdf' -%} +Note: RouterInfo verification is disabled as of release 0.9.7.1 to prevent +the attack described in the paper +Practical Attacks Against the I2P Network. +It is not clear if verification can be redesigned to be done safely. +{%- endtrans %}

      +

      {% trans -%} To verify a storage was successful, a router simply waits about 10 seconds, then sends a lookup to another floodfill router close to the key @@ -567,16 +588,16 @@ Some scenarios are discussed below. {%- endtrans %}

      {% trans %}General Mitigation Through Growth{% endtrans %}

      -

      {% trans -%} -There are currently hundreds of floodfill routers in the network. +

      {% trans ffcount=600 -%} +There are currently around {{ ffcount }} floodfill routers in the network. Most of the following attacks will become more difficult, or have less impact, as the network size and number of floodfill routers increase. {%- endtrans %}

      {% trans %}General Mitigation Through Redundancy{% endtrans %}

      -

      {% trans -%} -Via flooding, all netdb entries are stored on the 8 floodfill routers closest to the key. +

      {% trans floodsize=3 -%} +Via flooding, all netdb entries are stored on the {{ floodsize }} floodfill routers closest to the key. {%- endtrans %}

      @@ -706,15 +727,6 @@ The extent of the issue, and methods for mitigation are a topic for further study. {%- endtrans %}

      -

      {% trans -%} -One consequence of daily keyspace rotation is that the distributed network database -may become unreliable for a few minutes after the rotation -- -lookups will fail because the new "closest" router has not received a store yet. -The extent of the issue, and methods for mitigation -(for example netdb "handoffs" at midnight) -are a topic for further study. -{%- endtrans %}

      -

      {% trans %}Bootstrap Attacks{% endtrans %}

      {% trans -%} @@ -819,7 +831,7 @@ more correlative. Of course, a larger network makes a Sybil attack that much har

{% trans threatmodel=site_url('docs/how/threat-model') -%} However, the general issue of DHT information leakage in I2P needs further investigation. The floodfill routers are in a position to observe queries and gather information. -Certainly, at a level of f = 0.2 (20% malicious nodes, as specifed in the paper) +Certainly, at a level of f = 0.2 (20% malicious nodes, as specified in the paper) we expect that many of the Sybil threats we describe (here, here and diff --git a/i2p2www/pages/site/docs/how/peer-selection.html index fd26f061..d562e0b8 100644 --- a/i2p2www/pages/site/docs/how/peer-selection.html +++ b/i2p2www/pages/site/docs/how/peer-selection.html @@ -55,7 +55,7 @@ of client and exploratory tunnels, and a tunnel lifetime is only 10 minutes.

      {% trans %}Further Information{% endtrans %}

      {% trans pdf=url_for('static', filename='pdf/I2P-PET-CON-2009.1.pdf'), -url='http://www.pet-con.org/index.php/PET_Convention_2009.1' -%} +url='http://web.archive.org/web/20100413184504/http://www.pet-con.org/index.php/PET_Convention_2009.1' -%} For more information see the paper Peer Profiling and Selection in the I2P Anonymous Network presented at PET-CON 2009.1. @@ -237,7 +237,7 @@ To prevent some simple attacks, and for performance, there are the following res Two peers from the same /16 IP space may not be in the same tunnel. {%- endtrans %}

    • {% trans -%} -A peer may participate in a maximum of 33% of all tunnels created by the router. +A peer may participate in a maximum of 33% of all tunnels created by the router. {%- endtrans %}
    • {% trans -%} Peers with extremely low bandwidth are not used. diff --git a/i2p2www/pages/site/docs/how/tech-intro.html b/i2p2www/pages/site/docs/how/tech-intro.html index aa7fdc9e..230bfa78 100644 --- a/i2p2www/pages/site/docs/how/tech-intro.html +++ b/i2p2www/pages/site/docs/how/tech-intro.html @@ -681,7 +681,7 @@ See also the Network Comparisons Page. {%- endtrans %}

      Tor

      -

      {% trans %}website{% endtrans %}

      +

      {% trans %}website{% endtrans %}

      {% trans -%} At first glance, Tor and I2P have many functional and anonymity related @@ -1004,9 +1004,9 @@ It is hosted on http://{{ bob }}/Robert.h

      PyBit

      {% trans dev='Blub' -%}Developed by: {{ dev }}{%- endtrans %}

      -

      {% trans pebcache=i2pconv('pebcache.i2p') -%} +

      {% trans pybit='http://'+i2pconv('echelon.i2p')+'/pybit/' -%} PyBit is a Bittorrent client written in Python. -It is hosted on http://{{ pebcache }}/ +It is hosted on {{ pybit }} {%- endtrans %}

      I2Phex

      diff --git a/i2p2www/pages/site/docs/how/threat-model.html b/i2p2www/pages/site/docs/how/threat-model.html index 33f1cd35..e1e2db5f 100644 --- a/i2p2www/pages/site/docs/how/threat-model.html +++ b/i2p2www/pages/site/docs/how/threat-model.html @@ -2,6 +2,45 @@ {% block title %}{% trans %}I2P's Threat Model{% endtrans %}{% endblock %} {% block lastupdated %}{% trans %}November 2010{% endtrans %}{% endblock %} {% block accuratefor %}0.8.1{% endblock %} + +{% macro get_level(num) %} +{%- if num == 1 %}{{ _('low') }} +{%- elif num == 2 %}{{ _('medium') }} +{%- elif num == 3 %}{{ _('high') }} +{%- else %}ERR_INVALID +{%- endif %} +{%- endmacro %} + +{% macro calculate_severity(Da, R, A) %} +{%- if R + A > 4 %}{{ caller(Da + 2) }} +{%- elif R + A > 3 %}{{ caller(Da + 1) }} +{%- else %}{{ caller(Da) }} +{%- endif %} +{%- endmacro %} + +{% macro calculate_priority(Da, R, E, A, Di) %} +{%- call(severity) calculate_severity(Da, R, A) %} +{%- if E == 3 and Di == 3 %}{{ severity + 4 }} +{%- elif (E == 3 and Di == 2) or ( E == 2 and Di == 3) %}{{ severity + 3}} +{%- elif (E == 3 and Di == 1) or ( E == 1 and Di == 3) %}{{ severity + 2}} +{%- elif E == 2 and Di == 2 %}{{ severity + 1}} +{%- else %}{{ severity }} +{%- endif %} +{%- endcall %} +{%- endmacro %} + +{% macro DREAD_score(Da, R, E, A, Di) %} +
        +
      • {{ _('Damage Potential') }}: {{ get_level(Da) }}
      • +
      • {{ _('Reliability') }}: {{ get_level(R) }}
      • +
      • {{ _('Exploitability') }}: {{ get_level(E) }}
      • +
      • {{ _('Affected Users') }}: {{ get_level(A) }}
      • +
      • {{ _('Discoverability') }}: {{ get_level(Di) }}
      • +
      • {{ _('Severity') }}: {% call(S) calculate_severity(Da, R, A) %}{{ S }}{% endcall %}/5
      • +
      • {{ _('Priority') }}: {{ calculate_priority(Da, R, E, A, Di) }}/9
      • +
      +{% endmacro %} + {% block content %}
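A plain-Python rendering of the severity and priority macros above, with one worked example (the rating used later for timing attacks), so the arithmetic is easy to check:

    def severity(damage, reliability, affected):
        if reliability + affected > 4:
            return damage + 2
        if reliability + affected > 3:
            return damage + 1
        return damage

    def priority(damage, reliability, exploitability, affected, discoverability):
        s = severity(damage, reliability, affected)
        if exploitability == 3 and discoverability == 3:
            return s + 4
        if (exploitability, discoverability) in ((3, 2), (2, 3)):
            return s + 3
        if (exploitability, discoverability) in ((3, 1), (1, 3)):
            return s + 2
        if exploitability == 2 and discoverability == 2:
            return s + 1
        return s

    # e.g. the timing-attack rating DREAD_score(2, 2, 2, 3, 2):
    print(severity(2, 2, 3))            # 4 (R + A = 5 > 4, so Da + 2)
    print(priority(2, 2, 2, 3, 2))      # 5 (E == Di == 2 adds 1)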

      {% trans %}What do we mean by "anonymous"?{% endtrans %}

      @@ -75,12 +114,12 @@ the current implementation does not. {%- endtrans %}

      -

      {% trans %}The Threat Model (Attacks){% endtrans %}

      +

      {% trans %}The Threat Model{% endtrans %}

      {% trans -%} I2P design started in 2003, not long after the advent of [Onion Routing], [Freenet], and -[Tor]. +[Tor]. Our design benefits substantially from the research published around that time. I2P uses several onion routing techniques, so we continue to benefit from the significant academic interest in Tor. @@ -118,7 +157,29 @@ The to review. {%- endtrans %}

      -

      {% trans %}Index{% endtrans %}

      +{# Hide DREAD ratings until we know how we want to use them + +

      {% trans DREAD='https://blogs.msdn.com/b/david_leblanc/archive/2007/08/13/dreadful.aspx' -%} +Attacks are judged using the modified DREAD model: +{%- endtrans %}

      + +
        +
      • {{ _('Damage Potential') }}: {% trans %}If a threat exploit occurs, how much damage will be caused?{% endtrans %}
      • +
      • {{ _('Reliability') }}: {% trans %}How reliable is the attack?{% endtrans %}
      • +
      • {{ _('Exploitability') }}: {% trans %}What is needed to exploit this threat?{% endtrans %}
      • +
      • {{ _('Affected Users') }}: {% trans %}How many users will be affected?{% endtrans %}
      • +
      • {{ _('Discoverability') }}: {% trans %}How easy is it to discover this threat?{% endtrans %}
      • +
      + +

      {% trans DREAD='https://blogs.msdn.com/b/david_leblanc/archive/2007/08/13/dreadful.aspx' -%} +Each category is given a rating of low, medium or high. The severity +and priority scores are calculated using the equations outlined +here. +{%- endtrans %}

      + +#} + +

      {% trans %}Index of Attacks{% endtrans %}

      • {% trans %}Brute force attacks{% endtrans %}
      • {% trans %}Timing attacks{% endtrans %}
      • @@ -142,6 +203,8 @@ to review.

        {% trans %}Brute force attacks{% endtrans %}

        +{# DREAD_score(2, 1, 1, 1, 3) #} +

        {% trans -%} A brute force attack can be mounted by a global passive or active adversary, watching all the messages pass between all of the nodes and attempting to correlate @@ -184,6 +247,8 @@ are discussed on the

        {% trans %}Timing attacks{% endtrans %}

        +{# DREAD_score(2, 2, 2, 3, 2) #} +

        {% trans -%} I2P's messages are unidirectional and do not necessarily imply that a reply will be sent. However, applications on top of I2P will most likely have @@ -219,6 +284,8 @@ References: Low-Resource Routing Attacks Against Anonymous S

        {% trans %}Intersection attacks{% endtrans %}

        +{# DREAD_score(3, 2, 2, 3, 3) #} +

        {% trans -%} Intersection attacks against low latency systems are extremely powerful - periodically make contact with the target and keep track of what peers are on @@ -301,18 +368,19 @@ Reference: One Cell Enough There are a whole slew of denial of service attacks available against I2P, each with different costs and consequences: {%- endtrans %}

        -
          -
        • {% trans -%} + +{# DREAD_score(1, 1, 2, 1, 3) #} +

          {% trans -%} Greedy user attack: This is simply people trying to consume significantly more resources than they are willing to contribute. The defense against this is: -{%- endtrans %} +{%- endtrans %}

          • {% trans comparisons=site_url('comparison') -%} Set defaults so that most users provide resources to the network. In I2P, users route traffic by default. In sharp distinction to other networks, -over 95% of I2P users relay traffic for others. +over 95% of I2P users relay traffic for others. {%- endtrans %}
          • {% trans -%} Provide easy configuration options so that users may increase their @@ -323,8 +391,10 @@ metrics such as "share ratio" so that users may see what they are contributing. Maintain a strong community with blogs, forums, IRC, and other means of communication. {%- endtrans %}
          -
        • -
        • {% trans peerselection=site_url('docs/how/peer-selection') -%} +
          + +{# DREAD_score(2, 1, 1, 2, 3) #} +

          {% trans peerselection=site_url('docs/how/peer-selection') -%} Starvation attack: A hostile user may attempt to harm the network by creating a significant number of peers in the network who are not identified as being under control of the same entity (as with Sybil). These nodes then @@ -340,8 +410,11 @@ them, or using them rarely. We have significantly enhanced the ability to recognize and avoid troublesome peers; however there are still significant efforts required in this area. -{%- endtrans %}

        • -
        • {% trans todo=site_url('get-involved/todo') -%} +{%- endtrans %}

          +
          + +{# DREAD_score(1, 2, 2, 2, 3) #} +

          {% trans todo=site_url('get-involved/todo') -%} Flooding attack: A hostile user may attempt to flood the network, a peer, a destination, or a tunnel. Network and peer flooding is possible, and I2P does nothing to prevent standard IP layer flooding. The flooding of @@ -356,8 +429,11 @@ larger load. If, on the other hand, the load is more than the client can deal with, they can instruct the tunnels to throttle the number of messages or bytes they should pass on (once the advanced tunnel operation is implemented). -{%- endtrans %}

        • -
        • {% trans -%} +{%- endtrans %}

          +
          + +{# DREAD_score(1, 1, 1, 1, 1) #} +

          {% trans -%} CPU load attack: There are currently some methods for people to remotely request that a peer perform some cryptographically expensive operation, and a hostile attacker could use these to flood that peer with @@ -366,8 +442,11 @@ engineering practices and potentially requiring nontrivial certificates (e.g. HashCash) to be attached to these expensive requests should mitigate the issue, though there may be room for an attacker to exploit various bugs in the implementation. -{%- endtrans %}

        • -
        • {% trans peerselection=site_url('docs/how/peer-selection'), +{%- endtrans %}

          +
          + +{# DREAD_score(2, 2, 3, 2, 3) #} +

          {% trans peerselection=site_url('docs/how/peer-selection'), netdb=site_url('docs/how/network-database') -%} Floodfill DOS attack: A hostile user may attempt to harm the network by becoming a floodfill router. The current defenses against unreliable, @@ -379,10 +458,13 @@ Some defenses and however there is much more to do. For more information see the network database page. -{%- endtrans %}

        • -
        +{%- endtrans %}

        +

        {% trans %}Tagging attacks{% endtrans %}

        + +{# DREAD_score(1, 3, 1, 1, 1) #} +

        {% trans todo=site_url('get-involved/todo') -%} Tagging attacks - modifying a message so that it can later be identified further along the path - are by themselves impossible in I2P, as messages @@ -399,6 +481,8 @@ as the links are encrypted and messages signed.

        {% trans %}Partitioning attacks{% endtrans %}

        +{# DREAD_score(3, 1, 1, 1, 2) #} +

        {% trans -%} Partitioning attacks - finding ways to segregate (technically or analytically) the peers in a network - are important to keep in mind when dealing with a @@ -437,6 +521,8 @@ Also discussed on the network database page (bo

        {% trans %}Predecessor attacks{% endtrans %}

        +{# DREAD_score(1, 1, 1, 1, 3) #} +

        {% trans -%} The predecessor attack is passively gathering statistics in an attempt to see what peers are 'close' to the destination by participating in their tunnels and @@ -479,6 +565,9 @@ which is an update to the 2004 predecessor attack paper

        {% trans %}Harvesting attacks{% endtrans %}

        + +{# DREAD_score(1, 1, 2, 2, 3) #} +

        {% trans -%} "Harvesting" means compiling a list of users running I2P. It can be used for legal attacks and to help @@ -523,6 +612,9 @@ enact other restricted route methods.

        {% trans %}Identification Through Traffic Analysis{% endtrans %}

        + +{# DREAD_score(1, 1, 2, 3, 3) #} +

        {% trans transport=site_url('docs/transport') -%} By inspecting the traffic into and out of a router, a malicious ISP or state-level firewall could identify that a computer is running I2P. @@ -574,18 +666,20 @@ Working directly with DPI and obfuscation experts {%- endtrans %}

      -

      {% trans pdf='http://www.cse.chalmers.se/%7Ejohnwolf/publications/hjelmvik_breaking.pdf' -%} +

      {% trans pdf='http://www.iis.se/docs/hjelmvik_breaking.pdf' -%} Reference: Breaking and Improving Protocol Obfuscation {%- endtrans %}

      {% trans %}Sybil attacks{% endtrans %}

      +{# DREAD_score(3, 2, 1, 3, 3) #} +

      {% trans -%} Sybil describes a category of attacks where the adversary creates arbitrarily large numbers of colluding nodes and uses the increased numbers to help mounting other attacks. For instance, if an attacker is in a network where peers -are selected randomly and they want an 80% chance to be one of those peers, they +are selected randomly and they want an 80% chance to be one of those peers, they simply create five times the number of nodes that are in the network and roll the dice. When identity is free, Sybil can be a very potent technique for a powerful adversary. The primary technique to address this is simply to make @@ -627,6 +721,9 @@ for more Sybil discussion.

      {% trans %}Buddy Exhaustion attacks{% endtrans %}

      + +{# DREAD_score(3, 2, 2, 1, 3) #} +

      {% trans pdf='http://www.eecs.berkeley.edu/~pmittal/publications/nisan-torsk-ccs10.pdf' -%} (Reference: In Search of an Anonymous and Secure Lookup Section 5.2) {%- endtrans %}

      @@ -640,7 +737,7 @@ This is somewhat mitigated by our peer profiling methods used to monitor the performance of peers. However, this is a powerful attack as the number of routers approaches -f = 0.2, or 20% malicious nodes, as specifed in the paper. +f = 0.2, or 20% malicious nodes, as specified in the paper. The malicious routers could also maintain connections to the target router and provide excellent forwarding bandwidth for traffic over those connections, in an attempt to manipulate the profiles managed by the target and appear attractive. @@ -650,6 +747,8 @@ Further research and defenses may be necessary.

      {% trans %}Cryptographic attacks{% endtrans %}

      +{# DREAD_score(3, 2, 1, 3, 1) #} +

      {% trans cryptography=site_url('docs/how/cryptography') -%} We use strong cryptography with long keys, and we assume the security of the industry-standard cryptographic primitives used in I2P, as documented @@ -689,6 +788,9 @@ end to end messages include simple random padding.

      {% trans %}Floodfill Anonymity attacks{% endtrans %}

      + +{# DREAD_score(3, 2, 1, 2, 2) #} +

      {% trans netdb=site_url('docs/how/network-database') -%} In addition to the floodfill DOS attacks described above, floodfill routers are uniquely positioned @@ -716,6 +818,9 @@ Several scenarios are discussed on the

      {% trans %}Central Resource Attacks{% endtrans %}

      + +{# DREAD_score(1, 1, 1, 3, 3) #} +

      {% trans -%} There are a few centralized or limited resources (some inside I2P, some not) that could be attacked or used as a vector for attacks. @@ -769,6 +874,8 @@ and would shrink the network (in the short-to-medium term), just as the loss of

      {% trans %}Development attacks{% endtrans %}

      +{# DREAD_score(2, 1, 1, 3, 1) #} +

      {% trans -%} These attacks aren't directly on the network, but instead go after its development team by either introducing legal hurdles on anyone contributing to the development @@ -807,6 +914,9 @@ should any defense be necessary. {%- endtrans %}

      {% trans %}Implementation attacks (bugs){% endtrans %}

      + +{# DREAD_score(2, 2, 1, 3, 1) #} +

      {% trans -%} Try as we might, most nontrivial applications include errors in the design or implementation, and I2P is no exception. There may be bugs that could be exploited to diff --git a/i2p2www/pages/site/docs/index.html b/i2p2www/pages/site/docs/index.html index 3a95ba2f..e176b8cc 100644 --- a/i2p2www/pages/site/docs/index.html +++ b/i2p2www/pages/site/docs/index.html @@ -16,10 +16,10 @@ the lower layers are inside the router itself. The interface between applications and the router is the I2CP (I2P Control Protocol) API. {%- endtrans %}

      -

      {% trans trac=i2pconv('trac.i2p2.i2p') -%} +

      {% trans trac='https://trac.i2p2.de/report/1' -%} The I2P Project is committed to maintaining accurate, current documentation. If you find any inaccuracies in the documents linked below, please -enter a ticket identifying the problem. +enter a ticket identifying the problem. {%- endtrans %}

      {% trans %}Index to Technical Documentation{% endtrans %}

      @@ -220,9 +220,9 @@ Traditionally used only by Java applications and higher-level APIs.
    • {{ _('Developer forum inside I2P') }}
    • -{{ _('Bug tracker') }} +{{ _('Bug tracker') }}
    • -{{ _('Viewmtn inside I2P') }}. +{{ _('Viewmtn inside I2P') }}.
    • {{ _('I2P Source exported to GitHub') }}
    • @@ -230,11 +230,13 @@ Traditionally used only by Java applications and higher-level APIs.
    • {{ _('Source translation at Transifex') }}
    • -{{ _('Roadmap wiki') }} +{{ _('Roadmap wiki') }}
    • {{ _('Old roadmap') }} ({{ _('not current') }})
    • {{ _('To Do List') }} ({{ _('not current') }}) +
    • +Ancient invisiblenet I2P documents (2003)
    diff --git a/i2p2www/pages/site/docs/naming.html b/i2p2www/pages/site/docs/naming.html index 405320df..7e3ff247 100644 --- a/i2p2www/pages/site/docs/naming.html +++ b/i2p2www/pages/site/docs/naming.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}Naming and Addressbook{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}March 2012{% endtrans %}{% endblock %} -{% block accuratefor %}0.8.13{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    {% trans %}Naming in I2P{% endtrans %}

    {% trans %}Overview{% endtrans %}

    @@ -50,8 +50,8 @@ The components are:
    1. {% trans -%} -The client application which does local lookups in hosts.txt -and also takes care of the Base32 hostnames. +The local naming service which does lookups +and also handles Base32 hostnames. {%- endtrans %}
    2. {% trans -%} The HTTP proxy which asks the router for lookups and points @@ -74,7 +74,7 @@ for addressbook configuration and viewing of the local host lists.
    -

    {% trans %}Naming Files and Lookups{% endtrans %}

    +

    {{ _('Naming Services') }}

    {% trans namingdiscussion=site_url('docs/discussions/naming'), todo=site_url('get-involved/todo') -%} All destinations in I2P are 516-byte (or longer) keys. @@ -89,38 +89,92 @@ One possible use of certificates is for proof of w If an application (i2ptunnel or the HTTP proxy) wishes to access a destination by name, the router does a very simple local lookup to resolve that name. -The client application (technically, the client side of I2CP in the I2P API) -does a linear search through three local files, in order, to -look up host names and convert them to a 516-byte destination key: {%- endtrans %}

    +

    {{ _('Hosts.txt Naming Service') }}

    + +

    {% trans -%} +The hosts.txt Naming Service does a simple linear search through +text files. This naming service was the default until +release 0.8.8 when it was replaced by the Blockfile Naming Service. +The hosts.txt format had become too slow after the file grew to thousands of entries. +{%- endtrans %}

    + +

    {% trans configuration=site_url('docs/spec/configuration') -%} +It does a linear search through three local files, in order, to +look up host names and convert them to a 516-byte destination key. +Each file is in a simple configuration file format, with hostname=base64, one per line. +The files are: +{%- endtrans %}

    1. privatehosts.txt
    2. userhosts.txt
    3. hosts.txt
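The lookup order described above (privatehosts.txt, then userhosts.txt, then hosts.txt, each a plain hostname=base64 file, one entry per line) can be illustrated with a short Python sketch. The configuration directory, the comment handling, and the helper name are assumptions; the real lookup is performed by the router's Java naming service.

{% highlight lang='python' %}
import os

CONFIG_DIR = os.path.expanduser("~/.i2p")          # assumed location
HOSTS_FILES = ("privatehosts.txt", "userhosts.txt", "hosts.txt")

def lookup(hostname):
    """Return the Base64 destination for hostname, or None if not found."""
    hostname = hostname.lower()                    # lookups are case-insensitive
    for name in HOSTS_FILES:                       # searched in this order
        path = os.path.join(CONFIG_DIR, name)
        if not os.path.exists(path):
            continue
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#") or "=" not in line:
                    continue
                host, dest = line.split("=", 1)
                if host.lower() == hostname:
                    return dest                    # first match wins
    return None
{% endhighlight %}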
    +

    {{ _('Blockfile Naming Service') }}

    +

    {% trans -%} +The Blockfile Naming Service stores multiple "addressbooks" in a single +database file named hostsdb.blockfile. +This Naming Service is the default since release 0.8.8. +{%- endtrans %}

    + +

    {% trans blockfile=site_url('docs/spec/blockfile') -%} +A blockfile is simply on-disk storage of multiple sorted maps (key-value pairs), +implemented as skiplists. +The blockfile format is specified on the Blockfile page. +It provides fast Destination lookup in a compact format. While the blockfile overhead is substantial, +the destinations are stored in binary rather than in Base 64 as in the hosts.txt format. +In addition, the blockfile provides the capability of arbitrary metadata storage +(such as added date, source, and comments) for each entry to implement advanced addressbook features. +The blockfile storage requirement is a modest increase over the hosts.txt format, and the blockfile provides +approximately 10x reduction in lookup times. +{%- endtrans %}

    + +

    {% trans -%} +On creation, the naming service imports entries from the three files used by the hosts.txt Naming Service. +The blockfile mimics the previous implementation by maintaining three maps that +are searched in-order, named privatehosts.txt, userhosts.txt, and hosts.txt. +It also maintains a reverse-lookup map to implement rapid reverse lookups. +{%- endtrans %}

    + +

    {{ _('Other Naming Service Facilities') }}

    + +

    {% trans nsjavadocs='http://docs.i2p-projekt.de/javadoc/net/i2p/client/naming/package-summary.html' -%} The lookup is case-insensitive. The first match is used, and conflicts are not detected. There is no enforcement of naming rules in lookups. +Lookups are cached for a few minutes. +Base 32 resolution is described below. +For a full description of the Naming Service API see the +Naming Service Javadocs. +This API was significantly expanded in release 0.8.7 to provide +adds and removes, storage of arbitrary properties with the hostname, +and other features. {%- endtrans %}

    +

    {{ _('Alternatives and Experimental Naming Services') }}

    +

    {% trans namingdiscussion=site_url('docs/discussions/naming') -%} -Lookups are cached for a few minutes. -There is an experimental facility for real-time lookups (a la DNS) over the network within the router, -although it is not enabled by default -(see "EepGet" under Alternatives on the discussion page). +The naming service is specified with the configuration property i2p.naming.impl=class. +Other implementations are possible. For example, +there is an experimental facility for real-time lookups (a la DNS) over the network within the router. +For more information see the alternatives on the discussion page. {%- endtrans %}

    -

    {% trans %}HTTP Proxy{% endtrans %}

    -

    {% trans -%} The HTTP proxy does a lookup via the router for all hostnames ending in '.i2p'. Otherwise, it forwards the request to a configured HTTP outproxy. Thus, in practice, all HTTP (eepsite) hostnames must end in the pseudo-Top Level Domain '.i2p'. {%- endtrans %}

    +

    {% trans i2ptld='https://datatracker.ietf.org/doc/draft-grothoff-iesg-special-use-p2p-names/', +rfc6761='http://tools.ietf.org/html/rfc6761' -%} +We have applied to reserve the .i2p TLD +following the procedures specified in RFC 6761. +{%- endtrans %}

    +

    {% trans -%} If the router fails to resolve the hostname, the HTTP proxy returns an error page to the user with links to several "jump" services. @@ -221,7 +275,7 @@ Must not contain '--' except in 'xn--' for IDN. {%- endtrans %}

  • {% trans -%} -Base32 hostnames (*.b32.i2p) are not allowed. +Base32 hostnames (*.b32.i2p) are reserved for base 32 use and so are not allowed to be imported. {%- endtrans %}
  • {% trans -%} @@ -247,7 +301,7 @@ Maximum key length 616 bytes (to account for certs up to 100 bytes).

    {% trans -%} -Any name received via subscription that passes all the checks is added to the local hosts.txt. +Any name received via subscription that passes all the checks is added via the local naming service. {%- endtrans %}

    {% trans -%} @@ -408,6 +462,9 @@ Example: ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2pproposal +to convert to an identical format of {52 chars}.onion for their hidden services. Base32 is implemented in the naming service, which queries the router over I2CP to lookup the LeaseSet to get the full Destination. Base32 lookups will only be successful when the Destination is up and publishing diff --git a/i2p2www/pages/site/docs/plugins.html b/i2p2www/pages/site/docs/plugins.html index 0f913c1f..562c1420 100644 --- a/i2p2www/pages/site/docs/plugins.html +++ b/i2p2www/pages/site/docs/plugins.html @@ -137,7 +137,7 @@ or easily add some feature. {%- endtrans %}
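Since the Base32 hostname is derived from the SHA-256 hash of the binary Destination, the derivation can be sketched in a few lines of Python; the lowercase, unpadded Base32 encoding shown here matches the 52-character example above, but the helper itself is illustrative only.

{% highlight lang='python' %}
import base64
import hashlib

def to_b32_address(destination_bytes):
    """Derive the *.b32.i2p hostname from a binary (Base64-decoded) Destination.

    Sketch only: Base32-encode the SHA-256 hash of the Destination,
    lowercase and without '=' padding, then append the pseudo-TLD.
    """
    digest = hashlib.sha256(destination_bytes).digest()
    b32 = base64.b32encode(digest).decode("ascii").rstrip("=").lower()
    return b32 + ".b32.i2p"   # 52 characters plus the suffix
{% endhighlight %}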

    {% trans %}Getting Started{% endtrans %}

    -

    {% trans url='http://'+i2pconv('trac.i2p2.i2p')+'/browser/plugin/makeplugin.sh?rev=776519571fda0689ef09c42f66e7398f30432e87' -%} +

    {% trans url='https://github.com/i2p/i2p.scripts/tree/master/plugin/makeplugin.sh' -%} To create a plugin from an existing binary package you will need to get makeplugin.sh from the i2p.scripts branch in monotone. {%- endtrans %}

    diff --git a/i2p2www/pages/site/docs/ports.html b/i2p2www/pages/site/docs/ports.html index 0c92f137..4c462dd2 100644 --- a/i2p2www/pages/site/docs/ports.html +++ b/i2p2www/pages/site/docs/ports.html @@ -54,6 +54,7 @@ in the 766x range.
  • + diff --git a/i2p2www/pages/site/docs/protocol/i2cp.html b/i2p2www/pages/site/docs/protocol/i2cp.html index 00f4eb96..842a13c4 100644 --- a/i2p2www/pages/site/docs/protocol/i2cp.html +++ b/i2p2www/pages/site/docs/protocol/i2cp.html @@ -1,33 +1,31 @@ {% extends "global/layout.html" %} {% block title %}I2CP{% endblock %} -{% block lastupdated %}{% trans %}November 2012{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.3{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    {% trans -%} The I2P Client Protocol (I2CP) exposes a strong separation of concerns between the router and any client that wishes to communicate over the network. It enables secure and asynchronous messaging by sending and receiving messages over a -single TCP socket, yet never exposing any private keys and authenticating itself -to the router only through signatures. With I2CP, a client application tells the +single TCP socket. +With I2CP, a client application tells the router who they are (their "destination"), what anonymity, reliability, and latency tradeoffs to make, and where to send messages. In turn the router uses I2CP to tell the client when any messages have arrived, and to request authorization for some tunnels to be used. {%- endtrans %}

    -

    {% trans url='http://docs.i2p-projekt.de/javadoc/net/i2p/client/package-summary.html' -%} -The protocol itself has only been implemented in Java, to provide the +

    {% trans url='http://docs.i2p-projekt.de/javadoc/net/i2p/client/package-summary.html', +libi2cp='http://git.repo.i2p/w/libi2cp.git', +streaming=site_url('docs/api/streaming') -%} +The protocol itself is implemented in Java, to provide the Client SDK. This SDK is exposed in the i2p.jar package, which implements the client-side of I2CP. Clients should never need to access the router.jar package, which contains the router itself and the router-side of I2CP. -{%- endtrans %}

    - -

    {% trans streaming=site_url('docs/api/streaming') -%} -While implementing the client side of I2CP in a non-Java language is certainly feasible, -a non-Java client would also have to implement the +There is also a C library implementation. +A non-Java client would also have to implement the streaming library for TCP-style connections. -Together, implementing I2CP and the streaming library would be a sizable task. {%- endtrans %}

    {% trans streaming=site_url('docs/api/streaming'), datagrams=site_url('docs/spec/datagrams'), @@ -64,6 +62,16 @@ Clients in the same JVM as the router pass messages directly to the router through an internal JVM interface. {%- endtrans %}

    +

    {% trans commonstructures=site_url('docs/spec/common-structures') -%} +The router also supports external connections over SSL. +While SSL is not the default, it is strongly recommended for any traffic that may +be exposed to the open Internet. The authorization user/password (if any), the +Private Key and +Signing Private Key for the +Destination +are all transmitted in-the-clear unless SSL is enabled. +{%- endtrans %}

    +

    {% trans %}I2CP Protocol Specification{% endtrans %}

    {% trans i2cp=site_url('docs/spec/i2cp') -%} Now on the I2CP Specification page. @@ -310,9 +318,11 @@ of sending a MessageStatus and awaiting a ReceiveMessageBegin.

    @@ -322,10 +332,6 @@ If the client is running in the same JVM as a router, this option is not require @@ -416,10 +422,12 @@ in the "unknown options" properties of the outbound tunnel pool's settings. Note: Large quantity, length, or variance settings may cause significant performance or reliability problems. {%- endtrans %}

    -

    {% trans -%} +

    {% trans commonstructures=site_url('docs/spec/common-structures') -%} Note: As of release 0.7.7, option names and values must use UTF-8 encoding. This is primarily useful for nicknames. Prior to that release, options with multi-byte characters were corrupted. +Since options are encoded in a Mapping, +all option names and values are limited to 255 bytes (not characters) maximum. {%- endtrans %}
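Because option names and values must be UTF-8 and, being carried in a Mapping, are each limited to 255 bytes, a client could validate its options before building the session configuration. The helper below is an illustrative sketch, not part of any I2P API:

{% highlight lang='python' %}
def validate_i2cp_options(options):
    """Check I2CP option names and values against the documented limits.

    Sketch only: enforces UTF-8 encodability (required as of 0.7.7) and a
    255-byte maximum per name and per value, since they travel in a Mapping.
    """
    for name, value in options.items():
        for label, text in (("name", name), ("value", value)):
            if len(text.encode("utf-8")) > 255:
                raise ValueError("option %s %r exceeds 255 bytes" % (label, text))
    return options

validate_i2cp_options({"inbound.nickname": "my-client",
                       "i2cp.messageReliability": "none"})
{% endhighlight %}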

    {% trans -%} @@ -645,21 +653,14 @@ specified by RFC 1952.

    {% trans %}Future Work{% endtrans %}

      -
    • {% trans -%} -Implement I2CP and the streaming library in another programming language. -{%- endtrans %}
    • - -
    • {% trans -%} -Is the initial Get Date / Set Date handshake required? -{%- endtrans %}
    • -
    • {% trans -%} The current authorization mechanism could be modified to use hashed passwords. {%- endtrans %}
    • {% trans -%} -Private Keys are included in the Create Lease Set message, -are they really required? Revocation is unimplemented. +The Signing Private Key is included in the Create Lease Set message, +but it is not required. Revocation is unimplemented. +It should be replaced with random data or removed. {%- endtrans %}
    • {% trans pdf1=url_for('static', filename='pdf/I2CP_spec.pdf'), pdf2=url_for('static', filename='pdf/datastructures.pdf') -%} @@ -673,5 +674,7 @@ That document also references the
    + +C library implementation {% endblock %} diff --git a/i2p2www/pages/site/docs/spec/blockfile.html b/i2p2www/pages/site/docs/spec/blockfile.html index f9ed3958..88ace4cd 100644 --- a/i2p2www/pages/site/docs/spec/blockfile.html +++ b/i2p2www/pages/site/docs/spec/blockfile.html @@ -169,8 +169,8 @@ The maximum number of entries per span is 16. {%- endtrans %}

    {% trans %}Properties Skiplist{% endtrans %}

    -

    {% trans -%} -"%%__INFO__%%" is the master database skiplist with String/Properties key/value entries containing only one entry: +

    {% trans INFO='"%%__INFO__%%"' -%} +{{ INFO }} is the master database skiplist with String/Properties key/value entries containing only one entry: {%- endtrans %}

         "info": a Properties (UTF-8 String/String Map), serialized as a Mapping:
    @@ -182,8 +182,8 @@ The maximum number of entries per span is 16.
     

    {% trans %}Reverse Lookup Skiplist{% endtrans %}

    -

    {% trans -%} -"%%__REVERSE__%%" is the reverse lookup skiplist with Integer/Properties key/value entries +

    {% trans REVERSE='"%%__REVERSE__%%"' -%} +{{ REVERSE }} is the reverse lookup skiplist with Integer/Properties key/value entries (as of database version 2): {%- endtrans %}

    diff --git a/i2p2www/pages/site/docs/spec/common-structures.html b/i2p2www/pages/site/docs/spec/common-structures.html
    index b3dec1e1..b07258f1 100644
    --- a/i2p2www/pages/site/docs/spec/common-structures.html
    +++ b/i2p2www/pages/site/docs/spec/common-structures.html
    @@ -1,7 +1,7 @@
     {% extends "global/layout.html" %}
     {% block title %}{% trans %}Common structure Specification{% endtrans %}{% endblock %}
    -{% block lastupdated %}{% trans %}June 2013{% endtrans %}{% endblock %}
    -{% block accuratefor %}0.9.6{% endblock %}
    +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %}
    +{% block accuratefor %}0.9.9{% endblock %}
     {% block content %}
     

    {% trans %}Data types Specification{% endtrans %}

    {% trans i2np=site_url('docs/protocol/i2np'), @@ -168,7 +168,7 @@ Defines an identifier that is unique to each router in a tunnel. 4 byte Integer {% endtrans %}

    -

    Javadoc

    +

    Javadoc

    Certificate

    {% trans %}Description{% endtrans %}

    @@ -270,9 +270,25 @@ For example, UTF-8 strings in a RouterInfo options mapping in a I2NP Database Store Message will be corrupted. {%- endtrans %} +
  • {% trans -%} +The encoding allows duplicate keys; however, in any usage where the mapping is signed, +duplicates may cause a signature failure. +{%- endtrans %}
  • +
  • {% trans -%} Mappings contained in I2NP messages (i.e. in a RouterAddress or RouterInfo) must be sorted by key so that the signature will be invariant. +Duplicate keys are not allowed. +{%- endtrans %}
  • + +
  • {% trans i2cp=site_url('docs/spec/i2cp') -%} +Mappings contained in an I2CP SessionConfig +must be sorted by key so that the signature will be invariant. +Duplicate keys are not allowed. +{%- endtrans %}
  • + +
  • {% trans -%} +While it is application-dependent, keys and values are generally case-sensitive. {%- endtrans %}
  • {% trans -%} @@ -381,6 +397,14 @@ certificate :: Certificate total length: 387+ bytes {% endhighlight %} +

    Notes

    +
    • +The public key of the destination was used for the old i2cp-to-i2cp encryption +which was disabled in version 0.6; it is currently unused +except for the IV for LeaseSet encryption, +which is deprecated. The public key in the LeaseSet is used instead. +
    +

    Javadoc

    Lease

    @@ -409,7 +433,7 @@ SHA256 Hash of the +----+----+----+----+ tunnel_gw :: Hash of the RouterIdentity of the tunnel gateway - length -> >= 32 bytes + length -> 32 bytes tunnel_id :: TunnelId length -> 4 bytes @@ -519,7 +543,7 @@ num :: Integer value: 0 <= num <= 16 leases :: [Lease] - length -> >= $num*44 bytes + length -> $num*44 bytes signature :: Signature length -> 40 bytes @@ -530,7 +554,7 @@ signature :: Signature
    • {% trans -%} The public key of the destination was used for the old i2cp-to-i2cp encryption -which was disabled in version 0.6, it is currently unused? +which was disabled in version 0.6, it is currently unused. {%- endtrans %}
    • {% trans elgamalaes=site_url('docs/how/elgamal-aes') -%} @@ -593,7 +617,7 @@ cost :: Integer case 0 -> free case 255 -> expensive -expiration :: Date +expiration :: Date (must be all zeros, see notes below) length -> 8 bytes case null -> never expires @@ -615,6 +639,12 @@ Expiration is currently unused, always null (all zeroes)). As of release 0.9.3, the expiration is assumed zero and not stored, so any non-zero expiration will fail in the RouterInfo signature verification. Implementing expiration (or another use for these bytes) will be a backwards-incompatible change. +Routers MUST set this field to all zeros. +{%- endtrans %}
    • + +
    • {% trans -%} +The following options, while not required, are standard and expected to be present in most router addresses: +"host" (an IPv4 or IPv6 address or host name) and "port". {%- endtrans %}
    @@ -707,14 +737,21 @@ signature :: Signature {% endhighlight %}

    {% trans %}Notes{% endtrans %}

    -

    {% trans -%} +

      +
    • {% trans -%} The peer_size Integer may be followed by a list of that many router hashes. This is currently unused. It was intended for a form of restricted routes, which is unimplemented. -{% endtrans %}

      +{%- endtrans %}
    • -

      {% trans -%} +

    • {% trans -%} The signature may be verified using the signing public key of the router_ident. -{% endtrans %}

      +{%- endtrans %}
    • + +
    • {% trans netdb=site_url('docs/how/network-database') -%} +See the network database page +for standard options that are expected to be present in all router infos. +{%- endtrans %}
    • +

    Javadoc

    diff --git a/i2p2www/pages/site/docs/spec/geoip.html b/i2p2www/pages/site/docs/spec/geoip.html index 66655450..dc452f4a 100644 --- a/i2p2www/pages/site/docs/spec/geoip.html +++ b/i2p2www/pages/site/docs/spec/geoip.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}GeoIP File Specification{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}May 2013{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    {% trans %}Overview{% endtrans %}

    @@ -60,8 +60,8 @@ The file is gzipped. Ungzipped format: Bytes 0-9: Magic number "I2PGeoIPv6" Bytes 10-11: Version (0x0001) Bytes 12-15 Options (0x00000000) (future use) - Bytes 16-23: Creation date (Java long) - Bytes 24-xx: Optional comment (UTF-8) + Bytes 16-23: Creation date (ms since 1970-01-01) + Bytes 24-xx: Optional comment (UTF-8) terminated by zero byte Bytes xx-255: null padding Bytes 256-: 18 byte records: 8 byte from (/64) @@ -71,11 +71,11 @@ The file is gzipped. Ungzipped format:

    {% trans %}NOTES:{% endtrans %}

    • Data must be sorted (SIGNED long twos complement), no overlap. -So the order is 8000:: ... FFFF:: 0000:: ... 7FFF:: +So the order is 80000000 ... FFFFFFFF 00000000 ... 7FFFFFFF.
    • The GeoIPv6.java class contains a program to generate this format from public sources such as the Maxmind GeoLite data.
    • -This specification is preliminary; I2P does not yet support IPv6 GeoIP lookup. +IPv6 GeoIP lookup is supported as of release 0.9.8.
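The GeoIPv6 header layout above translates directly into a small parser. This sketch follows the documented header fields; the record layout (an 8-byte signed "from" /64, an 8-byte signed "to" /64, and a 2-byte country code per 18-byte record) is an assumption, since this excerpt only shows the first record field.

{% highlight lang='python' %}
import gzip
import struct

def read_geoipv6(path):
    """Parse a gzipped GeoIPv6 file as described above (illustrative sketch)."""
    with gzip.open(path, "rb") as f:
        data = f.read()
    if data[0:10] != b"I2PGeoIPv6":
        raise ValueError("bad magic number")
    version, options = struct.unpack(">HI", data[10:16])
    created_ms = struct.unpack(">Q", data[16:24])[0]       # ms since 1970-01-01
    comment = data[24:256].split(b"\x00", 1)[0].decode("utf-8")
    records = []
    for off in range(256, len(data) - 17, 18):
        # Signed two's-complement longs, matching the documented sort order.
        start, end = struct.unpack(">qq", data[off:off + 16])
        country = data[off + 16:off + 18].decode("ascii")
        records.append((start, end, country))
    return version, created_ms, comment, records
{% endhighlight %}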
    {% endblock %} diff --git a/i2p2www/pages/site/docs/spec/i2cp.html b/i2p2www/pages/site/docs/spec/i2cp.html index 070fa709..b6550f93 100644 --- a/i2p2www/pages/site/docs/spec/i2cp.html +++ b/i2p2www/pages/site/docs/spec/i2cp.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}I2CP Specification{% endblock %} -{% block lastupdated %}June 2013{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}December 2013{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    I2P Control Protocol (I2CP) Specification

    Overview

    @@ -220,11 +220,16 @@ DSA Signa
  • Notes

    -

    +

    • The options are specified on the I2CP Overview page. -

      - +
    • +The Mapping +must be sorted by key so that the signature will be validated correctly in the router. +
    • +The creation date must be within +/- 30 seconds of the current time when processed by +the router, or the config will be rejected. +
    @@ -318,6 +323,20 @@ GetDateMessage
    {{ _('Admin') }}{{ _('Admin') }} {{ _('Project Manager') }} zzz {{ _('point of contact of last resort') }}[{{ _('vacant') }}] {{ _('threat model / crypto expert') }}
    Trac adminKillYourTV{{ _('Manage the project bug tracker') }}
    {{ _('Translation admins') }}eche|on, HungryHobo, KillYourTV, str4d, zzzAdmins on Transifex
    {{ _('User Advocate') }} [{{ _('vacant') }}]{{ _('manage the public project website content design') }}
    {% trans website=i2pconv('www.i2p2.i2p') %}Webserver admin{% endtrans %}{% trans website=site_url() %}Webserver admin{% endtrans %} welterde {{ _('manage the public project webservers') }}
    {% trans website=i2pconv('www.i2p2.i2p') %}Website admin{% endtrans %}{% trans website=site_url() %}Website admin{% endtrans %} str4d {{ _('manage the public project website content') }}

    {{ _('Dev') }}{{ _('Dev') }} {{ _('Core Lead') }} zzz {{ _('lead dev for the SDK and router') }}{{ _('C++ Router') }}
    {{ _('Console Translations') }}walking{{ _('Chinese') }}{{ _('Translators') }}{{ _('many many people!') }}Translators on Transifex
    monkeybrains{{ _('Dutch') }}
    magma{{ _('French') }}
    eche|on, mixxy{{ _('German') }}
    rus, 4get, slow{{ _('Russian') }}
    user{{ _('Spanish') }}
    thelastcode, hamada{{ _('Arabic') }}
    [{{ _('vacant') }}]{{ _('Other languages') }}
    {{ _('Contributors') }} cervantes

    {{ _('Past contributors') }}{{ _('Past contributors') }} mihi {{ _('I2PTunnel development, ministreaming library') }}
    dr|z3d {{ _('Console and website themes') }}
    walkingChinese translation
    monkeybrainsDutch translation
    magmaFrench translation
    eche|on, mixxyGerman translation
    rus, 4get, slowRussian translation
    userSpanish translation
    thelastcode, hamadaArabic translation
    {% trans %}… and many others{% endtrans %}
    i2p.streaming.answerPingstrue{% trans -%} Whether to respond to incoming pings {%- endtrans %}
    i2p.streaming.blacklistnull{% trans -%} Comma- or space-separated list of Base64 peer Hashes to be blacklisted for incoming connections to ALL destinations in the context. @@ -198,12 +214,31 @@ The initial value of the resend delay field in the packet header, times 1000. Not fully implemented; see below. {%- endtrans %}
    i2p.streaming.initialRTT8000 ({% trans %}if no sharing data available{% endtrans %})
    i2p.streaming.initialRTO9000{% trans -%} +Initial timeout +(if no sharing data available). +{%- endtrans %} {% trans release='0.9.8' -%} +As of release {{ release }}. +{%- endtrans %}
    i2p.streaming.initialRTT8000 {% trans -%} +Initial round trip time estimate +(if no sharing data available). +Disabled as of release 0.9.8; uses actual RTT. +{%- endtrans %}
    i2p.streaming.initialWindowSize6({% trans %}if no sharing data available{% endtrans %}) {% trans -%} In standard TCP, window sizes are in bytes, while in I2P, window sizes are in messages. {%- endtrans %}
    i2p.streaming.maxConcurrentStreams-1 {% trans -%} (0 or negative value means unlimited) This is a total limit for incoming and outgoing combined. @@ -273,6 +308,27 @@ while in I2P, window sizes are in messages. A higher number means slower growth. {%- endtrans %}
    i2p.streaming.tcbcache.rttDampening0.75{% trans -%} +Ref: RFC 2140. Floating point value. +May be set only via context properties, not connection options. +{%- endtrans %} {% trans release='0.9.8' -%} +As of release {{ release }}. +{%- endtrans %}
    i2p.streaming.tcbcache.rttdevDampening0.75{% trans -%} +Ref: RFC 2140. Floating point value. +May be set only via context properties, not connection options. +{%- endtrans %} {% trans release='0.9.8' -%} +As of release {{ release }}. +{%- endtrans %}
    i2p.streaming.tcbcache.wdwDampening0.75{% trans -%} +Ref: RFC 2140. Floating point value. +May be set only via context properties, not connection options. +{%- endtrans %} {% trans release='0.9.8' -%} +As of release {{ release }}. +{%- endtrans %}
    i2p.streaming.writeTimeout-1{% trans -%} How long to block on write/flush, in milliseconds. Negative means indefinitely. {%- endtrans %}
    8118Privoxy (reserve)
    8123Tor Polipo (reserve)
    8887Old default network port
    8888Freenet (reserve)
    8997Monotone Proxy (alt)
    8998Monotone Proxy
    8999Monotone Proxy (alt)
    string     -{% trans -%} +{% trans -%} For authorization, if required by the router. If the client is running in the same JVM as a router, this option is not required. +Warning - username and password are sent in the clear to the router, unless using SSL (i2cp.SSL=true). +Authorization is only recommended when using SSL. {%- endtrans %}
    string     -{% trans -%} -For authorization, if required by the router. -If the client is running in the same JVM as a router, this option is not required. -{%- endtrans %}
    32  
    + +HostLookupMessage + +C->R +38 +0.9.10 +
    + +HostReplyMessage + +R->C +39 +0.9.10 +
    MessagePayloadMessage @@ -450,7 +469,11 @@ As of release 0.7.2.

    Create Lease Set

    Description

    -This message is sent in response to a RequestLeaseSetMessage and contains all +This message is sent in response to a +Request Lease Set Message +or +Request Variable Lease Set Message +and contains all of the Lease structures that should be published to the I2NP Network Database. Sent from Client to Router.

    @@ -468,9 +491,9 @@ Sent from Client to Router.

    Notes

    The SigningPrivateKey matches the SigningPublicKey from within the -LeaseSet, as does the PrivateKey with the PublicKey. The Signing keys are +LeaseSet, as does the PrivateKey with the PublicKey. The Signing key is necessary to allow the router to revoke the LeaseSet if the client goes offline, -and the normal keys are necessary for decrypting garlic routed messages. The +and the encryption key is necessary for decrypting garlic routed messages. The LeaseSet granted may include Lease structures for tunnels pointing at another router if the client is actively connected to multiple routers with Leases granted to each. @@ -497,11 +520,16 @@ The router responds with a Session Status Message

    Notes

    -

    -The second message sent by the client after sending the Get Date Message and receiving the Set Date Message response. -If the Date in the Session Config is too far from the router's current time, the session will be rejected. +

    @@ -522,6 +550,7 @@ The router responds with a Dest Reply Message. As of release 0.7. As of release 0.8.3, multiple outstanding lookups are supported, and lookups are supported in both I2PSimpleSession and in standard sessions. +Host Lookup Message is preferred as of release 0.9.10.

    @@ -545,6 +574,7 @@ As of release 0.7. As of release 0.8.3, the requested Hash is returned if the lookup failed, so that the client may have multiple lookups outstanding and correlate the replies to the lookups. +To correlate a Destination response with a request, take the Hash of the Destination. Prior to release 0.8.3, the response was empty on failure.

    @@ -617,18 +647,111 @@ The router responds with a Set Date Message.

    Contents

    1. I2CP Version String +
    2. +Authentication Mapping +(optional, as of release 0.9.10)

    Notes

    -

    +

    • Generally the first message sent by the client after sending the protocol version byte. +
    • The version string is included as of release 0.8.7. This is only useful if the client and router are not in the same JVM. If it is not present, the client is version 0.8.6 or earlier. +
    • +Preliminary: As of release 0.9.10, the authentication +Mapping +may be included, with the keys i2cp.username and i2cp.password. +The Mapping need not be sorted as this message is not signed. +Prior to and including 0.9.10, authentication is included in the +Session Config Mapping, and +no authentication is enforced for GetDate, GetBandwidthLimits, or DestLookup. +The Get Date authentication will be enforced in a future release. +This is only useful outside router context. +This will be an incompatible change, but will only affect sessions outside +router context with authentication, which should be rare. +

    +

    Host Lookup

    +

    Description

    +

    +Sent from Client to Router. +The router responds with a Host Reply Message. +This replaces the Dest Lookup Message +and adds a request ID, a timeout, and host name lookup support. +As it also supports Hash lookups, it may be used for all lookups if the router supports it. +For host name lookups, the router will query its context's naming service. +This is only useful if the client is outside the router's context. +Inside router context, the client should query the naming service itself, +which is much more efficient. +

    + +

    Contents

    +
    1. +4 byte Integer request ID +
    2. +4 byte Integer timeout (ms) +
    3. +1 byte Integer request type +
    4. +SHA-256 Hash +or +host name String +
    + +

    Notes

    +
    • +Preliminary. As of release 0.9.10. +Use Dest Lookup Message for older routers. +
    • +The request ID will be returned in the Host Reply Message. +
    • +Timeout is useful for Hash lookups. Recommended minimum 10,000 (10 sec.). +In the future it may also be useful for remote naming service lookups. +The value may not be honored for local host name lookups, which should be fast. +
    • +The request type is 0 for Hash and 1 for host name. +
    • +Base 32 host name lookup is supported but it is preferred to convert +it to a Hash first. +
    + + + +
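The Host Lookup contents listed above (request ID, timeout, request type, then a Hash or a host name) could be serialized roughly as follows. This sketch covers only the fields listed here, omits the surrounding I2CP message envelope, and assumes host names are encoded as an I2P String (a 1-byte length followed by UTF-8 bytes); the helper names are illustrative.

{% highlight lang='python' %}
import struct

HASH_LOOKUP = 0   # request type 0: SHA-256 Hash
HOST_LOOKUP = 1   # request type 1: host name

def host_lookup_fields(request_id, timeout_ms, name=None, dest_hash=None):
    """Build the field sequence listed above for a Host Lookup (sketch only)."""
    body = struct.pack(">IIB", request_id, timeout_ms,
                       HASH_LOOKUP if dest_hash is not None else HOST_LOOKUP)
    if dest_hash is not None:
        if len(dest_hash) != 32:
            raise ValueError("SHA-256 hash must be 32 bytes")
        return body + dest_hash
    encoded = name.encode("utf-8")
    if len(encoded) > 255:
        raise ValueError("host name too long for a 1-byte length prefix")
    return body + bytes([len(encoded)]) + encoded

fields = host_lookup_fields(request_id=1, timeout_ms=10000, name="stats.i2p")
{% endhighlight %}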

    Host Reply

    +

    Description

    +

    +Sent from Router to Client in response to a +Host Lookup Message. +

    +

    Contents

    +
    1. +4 byte Integer request ID +
    2. +1 byte Integer result code +
    3. +Destination, +only present if result code is zero. +
    + +

    Notes

    +
    • +Preliminary. As of release 0.9.10. +See Host Lookup notes. +
    • +The request ID is that from the Host Lookup. +
    • +The result code is 0 for success, 1-255 for failure. +Only 1 is used for failure now; more specific failure codes may be defined in the future. +
    + + + +

    Message Payload

    Description

    @@ -684,10 +807,68 @@ guaranteed failed. The size Integer specifies the size of the available message and is only relevant for status = 0. Even though guaranteed is unimplemented, (best effort is the only service), the current router implementation uses the guaranteed status codes, not the best effort codes. -As of router version 0.9.5, additional status codes are defined. +As of router version 0.9.5, additional status codes are defined, +however they are not necessarily implemented. See the MessageStatusMessage Javadocs for details. +All status codes: + +
    Status CodeAs Of ReleaseNameDescription +
    0 AvailableFor incoming messages only. + The included size is the size in bytes of the available message. + This is unused in "fast receive" mode, which is the default as of release 0.9.4. + All other status codes below are for outgoing messages. +
    1 AcceptedOutgoing message accepted by the local router for delivery. + The included nonce matches the nonce in the + Send Message Message, and the included Message ID + will be used for subsequent success or failure notification. +
    2 Best Effort SuccessProbable success (unused) +
    3 Best Effort FailureProbable failure +
    4 Guaranteed SuccessProbable success +
    5 Guaranteed FailureGeneric failure, specific cause unknown. + May not really be a guaranteed failure. +
    60.9.5Local SuccessLocal delivery successful. + The destination was another client on the same router. +
    70.9.5Local FailureLocal delivery failure. + The destination was another client on the same router. +
    80.9.5Router FailureThe local router is not ready, has shut down, + or has major problems. + This is a guaranteed failure. +
    90.9.5Network FailureThe local computer apparently has no network connectivity at all. + This is a guaranteed failure. +
    100.9.5Bad SessionThe I2CP session is invalid or closed. + This is a guaranteed failure. +
    110.9.5Bad MessageThe message payload is invalid or zero-length or too big. + This is a guaranteed failure. +
    120.9.5Bad OptionsSomething is invalid in the message options, + or the expiration is in the past or too far in the future. + This is a guaranteed failure. +
    130.9.5Overflow FailureSome queue or buffer in the router is full and the message was dropped. + This is a guaranteed failure. +
    140.9.5Message ExpiredThe message expired before it could be sent. + This is a guaranteed failure. +
    150.9.5Bad Local LeasesetThe client has not yet signed a leaseset, or the local keys + are invalid, or it has expired, or it does not have any tunnels in it. + This is a guaranteed failure. +
    160.9.5No Local TunnelsLocal problems. No outbound tunnel to send through, + or no inbound tunnel if a reply is required. + This is a guaranteed failure. +
    170.9.5Unsupported EncryptionThe certs or options in the destination or its leaseset + indicate that it uses an encryption format that we don't support, so we can't talk to it. + This is a guaranteed failure. +
    180.9.5Bad DestinationSomething is wrong with the far-end destination. + Bad format, unsupported options, certificates, etc. + This is a guaranteed failure. +
    190.9.5Bad LeasesetWe got the far-end leaseset but something strange is wrong with it. + Unsupported options or certificates, no tunnels, etc. + This is a guaranteed failure. +
    200.9.5Expired LeasesetWe got the far-end leaseset but it's expired and we can't get a new one. + This is a guaranteed failure. +
    210.9.5No LeasesetCould not find the far-end leaseset. + This is a common failure, equivalent to a DNS lookup failure. + This is a guaranteed failure. +
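The status-code table above maps directly onto a small lookup structure; the Python enum below simply mirrors the listed codes (names slightly condensed) and is illustrative only.

{% highlight lang='python' %}
from enum import IntEnum

class MessageStatus(IntEnum):
    """Message Status codes from the table above."""
    AVAILABLE = 0
    ACCEPTED = 1
    BEST_EFFORT_SUCCESS = 2
    BEST_EFFORT_FAILURE = 3
    GUARANTEED_SUCCESS = 4
    GUARANTEED_FAILURE = 5
    LOCAL_SUCCESS = 6            # codes 6-21 defined as of 0.9.5
    LOCAL_FAILURE = 7
    ROUTER_FAILURE = 8
    NETWORK_FAILURE = 9
    BAD_SESSION = 10
    BAD_MESSAGE = 11
    BAD_OPTIONS = 12
    OVERFLOW_FAILURE = 13
    MESSAGE_EXPIRED = 14
    BAD_LOCAL_LEASESET = 15
    NO_LOCAL_TUNNELS = 16
    UNSUPPORTED_ENCRYPTION = 17
    BAD_DESTINATION = 18
    BAD_LEASESET = 19
    EXPIRED_LEASESET = 20
    NO_LEASESET = 21

def is_guaranteed_failure(status):
    """Codes 8-21 are all described above as guaranteed failures."""
    return status >= MessageStatus.ROUTER_FAILURE
{% endhighlight %}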

    When status = 1 (accepted), the nonce matches the nonce in the @@ -720,6 +901,9 @@ message id specified in the ReceiveMessageBeginMessage is invalid or incorrect, the router may simply not reply, or it may send back a DisconnectMessage.

    +

    +This is unused in "fast receive" mode, which is the default as of release 0.9.4. +

    @@ -743,6 +927,9 @@ Sent from Client to Router. The ReceiveMessageBeginMessage is sent after a MessagePayloadMessage fully delivers a message's payload.

    +

    +This is unused in "fast receive" mode, which is the default as of release 0.9.4. +

    @@ -762,9 +949,14 @@ The router responds with a Session Status Message

    Notes

    -

    +

    @@ -775,7 +967,7 @@ As of release 0.7.1. Tell the other party (client or router) that they are under attack, potentially with reference to a particular messageId. If the router is under attack, the client may decide to migrate to another router, and if a client is under attack, the router may rebuild -its routers or shitlist some of the peers that sent it messages delivering the attack. +its routers or banlist some of the peers that sent it messages delivering the attack. Sent either from router to client or from client to router.

    Contents

    @@ -844,10 +1036,10 @@ The client responds with a Create LeaseSet Message
    1. Session ID
    2. -1 byte Integer number of tunnels +1 byte Integer number of tunnels
    3. That many - Leases + Leases

    Notes

    @@ -904,7 +1096,7 @@ i2cp.messageReliability=none for this message only.

    Send Message Expires

    Description

    -Sent from Client to Router. Same as Send Message Message, except includes an expiration. +Sent from Client to Router. Same as Send Message Message, except includes an expiration and options.

    Contents

    1. @@ -953,7 +1145,19 @@ Definitions are subject to change. Use the SendMessageOptions class to construct

      Bit order: 15...0

      -Bits 15-9: Unused, must be zero +Bits 15-11: Unused, must be zero +

      +Bits 10-9: Message Reliability Override (scheduled for release 0.9.10) + +
      Field valueDescription +
      00Use session setting i2cp.messageReliability (default) +
      01Use "best effort" message reliability for this message, + overriding the session setting. The router will send one or more MessageStatusMessages in response. +
      10Use "guaranteed" message reliability for this message, + overriding the session setting. The router will send one or more MessageStatusMessages in response. +
      11Unused. Use a nonce value of 0 to force "none" and override a session setting + of "best effort" or "guaranteed". +

      Bit 8: If 1, don't send lease set
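The flag bits listed above can be assembled into the 2-byte flags field as sketched below. Only the bits shown in this excerpt are modeled (bits 15-11 zero, bits 10-9 the reliability override scheduled for 0.9.10, bit 8 "don't send lease set"); any lower bits are left at zero, and the helper is illustrative.

{% highlight lang='python' %}
RELIABILITY_DEFAULT = 0b00       # use the session's i2cp.messageReliability
RELIABILITY_BEST_EFFORT = 0b01   # bits 10-9, scheduled for 0.9.10
RELIABILITY_GUARANTEED = 0b10

def send_message_expires_flags(reliability=RELIABILITY_DEFAULT, no_lease_set=False):
    """Build the 16-bit flags field (sketch of the bits shown above only)."""
    flags = (reliability & 0b11) << 9
    if no_lease_set:
        flags |= 1 << 8              # bit 8: don't send lease set
    return flags.to_bytes(2, "big")

print(send_message_expires_flags(RELIABILITY_BEST_EFFORT, True).hex())  # '0300'
{% endhighlight %}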

      diff --git a/i2p2www/pages/site/docs/spec/i2np.html b/i2p2www/pages/site/docs/spec/i2np.html index 7533d2a5..4e6df9e0 100644 --- a/i2p2www/pages/site/docs/spec/i2np.html +++ b/i2p2www/pages/site/docs/spec/i2np.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}I2NP Specification{% endblock %} -{% block lastupdated %}June 2013{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}November 2013{% endblock %} +{% block accuratefor %}0.9.8.1{% endblock %} {% block content %}

      I2P Network Protocol (I2NP) Specification

      @@ -58,6 +58,9 @@ msg_id :: Integer length -> 4 bytes purpose -> uniquely identifies this message (for some time at least) + This is usually a locally-generated random number, but for + outgoing tunnel build messages it may be derived from + the incoming message. See below. expiration :: Date 8 bytes @@ -181,7 +184,7 @@ ElGamal encrypted: + + | | +----+----+----+----+----+----+----+----+ -| encrypted data ... | +| encrypted data... | ~ ~ | | +----+----+----+----+----+----+----+----+ @@ -189,7 +192,7 @@ ElGamal encrypted: ElGamal and AES encrypted: +----+----+----+----+----+----+----+----+ -| encrypted data ... | +| encrypted data... | ~ ~ | | +----+----+----+----+----+----+----+----+ @@ -526,6 +529,12 @@ The key is the "real" hash of the RouterIdentity or Destination, NOT the routing

      DatabaseLookup

      +

      Description

      +

      + A request to look up an item in the network database. + The response is either a DatabaseStore or a DatabaseSearchReply. +

      +

      Contents

      {% highlight lang='dataspec' %} +----+----+----+----+----+----+----+----+ | SHA256 hash as the key to look up | @@ -729,7 +738,7 @@ from ::
    2. The lookup key, peer hashes, and from hash are "real" hashes, NOT routing keys.
    3. - + @@ -868,6 +877,12 @@ Expiration :: Date (8 bytes)

      TunnelData

      +

      Description

      +

      + A message sent from a tunnel's gateway or participant to the next participant or endpoint. + The data is of fixed length, containing I2NP messages that are fragmented, batched, padded, and encrypted. +

      +

      Contents

      {% highlight lang='dataspec' %} +----+----+----+----+----+----+----+----+ | tunnnelID | data | @@ -894,12 +909,19 @@ data ::

      Notes

      TunnelGateway

      +

      Description

      +

      + Wraps another I2NP message to be sent into a tunnel at the tunnel's inbound gateway. +

      +

      Contents

      {% highlight lang='dataspec' %} +----+----+----+----+----+----+----+-// | tunnelId | length | data... @@ -931,17 +953,16 @@ data ::

      Data

      Description

      - Used as a wrapper for encrypted Garlic Messages and Garlic Cloves. - Also used previously for network load testing. + Used by Garlic Messages and Garlic Cloves to wrap arbitrary data.

      Contents

      A length Integer, followed by opaque data.

      {% highlight lang='dataspec' %} -+----+----+----+----+----+-// -| length | data... -+----+----+----+----+----+-// ++----+----+----+----+----+-//-+ +| length | data... | ++----+----+----+----+----+-//-+ {% endhighlight %}

      Definition

      @@ -980,9 +1001,12 @@ total size: 8*528 = 4224 bytes {% endhighlight %}

      Notes

      -

      +

        +
      • See also the tunnel creation specification. -

        +
      • + The I2NP message ID for this message must be set according to the tunnel creation specification. +

      TunnelBuildReply

      @@ -991,9 +1015,12 @@ same format as TunnelBuild message, with Build Response Records {% endhighlight %}

      Notes

      -

      +

        +
      • See also the tunnel creation specification. -

        +
      • + The I2NP message ID for this message must be set according to the tunnel creation specification. +

      VariableTunnelBuild

      {% highlight lang='dataspec' %} @@ -1020,6 +1047,8 @@ total size: 1 + $num*528 This message was introduced in router version 0.7.12, and may not be sent to tunnel participants earlier than that version.
    4. See also the tunnel creation specification. +
    5. + The I2NP message ID for this message must be set according to the tunnel creation specification.

      VariableTunnelBuildReply

      @@ -1039,6 +1068,8 @@ Same format as VariableTunnelBuild message, with Build Response Records. This message was introduced in router version 0.7.12, and may not be sent to tunnel participants earlier than that version.
    6. See also the tunnel creation specification. +
    7. + The I2NP message ID for this message must be set according to the tunnel creation specification. diff --git a/i2p2www/pages/site/docs/spec/plugin.html b/i2p2www/pages/site/docs/spec/plugin.html index 406d833e..8526379b 100644 --- a/i2p2www/pages/site/docs/spec/plugin.html +++ b/i2p2www/pages/site/docs/spec/plugin.html @@ -321,7 +321,7 @@ Other plugin guidelines
    8. See i2p.scripts branch or any of the sample plugins on zzz's page for a xpi2p file generator to make it easy.
    9. -Pack200 of jars and wars is strongly recommended for plugins, it generally shrinks plugins by 60-65%. +Pack200 of jars and wars is strongly recommended for plugins, it generally shrinks plugins by 60-65%. See any of the sample plugins on zzz's page for an example. Pack200 unpacking is supported on routers 0.7.11-5 or higher, which is essentially all routers that support plugins at all. diff --git a/i2p2www/pages/site/docs/spec/ssu.html b/i2p2www/pages/site/docs/spec/ssu.html index 083541b7..638f72db 100644 --- a/i2p2www/pages/site/docs/spec/ssu.html +++ b/i2p2www/pages/site/docs/spec/ssu.html @@ -1,11 +1,9 @@ {% extends "global/layout.html" %} {% block title %}SSU Protocol Specification{% endblock %} -{% block lastupdated %}June 2013{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}December 2013{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %} -Note: IPv6 information is preliminary. -

      See the SSU page for an overview of the SSU transport. @@ -119,11 +117,18 @@ the following bitfields:

      + + | | +----+----+----+----+----+----+----+----+ - |flag| time | (optionally | - +----+----+----+----+----+ | - | this may have 64 byte keying material | - | and/or a one+N byte extended options) | - +---------------------------------------| + |flag| time | | + +----+----+----+----+----+ + + | keying material (optional) | + + + + | | + ~ ~ + | | + + +----+----+----+ + | |#opt| | + +----+----+----+----+----+----+ + + | #opt extended option bytes (optional) | + ~ ~ {% endhighlight %} @@ -154,18 +159,13 @@ bytes.

      All messages contain 0 or more bytes of padding. Each message must be padded to a 16 byte boundary, as required by the AES256 encryption layer. -Currently, messages are not padded beyond the next 16 byte boundary. -The fixed-size tunnel messages of 1024 bytes (at a higher layer) -provide a significant amount of protection. -In the future, additional padding in the transport layer up to -a set of fixed packet sizes may be appropriate to further hide the data -fragmentation to external adversaries. -

      -Through release 0.9.6, messages were only padded to the next 16 byte boundary, +Through release 0.9.7, messages were only padded to the next 16 byte boundary, and messages not a multiple of 16 bytes could possibly be invalid. As of release 0.9.7, messages may be padded to any length as long as the current MTU is honored. Any extra 1-15 padding bytes beyond the last block of 16 bytes cannot be encrypted or decrypted and will be ignored. However, the full length and all padding is included in the MAC calculation. +As of release 0.9.8, transmitted messages are not necessarily a multiple of 16 bytes. +The SessionConfirmed message is an exception, see below.
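The padding rule above (pad to a 16-byte boundary for the AES256 layer; extra 1-15 trailing bytes allowed as of 0.9.7, ignored by the cipher but covered by the MAC) can be written out as a short helper. The use of random padding bytes is an assumption of this sketch.

{% highlight lang='python' %}
import os

def pad_for_aes(message, extra=0):
    """Pad an SSU message body to the AES256 block boundary (sketch only).

    Pads to the next 16-byte boundary, then optionally appends 0-15 extra
    bytes; the extras are not encrypted or decrypted but are still covered
    by the MAC calculation, per the text above.
    """
    if not 0 <= extra <= 15:
        raise ValueError("extra padding must be 0-15 bytes")
    pad_len = (-len(message)) % 16
    return message + os.urandom(pad_len + extra)

padded = pad_for_aes(b"\x00" * 37)
assert len(padded) % 16 == 0
{% endhighlight %}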

      @@ -192,9 +192,9 @@ The key used for the MAC and encryption is specified for each message below.

      Notes

      IPv6 Notes

      -While the protocol specification supports 16-byte IPv6 addresses, -IPv6 addressing is not currently supported within I2P. -All IP addresses are currently 4 bytes. +The protocol specification allows both 4-byte IPv4 and 16-byte IPv6 addresses. +SSU-over-IPv6 is supported as of version 0.9.8. +See the documentation of individual messages below for details on IPv6 support.

      Timestamps

      While most of I2P uses 8-byte Date timestamps with @@ -392,7 +392,7 @@ bits 3-0: total identity fragments (F) 1-15
    sessionKey
    - Fragment 0 through F-2 + Fragment 0 through F-2 (if F > 1): {% highlight lang='dataspec' %} +----+----+----+----+----+----+----+----+ |info| cursize | | @@ -407,7 +407,7 @@ bits 3-0: total identity fragments (F) 1-15

  • +----+----+----+----+----+----+----+----+ {% endhighlight %} - Fragment F-1: + Fragment F-1 (last or only fragment): {% highlight lang='dataspec' %} +----+----+----+----+----+----+----+----+ |info| cursize | | @@ -422,6 +422,7 @@ bits 3-0: total identity fragments (F) 1-15 | arbitrary amount of uninterpreted | | data, to 40 bytes prior to | | end of the current packet | + | Packet length must be mult. of 16 | +----+----+----+----+----+----+----+----+ | DSA signature | + + @@ -444,7 +445,9 @@ Typical size including header, in current implementation: 480 bytes In the current implementation, the maximum fragment size is 512 bytes.
  • The typical Router Identity -is 387 bytes, so no fragmentation is usually necessary. +is 387 bytes, so no fragmentation is ever necessary. +If new crypto extends the size of the RouterIdentity, the fragmentation scheme +must be tested carefully.
  • There is no mechanism for requesting or redelivering missing fragments.
  • @@ -453,6 +456,10 @@ The total fragments field F must be set identically in all fragments. See the Keys section above for details on DSA signatures.
  • Signed-on time appears to be unused or unverified in the current implementation. +
  • +Since the signature is at the end, the padding in the last or only packet must pad the total packet to +a multiple of 16 bytes, or the signature will not get decrypted correctly. +This is different from all the other message types, where the padding is at the end.
  • diff --git a/i2p2www/pages/site/docs/spec/streaming.html b/i2p2www/pages/site/docs/spec/streaming.html index c0a8c84b..c22ca15d 100644 --- a/i2p2www/pages/site/docs/spec/streaming.html +++ b/i2p2www/pages/site/docs/spec/streaming.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}Streaming Library Specification{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}November 2012{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.3{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    {% trans -%} See the Streaming page for an overview of the Streaming Library. @@ -92,44 +92,50 @@ in the given order. Bit order: 15....0 (15 is MSB)

    -
    BitFlagOption DataFunction -
    0SYNCHRONIZE-- +
    BitFlagOption OrderOption DataFunction +
    0SYNCHRONIZE---- Similar to TCP SYN. Set in the initial packet and in the first response. FROM_INCLUDED and SIGNATURE_INCLUDED must be set also. -
    1CLOSE-- +
    1CLOSE---- Similar to TCP FIN. If the response to a SYNCHRONIZE fits in a single message, the response will contain both SYNCHRONIZE and CLOSE. SIGNATURE_INCLUDED must be set also. -
    2RESET-- +
    2RESET---- Abnormal close. SIGNATURE_INCLUDED must be set also. -
    3SIGNATURE_INCLUDED40 byte DSA Signature +
    3SIGNATURE_INCLUDED440 byte DSA Signature -Currently sent only with SYNCHRONIZE, CLOSE, and RESET, where it is required. +Currently sent only with SYNCHRONIZE, CLOSE, and RESET, where it is required, +and with ECHO, where it is required for a ping. The signature uses the Destination's DSA signing keys to sign the entire header and payload with the 40-byte space in the option data field for the signature being set to all zeroes. -
    4SIGNATURE_REQUESTED-- +
    4SIGNATURE_REQUESTED---- Unused. Requests every packet in the other direction to have SIGNATURE_INCLUDED -
    5FROM_INCLUDED387+ byte Destination +
    5FROM_INCLUDED2387+ byte Destination -Currently sent only with SYNCHRONIZE, where it is required. -
    6DELAY_REQUESTED2 byte Integer +Currently sent only with SYNCHRONIZE, where it is required, +and with ECHO, where it is required for a ping. +
    6DELAY_REQUESTED12 byte Integer Optional delay. How many milliseconds the sender of this packet wants the recipient to wait before sending any more data. A value greater than 60000 indicates choking. -
    7MAX_PACKET_SIZE_INCLUDED2 byte Integer +
    7MAX_PACKET_SIZE_INCLUDED32 byte Integer Currently sent with SYNCHRONIZE only. Was also sent in retransmitted packets until release 0.9.1. -
    8PROFILE_INTERACTIVE-- +
    8PROFILE_INTERACTIVE---- Unused or ignored; the interactive profile is unimplemented. -
    9ECHO-- -Unused except by ping programs -
    10NO_ACK-- +
    9ECHO---- +Unused except by ping programs. +If set, most other options, and the payload, are ignored. See +the streaming docs. +
    10NO_ACK---- This flag simply tells the recipient to ignore the ackThrough field in the header. -Currently unused, the ackThrough field is always valid. -
11-15unused +Currently set in the initial SYN packet, otherwise the ackThrough field is always valid. +Note that this does not save any space, the ackThrough field is before the flags +and is always present. +
    11-15unused
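The flag bits and the new option order column in the table above can be illustrated with a short parsing sketch. The constant names and the fixed 387-byte Destination size are assumptions for this example, not the streaming library's actual API.
{% highlight lang='java' %}
// Hypothetical sketch of the flag word and option-data order described in the
// table above. Names are illustrative only.
import java.nio.ByteBuffer;

public final class StreamingFlagsSketch {

    static final int SYNCHRONIZE         = 1 << 0;
    static final int CLOSE               = 1 << 1;
    static final int RESET               = 1 << 2;
    static final int SIGNATURE_INCLUDED  = 1 << 3;  // option order 4
    static final int SIGNATURE_REQUESTED = 1 << 4;
    static final int FROM_INCLUDED       = 1 << 5;  // option order 2
    static final int DELAY_REQUESTED     = 1 << 6;  // option order 1
    static final int MAX_PACKET_SIZE     = 1 << 7;  // option order 3
    static final int PROFILE_INTERACTIVE = 1 << 8;
    static final int ECHO                = 1 << 9;
    static final int NO_ACK              = 1 << 10;

    /** Read option data in the order given by the table: delay (1), from (2),
     *  max packet size (3), signature (4). */
    static void readOptions(int flags, ByteBuffer optionData) {
        if ((flags & DELAY_REQUESTED) != 0) {
            int delayMs = optionData.getShort() & 0xFFFF;
            boolean choking = delayMs > 60000;          // per the table, >60000 indicates choking
            System.out.println("delay " + delayMs + " ms, choking=" + choking);
        }
        if ((flags & FROM_INCLUDED) != 0) {
            byte[] destination = new byte[387];         // 387+ bytes; fixed size assumed here
            optionData.get(destination);
        }
        if ((flags & MAX_PACKET_SIZE) != 0) {
            int mps = optionData.getShort() & 0xFFFF;
            System.out.println("max packet size " + mps);
        }
        if ((flags & SIGNATURE_INCLUDED) != 0) {
            byte[] dsaSig = new byte[40];               // signature over header+payload, with this
            optionData.get(dsaSig);                     // 40-byte field zeroed before signing
        }
    }
}
{% endhighlight %}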
    {% endblock %} diff --git a/i2p2www/pages/site/docs/spec/tunnel-message.html b/i2p2www/pages/site/docs/spec/tunnel-message.html index 9874fd0b..5dec4c6b 100644 --- a/i2p2www/pages/site/docs/spec/tunnel-message.html +++ b/i2p2www/pages/site/docs/spec/tunnel-message.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}Tunnel Message Specification{% endblock %} -{% block lastupdated %}October 2011{% endblock %} -{% block accuratefor %}0.8.10{% endblock %} +{% block lastupdated %}December 2013{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

    Tunnel Message Specification

    This document specifies the format of tunnel messages. @@ -122,7 +122,7 @@ IV :: Checksum :: 4 bytes - the first 4 bytes of the SHA256 hash of the remaining contents of the message concatenated with the IV + the first 4 bytes of the SHA256 hash of the contents of the message after the zero byte concatenated with the IV Nonzero_padding :: 0 or more bytes @@ -145,9 +145,14 @@ total size: 1028 Bytes +

    Notes

    +
    • +The padding, if any, must be before the instruction/message pairs. +There is no provision for padding at the end. +
    • +The checksum does NOT cover the padding or the zero byte. +
    -

    Note that the padding, if any, must be before the instruction/message pairs. -there is no provision for padding at the end.
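The checksum rule in the notes above (first 4 bytes of SHA-256 over the contents after the zero byte, concatenated with the IV, and not covering the padding or the zero byte) can be sketched as follows; the method name is illustrative.
{% highlight lang='java' %}
// Minimal sketch of the tunnel-message checksum described above, using the
// standard JCE SHA-256 digest. Names are illustrative, not the router's code.
import java.security.MessageDigest;
import java.util.Arrays;

public final class TunnelChecksumSketch {

    /** @param afterZeroByte message contents following the zero byte
     *  @param iv            the 16-byte tunnel message IV */
    static byte[] checksum(byte[] afterZeroByte, byte[] iv) throws Exception {
        MessageDigest sha = MessageDigest.getInstance("SHA-256");
        sha.update(afterZeroByte);
        sha.update(iv);
        return Arrays.copyOf(sha.digest(), 4);  // only the first 4 bytes are stored
    }
}
{% endhighlight %}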

    Delivery Instructions

    @@ -195,6 +200,7 @@ flag: bits 6-5: delivery type For tunnel messages: 0x0 = LOCAL, 0x01 = TUNNEL, 0x02 = ROUTER, 0x03 = unused + Note: LOCAL is used for inbound tunnels only, unimplemented for outbound tunnels For garlic cloves: 0x0 = LOCAL, 0x01 = DESTINATION, 0x02 = ROUTER, 0x03 = TUNNEL bit 4: delay included? Unimplemented, always 0 @@ -203,7 +209,7 @@ flag: If 1, the message is fragmented, and the instructions contain a Message ID bit 2: extended options? Unimplemented, always 0 If 1, extended options are included - bits 1-0: reserved + bits 1-0: reserved, set to 0 for compatibility with future uses Tunnel ID: 4 bytes @@ -239,12 +245,15 @@ Extended Options: size: 2 bytes + Required in a tunnel message; never present in a garlic clove. The length of the fragment that follows - Valid values: 1 to approx. 960 in a tunnel message; 1 to 64K - 1 in a garlic clove + Valid values: 1 to approx. 960 in a tunnel message Total length: Typical length is: - 3 bytes for LOCAL delivery (garlic clove); - 35 bytes for ROUTER / DESTINATION delivery or 39 bytes for TUNNEL delivery (unfragmented or garlic clove); + 1 byte for LOCAL delivery (garlic clove); + 3 bytes for LOCAL delivery (tunnel message); + 35 bytes for ROUTER / DESTINATION delivery or 39 bytes for TUNNEL delivery (unfragmented tunnel message); + 33 bytes for ROUTER / DESTINATION delivery or 37 bytes for TUNNEL delivery (garlic clove); 39 bytes for ROUTER delivery or 43 bytes for TUNNEL delivery (first fragment) @@ -268,7 +277,7 @@ frag :: Message_ID :: 4 bytes - the same ID specified in the first fragment + the same ID specified in the first fragment for this message ID size :: 2 bytes diff --git a/i2p2www/pages/site/docs/spec/updates.html b/i2p2www/pages/site/docs/spec/updates.html index be572f84..cad13b4f 100644 --- a/i2p2www/pages/site/docs/spec/updates.html +++ b/i2p2www/pages/site/docs/spec/updates.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}I2P Software Update Specification{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}May 2013{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}{% trans %}October 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}
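Before moving on to the update specification, the delivery-instruction flag byte in the tunnel-message hunk above can be illustrated with a small decoder. Only the bits spelled out in the excerpt are handled, and the names are hypothetical, not the router's own.
{% highlight lang='java' %}
// Hypothetical decoder for the delivery-instruction flag byte fields quoted
// above (bits 6-5 delivery type, bit 4 delay, bits 1-0 reserved).
public final class DeliveryFlagSketch {

    enum TunnelDelivery { LOCAL, TUNNEL, ROUTER, UNUSED }
    enum CloveDelivery  { LOCAL, DESTINATION, ROUTER, TUNNEL }

    static void decode(int flagByte, boolean garlicClove) {
        int f = flagByte & 0xFF;
        int deliveryType      = (f >> 5) & 0x03;     // bits 6-5
        boolean delayIncluded = ((f >> 4) & 1) != 0; // bit 4: unimplemented, always 0
        int reserved          = f & 0x03;            // bits 1-0: set to 0 for future compatibility

        String type = garlicClove
                ? CloveDelivery.values()[deliveryType].name()
                : TunnelDelivery.values()[deliveryType].name();
        System.out.println("delivery=" + type + " delay=" + delayIncluded + " reserved=" + reserved);
    }

    public static void main(String[] args) {
        decode(0x20, false); // bits 6-5 = 0x01 -> TUNNEL delivery for a tunnel message
    }
}
{% endhighlight %}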

    {% trans %}Overview{% endtrans %}

    {% trans -%} @@ -33,19 +33,66 @@ The news.xml file may contain the following elements:

    {% trans -%} -The elements may be included inside XML comments to prevent interpretation by browsers. -The i2p.release element and version are required. All others are optional and are -currently unused. +Parameters in the i2p.release entry are as follows. +All keys are case-insensitive. All values must be enclosed in double quotes. {%- endtrans %}

    +
      +
    • +date: The release date of the router version. Unused. Format not specified. +
    • +minJavaVersion: The minimum version of Java required to run the current version. +As of release 0.9.9. +
    • +minVersion: The minimum version of the router required to update to the current version. +If a router is older than this, the user must (manually?) update to an intermediate version first. +As of release 0.9.9. +
    • +su3Clearnet: One or more HTTP URLs where the .su3 update file may +be found on the clearnet (non-I2P). +Multiple URLs must be separated by a space or comma. +As of release 0.9.9. +
    • +su3SSL: One or more HTTPS URLs where the .su3 update file may +be found on the clearnet (non-I2P). +Multiple URLs must be separated by a space or comma. +As of release 0.9.9. +
    • +sudTorrent: The magnet link for the .sud (non-pack200) torrent of the update. +As of release 0.9.4. +
    • +su2Torrent: The magnet link for the .su2 (pack200) torrent of the update. +As of release 0.9.4. +
    • +su3Torrent: The magnet link for the .su3 (new format) torrent of the update. +As of release 0.9.9. +
    • +version: Required. The latest current router version available. +
    +

    {% trans -%} -The news source is trusted only to indicate that a new version is available. -It does not specify the URL of the update, the checksum, or any other information. +The elements may be included inside XML comments to prevent interpretation by browsers. +The i2p.release element and version are required. All others are optional. +NOTE: Due to parser limitations an entire element must be on a single line. {%- endtrans %}
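Since each element must fit on one line, a line-oriented attribute scan is enough to read these entries. The following is a hypothetical sketch, not the router's actual news parser, and the sample attribute values are invented placeholders.
{% highlight lang='java' %}
// Hypothetical single-line attribute scan for an i2p.release element,
// consistent with the notes above: keys are case-insensitive, values are
// double-quoted, and an entire element must be on one line.
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class NewsEntrySketch {
    private static final Pattern ATTR = Pattern.compile("([\\w.]+)\\s*=\\s*\"([^\"]*)\"");

    static Map<String, String> attributes(String line) {
        Map<String, String> out = new LinkedHashMap<>();
        Matcher m = ATTR.matcher(line);
        while (m.find()) {
            out.put(m.group(1).toLowerCase(), m.group(2)); // keys are case-insensitive
        }
        return out;
    }

    public static void main(String[] args) {
        // Illustrative element; attribute values are placeholders, not real release data.
        String line = "<i2p.release version=\"0.9.9\" minVersion=\"0.9\" minJavaVersion=\"1.6\" />";
        System.out.println(attributes(line).get("version")); // prints 0.9.9
    }
}
{% endhighlight %}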

    {% trans %}Update File Specification{% endtrans %}

    {% trans -%} +As of release 0.9.9, the signed update file, named i2pupdate.su3, will +use the "su3" file format specified below. +Approved release signers will use 4096-bit RSA keys. +The X.509 public key certificates for these signers are distributed in the router installation packages. +The updates may contain certificates for new, approved signers, and/or contain +a list of certificates to delete for revocation. +{%- endtrans %}

    + + +

    {% trans %}Old Update File Specification{% endtrans %}

    +

{% trans -%} +This format is obsolete as of release 0.9.9. +{%- endtrans %}

    +

    {% trans -%} The signed update file, traditionally named i2pupdate.sud, is simply a zip file with a prepended 56 byte header. The header contains: @@ -99,7 +146,7 @@ As of release 0.7.12, the router supports Pack200 decompression. Files inside the zip archive with a .jar.pack or .war.pack suffix are transparently decompressed to a .jar or .war file. Update files containing .pack files are traditionally named with a '.su2' suffix. -Pack200 shrinks the update files by about 60%. +Pack200 shrinks the update files by about 60%. {%- endtrans %}
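The Pack200 step described above can be reproduced with the standard java.util.jar.Pack200 API (removed in recent JDKs); the file names below are placeholders.
{% highlight lang='java' %}
// Sketch of transparently unpacking a ".jar.pack" entry back to a ".jar",
// as described above. File names are hypothetical.
import java.io.File;
import java.io.FileOutputStream;
import java.util.jar.JarOutputStream;
import java.util.jar.Pack200;

public final class Pack200Sketch {
    public static void main(String[] args) throws Exception {
        File packed = new File("i2p.jar.pack");          // hypothetical entry from a .su2
        try (JarOutputStream out =
                 new JarOutputStream(new FileOutputStream("i2p.jar"))) {
            Pack200.newUnpacker().unpack(packed, out);   // decompress back to a normal jar
        }
    }
}
{% endhighlight %}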

    {% trans -%} @@ -126,10 +173,10 @@ The router will then delete the deletelist.txt file.

    {% trans %}New "su3" Update File Specification{% endtrans %}

    {% trans -%} -This specification is preliminary and is not yet implemented. +This specification is used for router updates and reseed data as of release 0.9.9. {%- endtrans %}

    -

    {% trans %}Issues with existing .sud/.su2 format:{% endtrans %}

    +

    {% trans %}Issues with the previous .sud/.su2 format:{% endtrans %}

    • {% trans -%} No magic number or flags @@ -177,30 +224,41 @@ existing version checkers 7 su3 file format version = 0 -8 unused +8-9 Signature type +
      • 0x0000 = DSA-160 +
      • 0x0001 = ECDSA-SHA256-P256 +
      • 0x0002 = ECDSA-SHA384-P384 +
      • 0x0003 = ECDSA-SHA512-P521 +
      • 0x0004 = RSA-SHA256-2048 +
      • 0x0005 = RSA-SHA384-3072 +
      • 0x0006 = RSA-SHA512-4096 +
      -9 Version length (in bytes not chars, including padding) +10-11 Signature length, e.g. 40 (0x0028) for DSA-160 + +12 unused + +13 Version length (in bytes not chars, including padding) must be at least 16 (0x10) for compatibility -10 unused +14 unused -11 Signer ID length (in bytes not chars) +15 Signer ID length (in bytes not chars) -12-19 Compressed content length (not including header or sig) - -20 unused - -21 Compressed type 0x00 = zip - -22 unused - -23 Content type 0x00 = router w/o pack200, 0x01 = router w/ pack200, 0x02 = plugin +16-23 Compressed content length (not including header or sig) 24 unused -25 Signature type 0x00 = DSA-160, 0x01 = new algo +25 Compressed type 0x00 = zip -26-27 Signature length 40 (0x0028) = DSA-160 +26 unused + +27 Content type +
      • 0x00 = unknown +
      • 0x01 = router update +
      • 0x02 = plugin or plugin update +
      • 0x03 = reseed data +
      28-39 unused diff --git a/i2p2www/pages/site/docs/transport/index.html b/i2p2www/pages/site/docs/transport/index.html index ab1b7b11..6cf01f95 100644 --- a/i2p2www/pages/site/docs/transport/index.html +++ b/i2p2www/pages/site/docs/transport/index.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}Transport Overview{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}April 2013{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.5{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}
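Reading the su3 header fields listed above is straightforward offset arithmetic. The sketch below assumes big-endian byte order and skips bytes 0-6 (the magic number, not shown in this excerpt); names are illustrative, not the router's implementation.
{% highlight lang='java' %}
// Hedged sketch of reading the su3 header fields listed above.
import java.nio.ByteBuffer;

public final class Su3HeaderSketch {

    static void parse(byte[] header) {
        ByteBuffer buf = ByteBuffer.wrap(header);             // big-endian assumed
        int fileVersion   = buf.get(7) & 0xFF;                // su3 file format version = 0
        int sigType       = buf.getShort(8) & 0xFFFF;         // 0x0000 DSA-160 ... 0x0006 RSA-SHA512-4096
        int sigLength     = buf.getShort(10) & 0xFFFF;        // e.g. 40 (0x0028) for DSA-160
        int versionLength = buf.get(13) & 0xFF;               // at least 16, in bytes including padding
        int signerLength  = buf.get(15) & 0xFF;               // signer ID length in bytes
        long contentLen   = buf.getLong(16);                  // bytes 16-23: compressed content length
        int compressType  = buf.get(25) & 0xFF;               // 0x00 = zip
        int contentType   = buf.get(27) & 0xFF;               // 0x01 router update, 0x02 plugin, 0x03 reseed data

        System.out.printf("v=%d sig=0x%04x/%d ver=%d signer=%d len=%d zip=%d type=0x%02x%n",
                fileVersion, sigType, sigLength, versionLength, signerLength,
                contentLen, compressType, contentType);
    }
}
{% endhighlight %}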

      {% trans %}Transports in I2P{% endtrans %}

      @@ -34,9 +34,16 @@ flow control, acknowledgments and retransmission. The transport subsystem in I2P provides the following services: {%- endtrans %}

        +
      • {% trans i2np=site_url('docs/protocol/i2np') -%} +Reliable delivery of I2NP messages. +Transports support I2NP message delivery ONLY. +They are not general-purpose data pipes. +{%- endtrans %}
      • {% trans -%} Maintain a set of router addresses, one or more for each transport, -that the router publishes as its global contact information (the RouterInfo) +that the router publishes as its global contact information (the RouterInfo). +Each transport may connect using one of these addresses, which may be +IPv4 or (as of version 0.9.8) IPv6. {%- endtrans %}
      • {% trans %}Selection of the best transport for each outgoing message{% endtrans %}
      • {% trans %}Queueing of outbound messages by priority{% endtrans %}
      • @@ -70,6 +77,7 @@ and refusing outbound and inbound connections to those peers

        {% trans -%} The transport subsystem maintains a set of router addresses, each of which lists a transport method, IP, and port. These addresses constitute the advertised contact points, and are published by the router to the network database. +Addresses may also contain an arbitrary set of additional options. {%- endtrans %}

        {% trans %}Typical scenarios are:{% endtrans %} @@ -90,7 +98,7 @@ directly-accessible IP and ports.

        {% trans i2np=site_url('docs/protocol/i2np'), streaming=site_url('docs/api/streaming'), datagrams=site_url('docs/api/datagrams') -%} -The transport system delivers I2NP messages. The transport selected for any message is +The transport system delivers I2NP messages only. The transport selected for any message is independent of the upper-layer protocols and contents (router or client messages, whether an external application was using TCP or UDP to connect to I2P, whether the upper layer was using the streaming library @@ -139,12 +147,6 @@ Additional transports may be developed, including:

      • {% trans %}An "indirect" transport for routers that are not reachable by all other routers (one form of "restricted routes"){% endtrans %}
      -

      {% trans thread='http://'+i2pconv('zzz.i2p')+'/topics/109' -%} -IPv6: The existing transports must be enhanced to support multiple addresses within a single transport, -including IPV6 addresses. Currently, a transport may only advertise a single IPV4 address. -See this thread for discussion. -{%- endtrans %}

      -

      {% trans -%} Work continues on adjusting default connection limits for each transport. I2P is designed as a "mesh network", where it is assumed that any router can connect to any other router. @@ -159,7 +161,7 @@ However, as NTCP buffers are partially in the kernel and SSU buffers are on the that assumption is difficult to verify. {%- endtrans %}

      -

      {% trans pdf='http://www.cse.chalmers.se/%7Ejohnwolf/publications/hjelmvik_breaking.pdf' -%} +

      {% trans pdf='http://www.iis.se/docs/hjelmvik_breaking.pdf' -%} Analyze Breaking and Improving Protocol Obfuscation and see how transport-layer padding may improve things. {%- endtrans %}

      diff --git a/i2p2www/pages/site/docs/transport/ntcp.html b/i2p2www/pages/site/docs/transport/ntcp.html index 25aac00e..7fda9370 100644 --- a/i2p2www/pages/site/docs/transport/ntcp.html +++ b/i2p2www/pages/site/docs/transport/ntcp.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}NTCP{% endblock %} -{% block lastupdated %}{% trans %}August 2010{% endtrans %}{% endblock %} -{% block accuratefor %}0.8{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

      {% trans %}NTCP (NIO-based TCP){% endtrans %}

      @@ -10,6 +10,7 @@ NTCP is one of two transports currently implement The other is SSU. NTCP is a Java NIO-based transport introduced in I2P release 0.6.1.22. Java NIO (new I/O) does not suffer from the 1 thread per connection issues of the old TCP transport. +NTCP-over-IPv6 is supported as of version 0.9.8. {%- endtrans %}

      {% trans -%} @@ -118,16 +119,18 @@ The mapping between these steps and the messages sent between I2P routers, is marked in bold. {%- endtrans %}

        -
      1. {% trans %}Alice generates a secret 226-bit integer x. She then calculates X = g^x mod p.{% endtrans %}
      2. +
      3. {% trans %}Alice generates a secret integer x. She then calculates X = g^x mod p.{% endtrans %}
      4. {% trans %}Alice sends X to Bob (Message 1).{% endtrans %}
      5. -
      6. {% trans %}Bob generates a secret 226-bit integer y. He then calculates Y = g^y mod p.{% endtrans %}
      7. +
      8. {% trans %}Bob generates a secret integer y. He then calculates Y = g^y mod p.{% endtrans %}
      9. {% trans %}Bob sends Y to Alice.(Message 2){% endtrans %}
      10. {% trans %}Alice can now compute sessionKey = Y^x mod p.{% endtrans %}
      11. {% trans %}Bob can now compute sessionKey = X^y mod p.{% endtrans %}
      12. {% trans %}Both Alice and Bob now have a shared key sessionKey = g^(x*y) mod p.{% endtrans %}
      -

      {% trans -%} +

      {% trans crypto=site_url('docs/how/cryptography') -%} The sessionKey is then used to exchange identities in Message 3 and Message 4. +The exponent (x and y) length for the DH exchange is documented on the +cryptography page. {%- endtrans %}
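The exchange in the list above reduces to one modular exponentiation per message plus one per side for the shared key. Here is a minimal BigInteger sketch with a placeholder modulus; the real prime and exponent lengths are defined on the cryptography page, not here.
{% highlight lang='java' %}
// Minimal sketch of the DH arithmetic above: X = g^x mod p, Y = g^y mod p,
// sessionKey = g^(x*y) mod p. The modulus and exponent sizes here are
// placeholders, NOT I2P's actual parameters.
import java.math.BigInteger;
import java.security.SecureRandom;

public final class NtcpDhSketch {

    public static void main(String[] args) {
        SecureRandom rnd = new SecureRandom();
        BigInteger p = BigInteger.probablePrime(512, rnd); // placeholder modulus
        BigInteger g = BigInteger.valueOf(2);

        BigInteger x = new BigInteger(226, rnd);   // Alice's secret exponent (size illustrative)
        BigInteger y = new BigInteger(226, rnd);   // Bob's secret exponent

        BigInteger X = g.modPow(x, p);             // Alice sends X (Message 1)
        BigInteger Y = g.modPow(y, p);             // Bob sends Y (Message 2)

        BigInteger aliceKey = Y.modPow(x, p);      // Alice: Y^x mod p
        BigInteger bobKey   = X.modPow(y, p);      // Bob:   X^y mod p

        System.out.println("shared secrets match: " + aliceKey.equals(bobKey));
    }
}
{% endhighlight %}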

      {% trans %}Message 1 (Session Request){% endtrans %}

      diff --git a/i2p2www/pages/site/docs/transport/ssu.html b/i2p2www/pages/site/docs/transport/ssu.html index d846503c..131a3cc9 100644 --- a/i2p2www/pages/site/docs/transport/ssu.html +++ b/i2p2www/pages/site/docs/transport/ssu.html @@ -1,7 +1,7 @@ {% extends "global/layout.html" %} {% block title %}{% trans %}SSU Transport{% endtrans %}{% endblock %} -{% block lastupdated %}{% trans %}May 2013{% endtrans %}{% endblock %} -{% block accuratefor %}0.9.6{% endblock %} +{% block lastupdated %}{% trans %}December 2013{% endtrans %}{% endblock %} +{% block accuratefor %}0.9.9{% endblock %} {% block content %}

      {% trans %}Secure Semireliable UDP{% endtrans %} (SSU)

      Note: IPv6 information is preliminary. @@ -16,6 +16,7 @@ The other is NTCP. SSU is the newer of the two transports, introduced in I2P release 0.6. In a standard I2P installation, the router uses both NTCP and SSU for outbound connections. +SSU-over-IPv6 is supported as of version 0.9.8. {%- endtrans %}

      {% trans %}SSU Services{% endtrans %}

      @@ -81,7 +82,7 @@ The MTU value is adjusted based on the percentage of packets that are retransmit {%- endtrans %}

      {% trans -%} -For both MTU values, it is desirable that (MTU % 16) == 12, so that +For both MTU values, it is desirable that (MTU % 16) == 12, so that the payload portion after the 28-byte IP/UDP header is a multiple of 16 bytes, for encryption purposes. {%- endtrans %}

      @@ -118,7 +119,8 @@ honor that when a connection is established.

{% trans -%} For IPv6, the minimum MTU is 1280. The IPv6 IP/UDP header is 48 bytes, -so we use an MTU where (MTN % 16 == 0), which is true for 1280. +so we use an MTU where (MTU % 16 == 0), which is true for 1280. +The maximum IPv6 MTU is 1472. {%- endtrans %}
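The alignment rules above amount to a simple modulus check; a tiny illustrative sketch (names are not router code):
{% highlight lang='java' %}
// For IPv4/UDP the 28-byte IP+UDP header means the payload is a multiple of 16
// when MTU % 16 == 12; for IPv6 the 48-byte header means MTU % 16 == 0 works,
// which holds for the 1280-byte minimum and the 1472-byte maximum.
public final class SsuMtuSketch {

    static boolean alignedIpv4(int mtu) { return mtu % 16 == 12; }
    static boolean alignedIpv6(int mtu) { return mtu % 16 == 0; }

    public static void main(String[] args) {
        System.out.println("IPv4 1484: " + alignedIpv4(1484)); // payload 1456 = 91 * 16
        System.out.println("IPv6 1280: " + alignedIpv6(1280)); // stated minimum
        System.out.println("IPv6 1472: " + alignedIpv6(1472)); // stated maximum
    }
}
{% endhighlight %}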

      {% trans %}Message Size Limits{% endtrans %}

      @@ -239,6 +241,7 @@ The message sequence is as follows:

      {% trans %}Connection establishment (direct){% endtrans %}

      {% trans -%} Alice connects directly to Bob. +IPv6 is supported as of version 0.9.8. {%- endtrans %}

      {% highlight %} Alice Bob @@ -344,8 +347,9 @@ After the hole punch, the session is established between Alice and Charlie as in

      IPv6 notes: +IPv6 is supported as of version 0.9.8. Alice-Bob communication may be via IPv4 or IPv6. -Bob-Charlie and Alice-Charlie communication is via IPv4. +Bob-Charlie and Alice-Charlie communication is via IPv4 only.

      diff --git a/i2p2www/pages/site/docs/tunnels/old-implementation.html b/i2p2www/pages/site/docs/tunnels/old-implementation.html index 88c5c40d..456f0c25 100644 --- a/i2p2www/pages/site/docs/tunnels/old-implementation.html +++ b/i2p2www/pages/site/docs/tunnels/old-implementation.html @@ -492,7 +492,7 @@ vulnerability to predecessor attacks. While the endpoints and gateways of those tunnels will be randomly distributed across the network (perhaps even including the tunnel creator in that set), another alternative is to use the tunnel pathways themselves to pass along the request and response, as is done -in TOR. This, however, may lead to leaks +in TOR. This, however, may lead to leaks during tunnel creation, allowing peers to discover how many hops there are later on in the tunnel by monitoring the timing or packet count as the tunnel is built. Techniques could be used to minimize this issue, such as using each of diff --git a/i2p2www/pages/site/faq.html b/i2p2www/pages/site/faq.html index f0e8ac46..153de3f6 100644 --- a/i2p2www/pages/site/faq.html +++ b/i2p2www/pages/site/faq.html @@ -50,8 +50,8 @@

      {% trans %}What systems will I2P run on?{% endtrans %} ({{ _('link') }})

      -

      {% trans trac=i2pconv('trac.i2p2.i2p') -%} -While I2P has been reported to run PCs as meagre as a low-end Pentium II with 64 MB of RAM, you'll have a much better experience on a Pentium III (or better) with 128MB of RAM (or more). A chart comparing the performance of the various JREs can be found at http://{{ trac }}/wiki/java, but in short: it's at all possible, use Sun/Oracle Java or OpenJDK. +

{% trans chart='https://trac.i2p2.de/wiki/java' -%} +While I2P has been reported to run on PCs as meagre as a low-end Pentium II with 64 MB of RAM, you'll have a much better experience on a Pentium III (or better) with 128MB of RAM (or more). A chart comparing the performance of the various JREs can be found at {{ chart }}, but in short: if at all possible, use Sun/Oracle Java or OpenJDK. {%- endtrans %}

      {% trans -%} @@ -66,7 +66,7 @@ I2P has been tested on Windows, Linux, FreeBSD (see the note -

    • {{ i2pconv('trac.i2p2.i2p') }} ticket (preferred method)
    • +
    • {{ i2pconv('trac.i2p2.i2p') }} ticket (preferred method)
    • {{ i2pconv('pastethis.i2p') }} and follow up on IRC in #i2p
    • {% trans -%} Discuss with the developers on IRC in #i2p-dev @@ -101,6 +101,7 @@ Presence on this list does not imply endorsement.
      • http://i2host.i2p/cgi-bin/i2hostetag
      • http://stats.i2p/cgi-bin/newhosts.txt
      • +
      • http://no.i2p/export/alive-hosts.txt
      • @@ -159,7 +160,7 @@ garbage collection. Increase the setting wrapper.java.maxmemory in
      • {% trans -%} -Is the CPU usage simply higher than you would like, or is it pegged at 100% for a long time? +Is the CPU usage simply higher than you would like, or is it pegged at 100% for a long time? If it's pegged, this could be a bug. Look in the logs for clues. {%- endtrans %}
      • @@ -203,7 +204,7 @@ All traffic you route is internal to the I2P network, you are not an {% trans %}Is my router an "exit node" to the regular Internet? I don't want it to be.{% endtrans %} ({{ _('link') }})

        {% trans -%} -No. Unlike Tor, +No. Unlike Tor, "exit nodes" or "outproxies" are not an inherent part of the network. Only volunteers who set up and run separate applications will relay traffic to the regular Internet. There are very, very few of these. @@ -290,7 +291,7 @@ and set your outproxy list to 'false.i2p' (only). Then stop and restart the eepProxy. If it doesn't work, the outproxy is not up. It is not I2P's fault. If your primary reason to use an anonymous network is to anonymously access sites -on the regular Internet, you should probably try Tor. +on the regular Internet, you should probably try Tor. {%- endtrans %}

        {% trans %}I can't access https:// or ftp:// sites through I2P.{% endtrans %} @@ -368,16 +369,16 @@ There is additional discussion about this on Tor. +If this type of service is required, try Tor. {%- endtrans %}

        {% trans %}Most of the eepsites within I2P are down?{% endtrans %} ({{ _('link') }})

        -

        {% trans perv=i2pconv('perv.i2p') -%} +

        {% trans eepstatus='http://'+i2pconv('identiguy.i2p') -%} If you consider every eepsite that has ever been created, yes, most of them are down. People and eepsites come and go. A good way to get started in I2P is check out a list of eepsites that are currently up. -{{ perv }} tracks active eepsites. +{{ eepstatus }} tracks active eepsites. {%- endtrans %}

        {% trans %}How do I set up my own eepsite?{% endtrans %} @@ -450,8 +451,8 @@ with
      • {% trans -%} -Go to http://localhost:7657/configadvanced.jsp -and add a new option: consolePassword=foo (or whatever password you want) +Go to http://localhost:7657/configui +and add a console username and password if desired. {%- endtrans %}
      • @@ -463,10 +464,10 @@ and hit "Graceful restart", which restarts the JVM and reloads the client applic

        {% trans -%} -After that fires up, you should now be able to reach your console remotely. -You will be prompted for a username and password though - the username is -"admin" and the password is whatever you specified in step 2 above. Note: the -0.0.0.0 above specifies an interface, not a network or netmask. 0.0.0.0 +After that fires up, you should now be able to reach your console remotely. Reload the router at +http://127.0.0.1:7657 and you will be prompted for the username and password you specified in step 2 +above if your browser supports the authentication popup. Note: the +0.0.0.0 above specifies an interface, not a network or netmask. 0.0.0.0 means "bind to all interfaces", so it can be reachable on 127.0.0.1:7657 as well as any LAN/WAN IP. {%- endtrans %}

        diff --git a/i2p2www/pages/site/get-involved/bounties/index.html b/i2p2www/pages/site/get-involved/bounties/index.html index 4ccb7c7d..3a8d5f91 100644 --- a/i2p2www/pages/site/get-involved/bounties/index.html +++ b/i2p2www/pages/site/get-involved/bounties/index.html @@ -49,13 +49,6 @@ etc), and the like.

        [{{ _('vacant') }}]

        €100 EUR

        - -

        {{ _('Make I2P IPv6 native') }}

        -

        {{ _('Proposal in development') }}

        -

        Amiga4000

        -

        [{{ _('vacant') }}]

        -

        €100 EUR and 50BTC

        -

        {{ _('I2P package in Debian and Ubuntu mirrors') }}

        {{ _('Proposal in development') }}

        @@ -68,7 +61,7 @@ etc), and the like.

        {{ _('Done, phase of verification') }}

        psychonaut

        giv

        -

        €30 EUR and 118,34BTC

        +

        €30 EUR and 118,34BTC, of which 30 EUR and 68BTC paid to giv

        {{ _('Unit tests and Multi-router Simulation') }}

        @@ -110,6 +103,13 @@ etc), and the like. + + + + + + + @@ -152,7 +152,7 @@ etc), and the like. - +

        {{ _('Name') }}

        {{ _('Status') }}

        {{ _('Dev team') }}*

        {{ _('Make I2P IPv6 native') }}

        done

        Amiga4000

        I2P team

        €100 EUR and 50BTC

        {{ _('Setting up a SILC server') }}

        withdrawn and bounty divided between ReturningNovice and the general fund

        GCJ support

        Claimed

        Claimed

        jrandom

        diff --git a/i2p2www/pages/site/get-involved/bounties/ipv6.html b/i2p2www/pages/site/get-involved/bounties/ipv6.html index 0da3382d..aa05ef8d 100644 --- a/i2p2www/pages/site/get-involved/bounties/ipv6.html +++ b/i2p2www/pages/site/get-involved/bounties/ipv6.html @@ -9,6 +9,8 @@ into I2P I withdrawal the vuze bounty and offer a IPv6 bounty. To claim this bounty, the I2P router needs to run full on native IPv6 connections like it does on IPv4. {%- endtrans %}

        +
        +Bounty is done as of I2P 0.9.8 and money paid to the I2P team. {% endblock %} {% block bountynotes %} diff --git a/i2p2www/pages/site/get-involved/bounties/netdb.html b/i2p2www/pages/site/get-involved/bounties/netdb.html index 2d0b1aed..48474a6f 100644 --- a/i2p2www/pages/site/get-involved/bounties/netdb.html +++ b/i2p2www/pages/site/get-involved/bounties/netdb.html @@ -27,7 +27,7 @@ completed and should therefore be minimized.
        Attack resilience
        The solution should ideally be (or be extensible to be) resilient against -sybil and eclipse attacks. +sybil and eclipse attacks.
        Scalability
        @@ -53,7 +53,7 @@ still maintain an unlimited search horizon. -Trac +Trac

        162.5€

        @@ -73,7 +73,7 @@ still maintain an unlimited search horizon.
        i2p.zzz.kademlia / i2psnarkdht is the most likely base implementation.
        -

        Trac

        +

        Trac

        162.5€

        diff --git a/i2p2www/pages/site/get-involved/bounties/unit-tests.html b/i2p2www/pages/site/get-involved/bounties/unit-tests.html index 9a14fe2c..6e61930a 100644 --- a/i2p2www/pages/site/get-involved/bounties/unit-tests.html +++ b/i2p2www/pages/site/get-involved/bounties/unit-tests.html @@ -93,7 +93,7 @@ To collect this bounty, the automated unit tests of the router

        {% trans -%} To collect this bounty, a new set of unit tests must meet a -measured code coverage of 90% of the streaming lib +measured code coverage of 90% of the streaming lib (i2p/apps/ministreaming/ and i2p/apps/streaming/). {%- endtrans %}


        @@ -104,7 +104,7 @@ measured code coverage of 90% of the streaming lib {%- endtrans %}

        {% trans -%} -To collect this bounty, all above unit tests must meet the 100% +To collect this bounty, all above unit tests must meet the 100% coverage marker (except for log statements). {%- endtrans %}


        diff --git a/i2p2www/pages/site/get-involved/develop/applications.html b/i2p2www/pages/site/get-involved/develop/applications.html index 79659d97..d23578d4 100644 --- a/i2p2www/pages/site/get-involved/develop/applications.html +++ b/i2p2www/pages/site/get-involved/develop/applications.html @@ -578,9 +578,9 @@ Contact us if you would like to contribute. I2Phex

      -See also all the plugins on plugins.i2p, -the applications and source code listed on echelon.i2p, -and the application code hosted on git.repo.i2p. +See also all the plugins on plugins.i2p, +the applications and source code listed on echelon.i2p, +and the application code hosted on git.repo.i2p.

      See also the bundled applications in the I2P distribution - SusiMail and I2PSnark.

      diff --git a/i2p2www/pages/site/get-involved/develop/developers-keys.html b/i2p2www/pages/site/get-involved/develop/developers-keys.html index 48d69fa4..7326151b 100644 --- a/i2p2www/pages/site/get-involved/develop/developers-keys.html +++ b/i2p2www/pages/site/get-involved/develop/developers-keys.html @@ -236,6 +236,21 @@ gO8flnqsOSGb2CtoQ3i3Fqk0HNq2if47LVyaLwgOyoPOhYkDDGr9Wg gBp+svAHALMoGvh0lemKQZpQfdMgZ33k2l2o3Udvj3tpB/KwIDAQAB [end] +[pubkey digit@mail.i2p] +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDAo+xF9MoHp2S4T/ +smvUtDL7Qcjis5LyqDYELo/IqER5Nn4CNNA9SCrBKm6nlzDjAj7X8J +zbTTvOT9WqTzwHrWjNcG4rAWsG22kbvtlujy3sbgO7VckQolzm+psM +mySqVwAzOZm9ShSxBeb99oULNQwRaW+QrGiB2mBFeCnY1kywIDAQAB +[end] + + +[pubkey dg2-new@mail.i2p] +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDtrD2xv9Hvsa+VY9 +ymQ2BgNzDenRz9xuSsrhVNqAjKVHs47J/qm7693NhnGlLxofeiF3py +982CcCjnyEa3IfBD82dsuBgYUj09GVhcJyaA1ttNBNSCh5pDJfGFBs +pLYjMbWl/TAdlxkWm+HgRCzhQe5vkLcx1R8nXCxmPjC0Ep3QIDAQAB +[end] +

      {{ _('Developer Transport Keys') }}

      {% trans monotone=site_url('get-involved/guides/monotone') -%} @@ -405,6 +420,13 @@ At7mOUETxO3sLrf70kR9Mn4p44c4NKuFF4APmeYaFfIP9N5ZkBxY+5 hMMlffgZFJeDIEEed3eIttUPsIdW8U+2XQM1zF9R77+K0ZJQIDAQAB [end] +[pubkey dg2-transport-new@mail.i2p] +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4YQS3iFx7unaWtY +FFX4VuT/m5o5012LF5UU9LFJmnnnPQpJzTwRrBN8WKFm5AACbg/pdR +PzTJpv6C0kKDavKddKxc4HIuNKnTsB1qd5zOpKBNlxm6TT4N3Pdhb5 +CEXZ1lvlv4PloKFIti7ptAa4HpTs6Gd3a6nMQyHwgP0Ke44wIDAQAB +[end] + {% endblock %} diff --git a/i2p2www/pages/site/get-involved/develop/license-agreements.html b/i2p2www/pages/site/get-involved/develop/license-agreements.html index 2a756ac0..e5219ff0 100644 --- a/i2p2www/pages/site/get-involved/develop/license-agreements.html +++ b/i2p2www/pages/site/get-involved/develop/license-agreements.html @@ -792,6 +792,33 @@ Zf9i4Wqw8wKgkWIIbZ2+V+zP83FLcX6ga+GMr8tf/bwWjSe1PN7mZAQlPB4p8Qc= =NxxT -----END PGP SIGNATURE----- +dg: + +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA1 + +I affirm the following with regard to I2P: + + * Unless marked otherwise, all code I commit is implicitly licensed under the component's primary license + * If specified in the source, the code may be explicitly licensed under one of the component's alternate licenses + * I have the right to release the code I commit under the terms I am committing it + +-----BEGIN PGP SIGNATURE----- + +iQIcBAEBAgAGBQJSbZwRAAoJEOLR0LYl1+FTlSIP/1+FGMm08vLk7V7v7fxKrHHf +GOmUa5p706sVMKVjM5cgBAkd1qkon5QKwjDOOHxeN9vu2EZbicRawn19YaOOHje5 +peW4n/guYb8osflGNkBH3vy3EekB/PfuppIxjM7HeO08rJ9VrrYa6hseYvdVpMsi +E2LtF4kMekXxOvuQI4kKL0gT+KCW7OtLWVkRzhsDqLMyP/5JG3uTcI/PAe+y2Iip +ZZBLLkf9gBdPGOk/NeSEX510jlKqSC7TcyVnQe+BH0oLNG2SzlzeFpiTKdKObiq8 +O9rxW2yga6+7bPIgoaoxEK2zzHEGubqqHKgMUQKHs4IW3TYEGBFeCK6HUXpvObIh +wuZK32JDZCytAzV8VcPWlA2tOInkdHZj5A5K1idvM7BLSQ2b1xzRbkflER9ZaPQ6 +NDpkz8fiJYgIU7G8CqTK/8y9bjD+zITz3yOSAM3BGax/NeIs86drovgu2B1CeE0L +BXB/UwWZQhSE1W9AZJvbkPm4wLd9g8DihOJHArbxpEDj97W+faLGLLejm8lLB2Z5 +uBQwb4YM5Lt3FKQPqT8h6yRNtU5inIdrzbQEcKlMfOGiuKxXFlTfxAAJkOLnnK8c +OFE4iD6nFr78yT/jo1BOOc1gs+xiq3P+Cxaa95v/F08JTwwwLqA/94PjTs57I2Ef +cFuIq+msmE5mtJW+czFE +=syT4 +-----END PGP SIGNATURE----- {% endblock %} diff --git a/i2p2www/pages/site/get-involved/donate.html b/i2p2www/pages/site/get-involved/donate.html index 3d4e7c3b..ca585707 100644 --- a/i2p2www/pages/site/get-involved/donate.html +++ b/i2p2www/pages/site/get-involved/donate.html @@ -7,7 +7,38 @@ The details of how you can make your contribution are provided below. {%- endtrans %}

      +

      Bitcoin

      +

      {% trans date='December 2010', +cointype='Bitcoin', +coinurl='http://bitcoin.org', +account='1HkJCceXf7of1sTNRVJbXiZHfDTLL71Siy' -%} +As of {{ date }}, eche|on has been running a +{{ cointype }} account for the I2P project. +If you'd like to donate using {{ cointype }}, just transfer your +desired amount of coins to the account {{ account }} +and leave eche|on a note if you'd like your donation to be +mentioned on the I2P webpage. +{%- endtrans %}

      +

      {% trans %}For easy usage, use the QR code below!{% endtrans %}

      +BTC donation QR code +
      +

      Litecoin

      +

      {% trans date='September 2013', +cointype='Litecoin', +coinurl='http://www.litecoin.org', +account='LefFK8mk55zzfNkongAeVSiJ5rmrCkxdx5' -%} +As of {{ date }}, eche|on has been running a +{{ cointype }} account for the I2P project. +If you'd like to donate using {{ cointype }}, just transfer your +desired amount of coins to the account {{ account }} +and leave eche|on a note if you'd like your donation to be +mentioned on the I2P webpage. +{%- endtrans %}

      +

      {% trans %}For easy usage, use the QR code below!{% endtrans %}

      +LTC donation QR code + +

      PayPal


      {% trans account='echelon@i2pmail.org' -%} @@ -86,12 +117,8 @@ You can donate direct via PayPal to the account "{{ account }}". {{ _('Flattr this') }}

      -

      Bitcoin

      -

      {% trans account='1HkJCceXf7of1sTNRVJbXiZHfDTLL71Siy' -%} -As of December 2010, eche|on has been running a Bitcoin account for the I2P project. -If you'd like to donate using Bitcoin, just transfer your desired amount of coins to the account -{{ account }} and leave eche|on a note if you'd like your donation to be mentioned on the I2P webpage. -{%- endtrans %}

      + +

      SnailMail

      {% trans -%} If you want to keep more or less anonymous, the option to send money via mail is also available. But it is less secure as the envelope can be lost on the way to us. diff --git a/i2p2www/pages/site/get-involved/guides/monotone.html b/i2p2www/pages/site/get-involved/guides/monotone.html index 362b62f1..075598a4 100644 --- a/i2p2www/pages/site/get-involved/guides/monotone.html +++ b/i2p2www/pages/site/get-involved/guides/monotone.html @@ -228,7 +228,7 @@

      {% trans -%} - The default Monotone trust policy is way too lax for our requirements: every comitter is trusted by default. + The default Monotone trust policy is way too lax for our requirements: every committer is trusted by default. That is not acceptable for I2P development. {%- endtrans %}

      @@ -349,7 +349,7 @@ directory where i2p.mtn was created and attempt a checkout of the I {% trans -%} If you are satisfied with results, restore the backup of monotonerc that was created above. If you didn't create a backup - as advised, re-read Setting up trust evaluation hools. + as advised, re-read Setting up trust evaluation hooks. {%- endtrans %}

      diff --git a/i2p2www/pages/site/get-involved/guides/new-developers.html b/i2p2www/pages/site/get-involved/guides/new-developers.html index 277b62f0..d935e00f 100644 --- a/i2p2www/pages/site/get-involved/guides/new-developers.html +++ b/i2p2www/pages/site/get-involved/guides/new-developers.html @@ -102,7 +102,7 @@ The initial pull may take several hours using the tunnel. If it fails after a partial pull, simply rerun it, it will start where it left off. If you are in a hurry, use the non-anonymous access. {%- endtrans %}

      -

      {% trans viewmtn='http://'+i2pconv('stats.i2p')+'/cgi-bin/viewmtn/' -%} +

      {% trans viewmtn='http://'+i2pconv('killyourtv.i2p')+'/viewmtn/' -%} A full list of branches, including i2p.i2p and i2p.www can be found on viewmtn. {%- endtrans %}

      {% trans monotone=site_url('get-involved/guides/monotone') -%} @@ -131,10 +131,10 @@ see the application development guide. {%- endtrans %}

      {% trans %}Development ideas{% endtrans %}

      -

      {% trans zzz=i2pconv('zzz.i2p'), todo=site_url('get-involved/todo'), trac=i2pconv('trac.i2p2.i2p') -%} +

      {% trans zzz=i2pconv('zzz.i2p'), todo=site_url('get-involved/todo'), trac='https://trac.i2p2.de/report/1' -%} See zzz's TODO lists, this website's TODO list or -Trac +Trac for ideas. {%- endtrans %}

      diff --git a/i2p2www/pages/site/get-involved/index.html b/i2p2www/pages/site/get-involved/index.html index e73b584e..3d9636d6 100644 --- a/i2p2www/pages/site/get-involved/index.html +++ b/i2p2www/pages/site/get-involved/index.html @@ -22,9 +22,10 @@ Fix up the Wikipedia article about I2P in your language. Tell your friends. {%- endtrans %}
    • {{ _('Testing') }} — -{% trans monotone=site_url('get-involved/guides/monotone'), trac=i2pconv('trac.i2p2.i2p') -%} +{% trans monotone=site_url('get-involved/guides/monotone'), +trac='https://trac.i2p2.de/report/1' -%} Run the latest builds from monotone -and report results on #i2p or as bugs on Trac. +and report results on #i2p or as bugs on Trac. {%- endtrans %}
    • {{ _('Documentation') }} — {% trans -%} @@ -51,9 +52,11 @@ Write or port applications for I2P! There's some guidelines and a list of ideas on the applications page. {%- endtrans %}
    • {{ _('Coding') }} — -{% trans trac=i2pconv('trac.i2p2.i2p'), zzz=i2pconv('zzz.i2p'), newdevs=site_url('get-involved/guides/new-developers') -%} +{% trans trac='https://trac.i2p2.de/report/1', +zzz=i2pconv('zzz.i2p'), +newdevs=site_url('get-involved/guides/new-developers') -%} There's plenty to do if you know Java or are ready to learn. -Check for open tickets on Trac +Check for open tickets on Trac or the TODO list on {{ zzz }} for some ideas on where to start. See the new developer's guide for details. diff --git a/i2p2www/pages/site/index.html b/i2p2www/pages/site/index.html index a8e7237b..1b483657 100644 --- a/i2p2www/pages/site/index.html +++ b/i2p2www/pages/site/index.html @@ -13,13 +13,16 @@

      {{ _('What is I2P?') }}

      • {% trans -%} -I2P is a full darknet implementation - a network within a network, and is intended to protect against monitoring by third parties, such as hostile governments or ISPs. +I2P is an anonymous overlay network - a network within a network. It is intended to protect communication from dragnet surveillance and monitoring by third parties such as ISPs. {% endtrans %}
      • {% trans -%} -I2P is used by many people who care about their privacy, as well as those in high-risk situations. It is designed to protect activists, oppressed people, journalists and whistle-blowers - as well as the average person. +I2P is used by many people who care about their privacy: activists, oppressed people, journalists and whistleblowers, as well as the average person. {% endtrans %}
      • {% trans -%} -I2P runs on Java and is therefore available anywhere Java will run, including desktops, embedded systems (like the Raspberry Pi) and Android phones. +No network can be "perfectly anonymous". The continued goal of I2P is to make attacks more and more difficult to mount. Its anonymity will get stronger as the size of the network increases and with ongoing academic review. +{% endtrans %}
      • +
      • {% trans -%} +I2P is available on desktops, embedded systems (like the Raspberry Pi) and Android phones. Help spread the word! {% endtrans %}
      • {% trans %}Read more…{% endtrans %} @@ -31,37 +34,37 @@ I2P runs on Java and is therefore available anywhere Java will run, including de
        • {% trans supported=site_url('docs/applications/supported') -%} - Email Integrated web mail interface, plugin for serverless email. + Email: Integrated web mail interface, plugin for serverless email. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - Web browsing Anonymous websites, gateways to and from the public Internet. + Web browsing: Anonymous websites, gateways to and from the public Internet. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - Blogging and forums Blogging and Syndie plugins. + Blogging and forums: Blogging and Syndie plugins. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - Website hosting Integrated anonymous web server. + Website hosting: Integrated anonymous web server. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - Real-time chat Instant messaging and IRC clients. + Real-time chat: Instant messaging and IRC clients. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - File sharing ED2K and Gnutella clients, integrated BitTorrent client. + File sharing: ED2K and Gnutella clients, integrated BitTorrent client. {%- endtrans %}
        • {% trans supported=site_url('docs/applications/supported') -%} - Decentralized file storage Tahoe-LAFS distributed filesystem plugin. + Decentralized file storage: Tahoe-LAFS distributed filesystem plugin. {%- endtrans %}
        • diff --git a/i2p2www/pages/site/links.html b/i2p2www/pages/site/links.html index eb137ba3..63b096e0 100644 --- a/i2p2www/pages/site/links.html +++ b/i2p2www/pages/site/links.html @@ -9,18 +9,26 @@ See also the page with
        • تجريب - جرب آخر اصدار من monotone - او #i2p ثم عبر عن المشاكل فيTrac. + او #i2p ثم عبر عن المشاكل فيTrac.
        • وثائق ساعد في تكميل الاجزاء الناقصة او الغير كاملة. ترجم الصفحات الى لغات أخرى. @@ -29,7 +29,7 @@ applications page.
        • برمجة هناك العديد من الاشياء التي يمكنك القيام بها اذا كنت تجيد لغة برمجة جافا او مستعد للتعلم. -انظر هنا Trac +انظر هنا Trac وفي قائمة الاعمال zzz.i2p للحصول على بعض الافكار. أنظر دليل المطورين الجدد . diff --git a/www.i2p2/pages/translations/getinvolved_de.html b/www.i2p2/pages/translations/getinvolved_de.html index 5738694f..6fc950d4 100644 --- a/www.i2p2/pages/translations/getinvolved_de.html +++ b/www.i2p2/pages/translations/getinvolved_de.html @@ -14,7 +14,7 @@ dich einzubringen! Hier ist eine Liste die dir beim Start hilft:

          • Testen — Benutze die letzten Versionen aus dem Monotone Archive -und berichte Ergebnisse in #i2p oder Fehler und Bugs auf Trac.
          • +und berichte Ergebnisse in #i2p oder Fehler und Bugs auf Trac.
          • Dokumentation — Hilf mit beim aktualisieren und erstellen der fehlenden Dokumente auf der Webseite, übersetze die Webseite in andere Sprachen!
          • @@ -31,7 +31,7 @@ Schreibe oder portiere eine Anwendung für I2P! Es gibt ein paar Richtlienen und eine Liste von Ideen auf der Anwendungsseite.
          • Coding — Es gibt viel zu tun falls Du Java kannst oder bereit bist, Java zu lernen. -Kontrolliere Trac nach offenen Tickets oder +Kontrolliere Trac nach offenen Tickets oder die TODO Liste auf zzz.i2p für einige Ideen zum Start. Schaue zur Anleitung für neue Entwickler Seite für Details.
          • Analysen — diff --git a/www.i2p2/pages/translations/getinvolved_el.html b/www.i2p2/pages/translations/getinvolved_el.html index e94bc6a9..9ad2fc6b 100644 --- a/www.i2p2/pages/translations/getinvolved_el.html +++ b/www.i2p2/pages/translations/getinvolved_el.html @@ -24,7 +24,7 @@ Πείτε το στους φίλους σας.
          • Testing — Τρέξτε τις τελευταίες εκδόσεις από το monotone -και αναφέρετε τα αποτελέσματα στο #i2p ή σαν bugs στο Trac. +και αναφέρετε τα αποτελέσματα στο #i2p ή σαν bugs στο Trac.
          • Τεκμηρίωση — Βοηθήστε να φτιάξουμε τα μέρη της ιστοσελίδας που είναι παλιά ή ατελή. Μεταφράστε τις σελίδες σε άλλες γλώσσες. @@ -41,7 +41,7 @@ ιδέες στη σελίδα για τις εφαρμογές.
          • Coding — Υπάρχουν αρκετά πράγματα που πρέπει να κάνετε εάν γνωρίζετε Java ή είστε -έτοιμοι να μάθετε. Ελέγξτε για ανοιχτά tickets στο Trac +έτοιμοι να μάθετε. Ελέγξτε για ανοιχτά tickets στο Trac ή την TODO λίστα στο zzz.i2p για μερικές ιδέες για να ξεκινήσετε. Δείτε τον οδηγό για νέους developers για λεπτομέρειες.
          • Μετάφραση — diff --git a/www.i2p2/pages/translations/getinvolved_fr.html b/www.i2p2/pages/translations/getinvolved_fr.html index 86775f6f..2b3dee19 100644 --- a/www.i2p2/pages/translations/getinvolved_fr.html +++ b/www.i2p2/pages/translations/getinvolved_fr.html @@ -17,7 +17,7 @@ Corrigez ou améliorez l'article I2P Parlez-en à vos amis.
          • Du test — Essayez les dernières version à partir de monotone et donnez vos résultats ou remarques sur -#i2p ou en tant que bogues sur Trac. +#i2p ou en tant que bogues sur Trac.
          • Documentation — Aidez-nous à corriger les parties du site obsolètes ou incomplètes. Traduisez les pages dans d'autres langues. @@ -33,7 +33,7 @@ de recherches, un superviseur de site eep... ça n'est pas si difficile pour la la page applications.
          • Programmation — Il y a beaucoup à faire si vous connaissez Java ou si vous êtes prêts à vous y mettre. -Vérifiez les tickets ouverts sur Trac +Vérifiez les tickets ouverts sur Trac ou la liste de zzz.i2p pour avoir des idées sur un point de départ. Voyez le Guide du nouveau développeur.
          • Traduction — diff --git a/www.i2p2/pages/translations/getinvolved_ru.html b/www.i2p2/pages/translations/getinvolved_ru.html index 7175442d..fa5708c3 100644 --- a/www.i2p2/pages/translations/getinvolved_ru.html +++ b/www.i2p2/pages/translations/getinvolved_ru.html @@ -14,7 +14,7 @@
            • Расскажите о нас! — Расскажите знакомым про I2P, дайте ссылку на проект в форумном обсуждении или в комментариях к статье, прорекламируйте в своём блоге. Создайте/обновите статью об I2P в Википедии на Вашем языке.
            • -
            • Тестирование — Обновляйтесь до текущего билда из monotone-репозитория и сообщайте обо всех обнаруженных ошибках на канале #i2p или в багтрекере.
            • +
            • Тестирование — Обновляйтесь до текущего билда из monotone-репозитория и сообщайте обо всех обнаруженных ошибках на канале #i2p или в багтрекере.
            • Документация — Исправьте устаревший текст, дополните незавершенные инструкции, добавьте перевод на свой язык.
            • @@ -26,7 +26,7 @@
            • Приложения — Создавайте новые I2P-программы или переделайте уже существующие под работу через I2P-сеть. Несколько методических рекомендаций и список нереализованных задумок можно посмотреть на странице Application Development Guide.
            • -
            • Разработка — Если Вы Java-программист, то перед Вами широкий фронт работ. Для начала проверьте багтрекер на наличие открытых тикетов или загляните в TODO-список на форуме zzz.i2p. Подробнее смотрите на странице New Developer's Guide
            • +
            • Разработка — Если Вы Java-программист, то перед Вами широкий фронт работ. Для начала проверьте багтрекер на наличие открытых тикетов или загляните в TODO-список на форуме zzz.i2p. Подробнее смотрите на странице New Developer's Guide
            • Поиск уязвимостей — Проанализируйте или протестируйте код на слабые места. Требуют внимания как уязвимости, касающиеся анонимности (см. описание на странице I2P's Threat Model), так и DoS-уязвимости, и прочие потенциальные угрозы.
            • diff --git a/www.i2p2/pages/translations/halloffame_de.html b/www.i2p2/pages/translations/halloffame_de.html index a3a1e03f..5db69595 100644 --- a/www.i2p2/pages/translations/halloffame_de.html +++ b/www.i2p2/pages/translations/halloffame_de.html @@ -1,15 +1,15 @@ {% extends "_layout_de.html" %} {% block title %}Ruhmeshalle{% endblock %} {% block content %} - +

              I2P's Ruhmeshalle

              -Derzeitiger Stand zum 01.05.2013:
              -Generelles Konto: 28078,57 € und 626,04640057 BTC
              +Derzeitiger Stand zum 25.08.2013:
              +Generelles Konto: 28829,12 € und 607,49839100 BTC
              Datencontainer Belohnung: 145.0 € und 2 BTC
              native IPv6-I2P : 100.0 € und 50 BTC
              I2PHex Code Belohnung: 100.0 €
              -I2P in Debian Spiegelserver: 123.0 €
              -Bitcoin Client für I2P: 50,34 BTC
              +I2P in Debian Spiegelserver: 148.0 €
              +Bitcoin Client für I2P: 10 € und 50,34 BTC
              Unit Tests für den I2P Router: 2305 €
              Bounty Robert: 20
              Bounty Syndie: 18 BTC
              @@ -119,7 +119,41 @@ hinzufügen können. May, 2013anonymous50 €Generelles Konto May, 2013anonymous0.3 BTCGenerelles Konto May, 2013anonymous10 €Belohnung CCR Microtic Board + + May, 2013anonymous20 €Generelles Konto + May, 2013anonymous30 €Generelles Konto + May, 2013anonymous3.50 €Generelles Konto + May, 2013anonymous0.0008 BTCGenerelles Konto + May, 2013anonymous0.0991 BTCGenerelles Konto + + June, 2013anonymous10 €Generelles Konto + June, 2013anonymous20 €Generelles Konto + June, 2013anonymous30 €Generelles Konto + June, 2013DHT Belohnung325 €DHT Belohnung + June, 2013anonymous1 €Generelles Konto + June, 2013anonymous0.0000067 BTCGenerelles Konto + June, 2013anonymous0.01 BTCGenerelles Konto + June, 2013anonymous0.031 BTCGenerelles Konto + June, 2013anonymous0.25 BTCGenerelles Konto + July, 2013first-leon1 €Generelles Konto + July, 2013anonymous5 €Generelles Konto + July, 2013anonymous2 €Generelles Konto + July, 2013anonymous20 €Generelles Konto + July, 2013anonymous0.05 €Generelles Konto + July, 2013anonymous30 €Generelles Konto + July, 2013JULLIAN David15 €Generelles Konto + July, 2013anonymous0.00567899 BTCGenerelles Konto + July, 2013anonymous0.00942744 BTCGenerelles Konto + July, 2013anonymous0.01807730 BTCGenerelles Konto + July, 2013fraud loss9 BTCGenerelles Konto + + Aug, 2013anonymous18 €Generelles Konto + Aug, 2013anonymous30 €Generelles Konto + Aug, 2013Verkauf von BTC820 €10 BTCGenerelles Konto + Aug, 2013anonymous20 €Generelles Konto + Aug, 2013anonymous25 €Bounty DEB Paket + Aug, 2013anonymous0.0279 BTCGenerelles Konto

              diff --git a/www.i2p2/pages/translations/how_de.html b/www.i2p2/pages/translations/how_de.html index ad53535d..de24fa4d 100644 --- a/www.i2p2/pages/translations/how_de.html +++ b/www.i2p2/pages/translations/how_de.html @@ -15,7 +15,7 @@ Die Schnittstelle zwischen Anwendungen und dem Router trägt den Namen I2CP

              Das I2P-Projekt ist um eine vollständige und aktuelle Dokumentation bemüht. Wer in einer der unten verlinkten Seiten Ungenauigkeiten oder Unstimmigkeiten findet, -möge diese bitte hier melden.

              +möge diese bitte hier melden.

              Inhaltsverzeichnis

              diff --git a/www.i2p2/pages/translations/how_el.html b/www.i2p2/pages/translations/how_el.html index 804c0cce..91f120ca 100644 --- a/www.i2p2/pages/translations/how_el.html +++ b/www.i2p2/pages/translations/how_el.html @@ -14,7 +14,7 @@ The interface between applications and the router is the I2CP (I2P Control Proto

              The I2P Project is committed to maintaining accurate, current documentation. If you find any inaccuracies in the documents linked below, please -enter a ticket identifying the problem. +enter a ticket identifying the problem.

              Index to Technical Documentation

              @@ -209,9 +209,9 @@ Note: always verify that javadocs are current by checking the release number.
            • Developer forum inside I2P
            • -Bug tracker +Bug tracker
            • -Viewmtn inside I2P. +Viewmtn inside I2P.
            • I2P Source exported to GitHub
            • @@ -219,7 +219,7 @@ Note: always verify that javadocs are current by checking the release number.
            • Source translation at Transifex
            • -0.9 roadmap wiki (not current) +0.9 roadmap wiki (not current)
            • Old roadmap (not current)
            • diff --git a/www.i2p2/pages/translations/how_fr.html b/www.i2p2/pages/translations/how_fr.html index 44c7562b..2359e0f5 100644 --- a/www.i2p2/pages/translations/how_fr.html +++ b/www.i2p2/pages/translations/how_fr.html @@ -14,7 +14,7 @@ L'interface entre les applications et le routeur est l'API I2CP (I2P Control Pro

              Le projet I2P s'engage à maintenir exacte la documentation actuelle. Si vous trouvez des erreur dans les documents liés ci-dessous, merci d'en faire part via un -ticket Trac.

              +ticket Trac.

              Table

              diff --git a/www.i2p2/pages/translations/how_networkcomparisons_fr.html b/www.i2p2/pages/translations/how_networkcomparisons_fr.html index 69d52fc0..9cfbc786 100644 --- a/www.i2p2/pages/translations/how_networkcomparisons_fr.html +++ b/www.i2p2/pages/translations/how_networkcomparisons_fr.html @@ -22,7 +22,7 @@ très peu de systèmes supportent longtemps la comparaison. Ils sont exposés ic

              Le contenu de cette page est susceptible de mises à jour, de discutions et de débats, et nous accueillons les commentaires et les compléments. Vous pouvez contribuer à l'analyse en soumettant un ticket sur -trac.i2p2.de. +trac.i2p2.de.

              Tor / Onion Routing

              diff --git a/www.i2p2/pages/translations/index_ar.html b/www.i2p2/pages/translations/index_ar.html index b80ec012..785432cf 100644 --- a/www.i2p2/pages/translations/index_ar.html +++ b/www.i2p2/pages/translations/index_ar.html @@ -4,7 +4,7 @@
              الإصدار الأحدث
              -2013-05-28 - I2P 0.9.6 - {{ urlify("release-0.9.6", "Announcement", "html")}} +2013-12-07 - I2P 0.9.9 - {{ urlify("release-0.9.9", "Announcement", "html")}} - تحميل
              2013-02-03 - Syndie 1.103b - @@ -12,10 +12,10 @@
              أحدث الأخبار :
              -2013-05-28 - I2P 0.9.6 تمّ اطلاق النسخة
              -2013-03-08 - I2P 0.9.5 تمّ اطلاق النسخة
              -2012-12-17 - I2P 0.9.4 تمّ اطلاق النسخة
              -2012-10-27 - I2P 0.9.3 تمّ اطلاق النسخة
              +2013-12-07 - I2P 0.9.9 تمّ اطلاق النسخة
              +2013-10-02 - I2P 0.9.8.1 تمّ اطلاق النسخة
              +2013-09-30 - I2P 0.9.8 تمّ اطلاق النسخة
              +2013-08-10 - I2P 0.9.7.1 تمّ اطلاق النسخة
              @@ -12,10 +12,10 @@
              Τελευταία Νέα:
              -2013-05-28 - I2P 0.9.6 Εκδόθηκε
              -2013-03-08 - I2P 0.9.5 Εκδόθηκε
              -2012-12-17 - I2P 0.9.4 Εκδόθηκε
              -2012-10-27 - I2P 0.9.3 Εκδόθηκε
              +2013-12-07 - I2P 0.9.9 Εκδόθηκε
              +2013-10-02 - I2P 0.9.8.1 Εκδόθηκε
              +2013-09-30 - I2P 0.9.8 Εκδόθηκε
              +2013-08-10 - I2P 0.9.7.1 Εκδόθηκε
              @@ -12,10 +12,10 @@
              Dernières nouvelles :
              -2013-05-28 - I2P 0.9.6 Publiée
              -2013-03-08 - I2P 0.9.5 Publiée
              -2012-12-17 - I2P 0.9.4 Publiée
              -2012-10-27 - I2P 0.9.3 Publiée
              +2013-12-07 - I2P 0.9.9 Publiée
              +2013-10-02 - I2P 0.9.8.1 Publiée
              +2013-09-30 - I2P 0.9.8 Publiée
              +2013-08-10 - I2P 0.9.7.1 Publiée
              @@ -12,10 +12,10 @@
              Latest News:
              -2013-05-28 - I2P 0.9.6 Rilasciata
              -2013-03-08 - I2P 0.9.5 Rilasciata
              -2012-12-17 - I2P 0.9.4 Rilasciata
              -2012-10-27 - I2P 0.9.3 Rilasciata
              +2013-12-07 - I2P 0.9.9 Rilasciata
              +2013-10-02 - I2P 0.9.8.1 Rilasciata
              +2013-09-30 - I2P 0.9.8 Rilasciata
              +2013-08-10 - I2P 0.9.7.1 Rilasciata
              @@ -12,10 +12,10 @@
              Laatste Nieuws:
              -2013-05-28 - I2P 0.9.6 Release
              -2013-03-08 - I2P 0.9.5 Release
              -2012-12-17 - I2P 0.9.4 Release
              -2012-10-27 - I2P 0.9.3 Release
              +2013-12-07 - I2P 0.9.9 Release
              +2013-10-02 - I2P 0.9.8.1 Release
              +2013-09-30 - I2P 0.9.8 Release
              +2013-08-10 - I2P 0.9.7.1 Release
              - Скачать @@ -14,10 +14,10 @@
              Последние Новости:
              -2013-05-28 - I2P 0.9.6 Released
              -2013-03-08 - I2P 0.9.5 Released
              -2012-12-17 - I2P 0.9.4 Released
              -2012-10-27 - I2P 0.9.3 Released
              +2013-12-07 - I2P 0.9.9 Released
              +2013-10-02 - I2P 0.9.8.1 Released
              +2013-09-30 - I2P 0.9.8 Released
              +2013-08-10 - I2P 0.9.7.1 Released
              diff --git a/www.i2p2/pages/translations/index_zh.html b/www.i2p2/pages/translations/index_zh.html index d8f9af04..f6ba4f8e 100644 --- a/www.i2p2/pages/translations/index_zh.html +++ b/www.i2p2/pages/translations/index_zh.html @@ -5,7 +5,7 @@
              最新版本:
              -2013-05-28 - I2P 0.9.6 - {{ urlify("release-0.9.6", "Announcement", "html")}} +2013-12-07 - I2P 0.9.9 - {{ urlify("release-0.9.9", "Announcement", "html")}} - 下载
              2013-02-03 - Syndie 1.103b - @@ -14,10 +14,10 @@
              最新动态:
              -2013-05-28 - I2P 0.9.6 新版发布
              -2013-03-08 - I2P 0.9.5 新版发布
              -2012-12-17 - I2P 0.9.4 新版发布
              -2012-10-27 - I2P 0.9.3 新版发布
              +2013-12-07 - I2P 0.9.9 新版发布
              +2013-10-02 - I2P 0.9.8.1 新版发布
              +2013-09-30 - I2P 0.9.8 新版发布
              +2013-08-10 - I2P 0.9.7.1 新版发布