Merge of '9b37b78a67eba61d5a48e52b9b63ec41b9019f39'
and 'e44a3689e894a8137c375f95b2cab3bdc29184e6'
dev, 2013-12-27 18:11:16 +00:00
413 changed files with 161247 additions and 55415 deletions

i2p2www/__init__.py

@@ -1,17 +1,33 @@
# -*- coding: utf-8 -*-
from flask import Flask, request, g, redirect, url_for, abort, render_template, send_from_directory, safe_join
-from flaskext.babel import Babel
-from flask.ext.cache import Cache
+try:
+    from flaskext.babel import Babel
+except ImportError:
+    from flask_babel import Babel
+try:
+    from flask.ext.cache import Cache
+except ImportError:
+    from flask_cache import Cache
from docutils.core import publish_parts
import os.path
import os
+try:
+    from i2p2www import settings
+except ImportError:
+    settings = None
###########
# Constants
-CURRENT_I2P_VERSION = '0.9.6'
+CURRENT_I2P_VERSION = '0.9.9'
-CANONICAL_DOMAIN = 'www.i2p2.de'
+CANONICAL_DOMAIN = 'new.i2p-projekt.de'
+CACHE_CONFIG = settings.CACHE_CONFIG if settings and hasattr(settings, 'CACHE_CONFIG') else {
+    'CACHE_DEFAULT_TIMEOUT': 600,
+}
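
(Illustrative aside: a hypothetical i2p2www/settings.py overriding the cache
defaults above; 'CACHE_TYPE' values are described in the Flask-Cache
configuration docs.)

    # i2p2www/settings.py -- optional local overrides (hypothetical sketch)
    CACHE_CONFIG = {
        'CACHE_TYPE': 'simple',           # in-process cache backend
        'CACHE_DEFAULT_TIMEOUT': 600,     # seconds
    }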
BLOG_POSTS_PER_FEED = 10
BLOG_POSTS_PER_PAGE = 10
@@ -20,24 +36,54 @@ MEETINGS_PER_PAGE = 20
SUPPORTED_LANGS = [
'en',
'es',
-# 'zh',
+'zh_CN',
'de',
'fr',
-# 'it',
-# 'nl',
-# 'ru',
-'sv',
-# 'cs',
-# 'ar',
-# 'el',
+'it',
+'ja',
+'pl',
+'pt',
+'pt_BR',
+'ro',
+'ru',
+'sv_SE',
]
+SUPPORTED_LANG_NAMES = {
+    'en': u'English',
+    'es': u'Castellano',
+    'zh_CN': u'Chinese',
+    'de': u'Deutsch',
+    'fr': u'Français',
+    'it': u'Italiano',
+    'ja': u'Japanese',
+    'pl': u'Polish',
+    'pt': u'Portuguese',
+    'pt_BR': u'Brazilian Portuguese',
+    'ro': u'Romanian',
+    'ru': u'Russian',
+    'sv_SE': u'Svenska',
+}
+DEFAULT_GETTEXT_DOMAIN = 'priority'
+GETTEXT_DOMAIN_MAPPING = {
+    'about': ['about'],
+    'blog': ['blog'],
+    'comparison': ['comparison'],
+    'docs': ['docs'],
+    'get-involved': ['get-involved'],
+    'misc': ['misc'],
+    'research': ['research'],
+}
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'pages')
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static')
BLOG_DIR = os.path.join(os.path.dirname(__file__), 'blog')
MEETINGS_DIR = os.path.join(os.path.dirname(__file__), 'meetings/logs')
SITE_DIR = os.path.join(TEMPLATE_DIR, 'site')
MIRRORS_FILE = os.path.join(TEMPLATE_DIR, 'downloads/mirrors')
+ANONBIB_CFG = os.path.join(TEMPLATE_DIR, 'papers/anonbib.cfg')
+ANONBIB_FILE = os.path.join(TEMPLATE_DIR, 'papers/anonbib.bib')
###################
@@ -50,11 +96,8 @@ class MyFlask(Flask):
app = application = MyFlask('i2p2www', template_folder=TEMPLATE_DIR, static_url_path='/_static', static_folder=STATIC_DIR)
app.debug = os.environ.get('APP_DEBUG', 'False') == 'True'  # string compare: bool('False') would be truthy
-babel = Babel(app)
-cache = Cache(app, config={
-    'CACHE_DEFAULT_TIMEOUT': 600,
-    #'CACHE_TYPE': '', # See http://packages.python.org/Flask-Cache/#configuring-flask-cache
-})
+babel = Babel(app, default_domain=DEFAULT_GETTEXT_DOMAIN)
+cache = Cache(app, config=CACHE_CONFIG)
#################
@@ -67,7 +110,22 @@ def get_locale():
return g.lang
# otherwise try to guess the language from the user accept
# header the browser transmits. The best match wins.
-    return request.accept_languages.best_match(['en', 'es', 'zh', 'de', 'fr', 'it', 'nl', 'ru', 'sv', 'cs', 'ar'])
+    return request.accept_languages.best_match(SUPPORTED_LANGS)
+@babel.domainselector
+def get_domains():
+    domains = []
+    frags = request.path.split('/', 2)
+    if len(frags) == 3:
+        path = frags[2]
+        for subpath in GETTEXT_DOMAIN_MAPPING:
+            if path.startswith(subpath):
+                domains.extend(GETTEXT_DOMAIN_MAPPING[subpath])
+    # Always end with the priority domain, as it contains
+    # various template strings and is likely to be the most
+    # up-to-date (in case of any common translation strings).
+    domains.append(DEFAULT_GETTEXT_DOMAIN)
+    return domains
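
(To illustrate the selector above, with hypothetical request paths and the
GETTEXT_DOMAIN_MAPPING defined earlier:)

    # /en/docs/how/intro -> frags = ['', 'en', 'docs/how/intro']
    #                    -> get_domains() returns ['docs', 'priority']
    # /en/about/intro    -> returns ['about', 'priority']
    # /en/               -> returns ['priority'] (no mapping prefix matches)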
##########################
@@ -161,7 +219,6 @@ def page_not_found(error):
def server_error(error):
return render_template('global/error_500.html'), 500
# Import these to ensure they get loaded
import templatevars
import urls

i2p2www/anonbib/BibTeX.py  (new file, 1269 lines; diff suppressed because it is too large)

i2p2www/anonbib/Makefile  (new file, 39 lines)

@@ -0,0 +1,39 @@
PYTHON=python
VERSION=0.3-dev
all:
$(PYTHON) writeHTML.py anonbib.cfg
clean:
rm -f *~ */*~ *.pyc *.pyo
update:
$(PYTHON) updateCache.py anonbib.cfg
$(PYTHON) rank.py anonbib.cfg
suggest:
$(PYTHON) rank.py suggest anonbib.cfg
test:
$(PYTHON) test.py
veryclean: clean
rm -f author.html date.html topic.html bibtex.html tmp.bib
TEMPLATES=_template_.html _template_bibtex.html
CSS=css/main.css css/pubs.css
BIBTEX=anonbib.bib
SOURCE=BibTeX.py config.py metaphone.py reconcile.py updateCache.py \
writeHTML.py rank.py tests.py
EXTRAS=TODO README Makefile ChangeLog anonbib.cfg gold.gif silver.gif \
upb.gif ups.gif
DISTFILES=$(TEMPLATES) $(CSS) $(BIBTEX) $(SOURCE) $(EXTRAS)
dist: clean
rm -rf anonbib-$(VERSION)
mkdir anonbib-$(VERSION)
tar cf - $(DISTFILES) | (cd anonbib-$(VERSION); tar xf -)
mkdir anonbib-$(VERSION)/cache
tar czf anonbib-$(VERSION).tar.gz anonbib-$(VERSION)
rm -rf anonbib-$(VERSION)

i2p2www/anonbib/README  (new file, 52 lines)

@@ -0,0 +1,52 @@
anonbib 0.3 -- Code to generate the anonymity bibliography
Copyright (c) 2003-2008 Nick Mathewson
Based on 'PDOSBib' perl code by Eddie Kohler
This software is licensed under the GNU GPL, version 2 or later.
To use this software, you need to understand BibTeX and Python a
little. If it breaks, you get to keep both pieces. You will need
Python 2.2 or later.
To use this package:
- Get a good BibTeX file. You may want to mark it up with some of the
extra keys used in our "anonbib.bib" file. All of the additional
Bibtex keys we use have the prefix "www_"; check out anonbib.bib
for their usage.
- Edit anonbib.cfg and _template_.html and _template_bibtex.html so they
refer to your files, authors, topics, and so on.
- Run 'python updateCache.py anonbib.cfg' to create a local cache of the
papers in your bibliography based on their www_*_url entries. (By
default, the script will ignore any entries you have already cached. To
force a fresh download of a cached file, delete it.)
- Run 'python rank.py anonbib.cfg' to download Google Scholar rankings of
all the papers.
- Run 'python writeHTML.py anonbib.cfg'. Fix any errors you care about.
- Re-run these scripts when you change the bibliography.
- If you want to merge in big BibTeX files, try using the reconcile.py
script. See the comment at the start of the file for usage info.
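- The same pipeline can also be driven from Python. A minimal sketch
  (Python 2, mirroring the __main__ blocks of rank.py and updateCache.py):

      import BibTeX
      import config

      config.load("anonbib.cfg")    # fills in MASTER_BIB, OUTPUT_DIR, ...
      bib = BibTeX.parseFile(config.MASTER_BIB)
      for ent in bib.entries:
          print ent.key             # one line per bibliography entry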
New in 0.3:
- Support for Google Scholar rankings to denote hot/rising papers.
Implemented by George Danezis.
- Make reconcile script generate more useful output.
- Add support for multiple bibliographies generated from a single bibtex
source. This is done via 'tags' on bibtex entries. If an entry is
tagged, it appears in the corresponding bibliographies. This is good
for generating a master bibliography and one or more selected readings
lists from the same source.
- Handle more errors when downloading files.
- When fetching a paper with a .ps url, generate the .ps.gz file
automatically.
- Note an error when a crossref overrides an existing field in an entry.
- Handle the Proceedings type correctly.
- Enforce proper encoding on pages: it must be number--number.

i2p2www/anonbib/TODO  (new file, 33 lines)

@@ -0,0 +1,33 @@
- More general tasks
. Know about @book
. Write unit tests for everything
. Make name parsing vaguely sane
- Maybe uncrossref in tmp.bib
- Maybe pull important papers to the start of their sections?
. Clean \{}~ when going from note to url; add \{}~ when making
note from url.
. Also clean \_ to _ and back
- Look for urls in wherepublished.
. Forgive newlines in wherepublished, note.
- When sorting by date, entries with unknown months go into a magic
"month zero" before January. Is this right?
- Strip unused features.
o Take a configuration file on the command line instead of just
importing config.py.
- Cache tasks
- Generate a list of broken links
- Re-download all cached items if requested
- Clear dead items from cache
- Use HTTP HEAD requests to decide whether to update stale
elements in cache.
- Add ability to honor a "www_no_cache={1}" option for entries
if the authors ask us not to cache them.
- Maybe, add ability to cache images from an HTML page.
- Reconcile tasks
- Document it.
- Notice when there is new or different information of certain kinds
(pages, dates, etc) in the new information.

i2p2www/anonbib/config.py  (new file, 56 lines)

@@ -0,0 +1,56 @@
# Copyright 2003-2006, Nick Mathewson. See LICENSE for licensing info.
import re
_KEYS = [ "ALL_TAGS",
"ALPHABETIZE_AUTHOR_AS","AUTHOR_URLS","CACHE_DIR","CACHE_SECTIONS",
"CACHE_UMASK",
"CITE_CACHE_DIR",
"COLLAPSE_AUTHORS",
"DOWNLOAD_CONNECT_TIMEOUT","INITIAL_STRINGS",
"MASTER_BIB", "NO_COLLAPSE_AUTHORS", "OMIT_ENTRIES",
"OUTPUT_DIR", "TEMPLATE_FILE", "BIBTEX_TEMPLATE_FILE",
"REQUIRE_KEY", "TAG_TITLES", "TAG_DIRECTORIES", "TAG_SHORT_TITLES",
]
for _k in _KEYS:
globals()[_k]=None
del _k
def load(cfgFile):
mod = {}
execfile(cfgFile, mod)
for _k in _KEYS:
try:
globals()[_k]=mod[_k]
except KeyError:
raise KeyError("Configuration option %s is missing"%_k)
INITIAL_STRINGS.update(_EXTRA_INITIAL_STRINGS)
AUTHOR_RE_LIST[:] = [
(re.compile(k, re.I), v,) for k, v in AUTHOR_URLS.items()
]
NO_COLLAPSE_AUTHORS_RE_LIST[:] = [
re.compile(pat, re.I) for pat in NO_COLLAPSE_AUTHORS
]
ALPHABETIZE_AUTHOR_AS_RE_LIST[:] = [
(re.compile(k, re.I), v,) for k,v in ALPHABETIZE_AUTHOR_AS.items()
]
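
# For reference, a sketch of what a config file must provide: it is plain
# Python executed via execfile(), and must define every name in _KEYS.
# Values below are illustrative, not the real anonbib.cfg:
#
#     MASTER_BIB = "anonbib.bib"
#     OUTPUT_DIR = "www"
#     CACHE_DIR = "cache"
#     CITE_CACHE_DIR = "cite-cache"
#     DOWNLOAD_CONNECT_TIMEOUT = 15
#     ... plus the remaining _KEYS (TAG_TITLES, TEMPLATE_FILE, etc.)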
_EXTRA_INITIAL_STRINGS = {
# MONTHS
'jan' : 'January', 'feb' : 'February',
'mar' : 'March', 'apr' : 'April',
'may' : 'May', 'jun' : 'June',
'jul' : 'July', 'aug' : 'August',
'sep' : 'September', 'oct' : 'October',
'nov' : 'November', 'dec' : 'December',
}
AUTHOR_RE_LIST = []
NO_COLLAPSE_AUTHORS_RE_LIST = []
ALPHABETIZE_AUTHOR_AS_RE_LIST = []


@@ -0,0 +1,111 @@
img {
border: 0px;
}
BODY {
background-color: #FFF;
color: #000;
margin: 0px;
}
FORM {
margin-top: 0.5em;
margin-bottom: 0.5em;
}
P, TD {
font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif;
}
P.contact {
text-align: center;
}
P.contact A {
font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif;
font-weight: normal;
}
SPAN.email {
font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace;
font-weight: bold;
}
P IMG {
vertical-align: text-bottom;
}
P.crumbbreadth {
margin-top: 0.25em;
}
.compact {
margin-top: -0.5em;
text-indent: 0em;
}
SPAN.biblio {
font-style: italic;
}
SPAN.biblio A {
font-family: lucida, "Lucida Sans Unicode", Geneva, sans-serif;
font-weight: normal;
text-decoration: underline;
}
SPAN.availability {
font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace;
font-weight: normal;
}
UL {
list-style: outside;
}
UL.expand {
margin-bottom: 1em;
}
UL.sections {
list-style: none;
}
/* Font-level properties */
PRE {
font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Lucida Sans Unicode", monospace;
}
STRONG, A {
font-family: lucidatypewriter, "Lucida Typewriter", Monaco, "Rockwell", "Lucida Sans Unicode", monospace;
font-weight: bold;
}
A:link {
color: #B00;
}
A:visited {
color: #903;
}
H1, H2, H3, H4, H5, H6 {
font-family: lucidatypewriter, "Lucida Typewriter", "Lucida Console", Monaco, monospace;
}
H1 A, H2 A, H3 A, H4 A, H5 A, H6 A {
font-family: lucidatypewriter, "Lucida Typewriter", "Lucida Console", Monaco, monospace;
}
H1 {
color: #00B;
}
H2 {
color: #006;
}
H3 {
color: #006;
}

i2p2www/anonbib/gold.gif  (new binary file, 540 B)

i2p2www/anonbib/metaphone.py  (new file, 193 lines)

@@ -0,0 +1,193 @@
#!/usr/bin/python2
# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info.
"""metaphone.py -- Pure-python metaphone implementation.
(This is not guaranteed to match the real metaphone algorithm; I
haven't tested it thoroughly enough. Let me know if you find bugs.
Based on the original C++ metaphone implementation.)
"""
TRIPLES = {
'dge': 'j',
'dgi': 'j',
'dgy': 'j',
'sia': '+x',
'sio': '+x',
'tia': '+x',
'tio': '+x',
'tch': '',
'tha': '0',
'the': '0',
'thi': '0',
'tho': '0',
'thu': '0',
}
DOUBLES = {
'ph' : 'f',
'sh' : 'x'
}
SINGLETONS = {
'd': 't',
'f': 'f',
'j': 'j',
'l': 'l',
'm': 'm',
'n': 'n',
'r': 'r',
'p': 'p',
'q': 'k',
'v': 'f',
'x': 'ks',
'z': 's',
}
ALLCHARS = "".join(map(chr, range(256)))
NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()])
def metaphone(s):
"""Return the metaphone equivalent of a provided string"""
s = s.lower()
s = s.translate(ALLCHARS, NONLCCHARS)
if not s: return ""
# If ae, gn, kn, pn, wr then drop the first letter.
if s[:2] in ("ae", "gn", "kn", "pn", "wr"):
s = s[1:]
# Change "x" to "s"
if s[0] == 'x':
s = "s%s" % s[1:]
# Get rid of "h" in "wh".
if s[:2] == 'wh':
s = "w%s" % s[1:]
# Get rid of s from end.
if s[-1] == 's':
s = s[:-1]
result = []
prevLtr = ' '
vowelBefore = 0
lastChar = len(s)-1
for idx in range(len(s)):
curLtr = s[idx]
# If first char is a vowel, keep it.
if curLtr in "aeiou":
if idx == 0:
result.append(curLtr)
continue
# Skip double letters.
if idx < lastChar:
if curLtr == s[idx+1]:
continue
try:
r = TRIPLES[s[idx:idx+3]]
if r == "+x":
if idx > 1:
result.append("x")
continue
else:
result.append(r)
continue
except KeyError:
pass
try:
r = DOUBLES[s[idx:idx+2]]
result.append(r)
continue
except KeyError:
pass
try:
r = SINGLETONS[s[idx]]
result.append(r)
continue
except KeyError:
pass
if idx > 0:
prevLtr = s[idx-1]
vowelBefore = prevLtr in "aeiou"
curLtr = s[idx]
nextLtr2 = ' '
if idx < lastChar:
nextLtr = s[idx+1]
vowelAfter = nextLtr in "aeiou"
frontvAfter = nextLtr in "eiy"
if idx+1 < lastChar:
nextLtr2 = s[idx+2]
else:
nextLtr = ' '
vowelAfter = frontvAfter = 0
if curLtr == 'b':
if idx == lastChar and prevLtr == 'm':
pass
else:
result.append(curLtr)
elif curLtr == 'c':
# 'c' is silent in 'sci', 'sce', 'scy', etc.
if not (prevLtr == 's' and frontvAfter):
if nextLtr in 'ia':
result.append("x")
elif frontvAfter:
result.append("s")
elif prevLtr == 's' and nextLtr == 'h':
result.append('k')
elif nextLtr == 'h':
if idx == 0 and nextLtr2 in "aeiou":
result.append('k')
else:
result.append('x')
elif prevLtr == 'c':
result.append('c')
else:
result.append('k')
elif curLtr == 'g':
if (idx < lastChar-1) and nextLtr == 'h':
pass
elif s[idx:] == 'gned':
pass
elif s[idx:] == 'gn':
pass
elif prevLtr == 'd' and frontvAfter:
pass
else:
hard = (prevLtr == 'g')
if frontvAfter and not hard:
result.append('j')
else:
result.append('k')
elif curLtr == 'h':
if prevLtr in 'csptg':
pass
elif vowelBefore and not vowelAfter:
pass
else:
result.append('h')
elif curLtr == 'k':
if prevLtr != 'c': result.append('k')
elif curLtr in 'wy':
if vowelAfter:
result.append(curLtr)
return "".join(result)
def demo(a):
print a, "=>", metaphone(a)
if __name__ == '__main__':
demo("Nick. Mathewson")
demo("joe schmidt")
demo("Beethoven")
demo("Because the world is round")

i2p2www/anonbib/rank.py  (new file, 202 lines)

@@ -0,0 +1,202 @@
# Make rankings of papers and authors for automatic classification of content hotness
# Google Scholar address
# http://scholar.google.com/scholar?as_epq=
# Take care of the caching setup
cache_expire = 60*60*24*30 # 30 days
# Checks
import config
import os
import sys
from os.path import exists, isdir, join, getmtime
from os import listdir, remove
def remove_old():
# Remove all old cached files
filenames = listdir(cache_folder())
from time import time
now = time()
for f in filenames:
pf = join(cache_folder(), f)
time_mt = getmtime(pf)
if now - time_mt > cache_expire: # 30 days
remove(pf)
def cache_folder():
r = join(config.OUTPUT_DIR, config.CITE_CACHE_DIR)
if not exists(r):
os.makedirs(r)
assert isdir(r)
return r
import re
from urllib2 import urlopen, build_opener
from urllib import quote
from datetime import date
import hashlib
# A more handy hash
def md5h(s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
format_tested = 0
def getPageForTitle(title, cache=True, update=True, save=True):
#Returns (citation-count, scholar url) tuple, or (None,None)
global format_tested
if not format_tested and update:
format_tested = 1
TestScholarFormat()
# Do not assume that the title is clean
title = re.sub("\s+", " ", title)
title = re.sub("[^'a-zA-Z0-9\. \-\/:]", "", title)
title = re.sub("'\/", " ", title)
# We rely on google scholar to return the article with this exact title
gurl = "http://scholar.google.com/scholar?as_q=&as_epq=%s&as_occt=title"
url = gurl % quote(title)
# Access cache or network
if exists(join(cache_folder(), md5h(url))) and cache:
return url, file(join(cache_folder(), md5h(url)),'r').read()
elif update:
print "Downloading rank for %r."%title
# Make a custom user agent (so that we are not filtered by Google)!
opener = build_opener()
opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]
print "connecting..."
connection = opener.open(url)
print "reading"
page = connection.read()
print "done"
if save:
file(join(cache_folder(), md5h(url)),'w').write(page)
return url, page
else:
return url, None
def getCite(title, cache=True, update=True, save=True):
url, page = getPageForTitle(title, cache=cache, update=update, save=save)
if not page:
return None,None
# Check if it finds any articles
if len(re.findall("did not match any articles", page)) > 0:
return (None, None)
# Kill all tags!
cpage = re.sub("<[^>]*>", "", page)
# Add up all citations
s = sum([int(x) for x in re.findall("Cited by ([0-9]*)", cpage)])
return (s, url)
def getPaperURLs(title, cache=True, update=True, save=True):
url, page = getPageForTitle(title, cache=cache, update=update, save=save)
if not page:
return []
pages = re.findall(r'\&\#x25ba\;.*class=fl href="([^"]*)"', page)
return pages
def get_rank_html(title, years=None, base_url=".", update=True,
velocity=False):
s,url = getCite(title, update=update)
# Paper cannot be found
if s is None:
return ''
html = ''
url = url.replace("&","&amp;")
# Hotness
H,h = 50,5
if s >= H:
html += '<a href="%s"><img src="%s/gold.gif" alt="More than %s citations on Google Scholar" title="More than %s citations on Google Scholar" /></a>' % (url,base_url,H,H)
elif s >= h:
html += '<a href="%s"><img src="%s/silver.gif" alt="More than %s citations on Google Scholar" title="More than %s citations on Google Scholar" /></a>' % (url,base_url,h,h)
# Only include the velocity if asked.
if velocity:
# Velocity
d = date.today().year - int(years)
if d >= 0:
if 2 < s / (d +1) < 10:
html += '<img src="%s/ups.gif" />' % base_url
if 10 <= s / (d +1):
html += '<img src="%s/upb.gif" />' % base_url
return html
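# Usage sketch (title taken from TestScholarFormat below; the year is
# illustrative). Assumes config.load() has already run:
#
#     html = get_rank_html("Stop-and-Go MIXes: Providing Probabilistic "
#                          "Anonymity in an Open System",
#                          years="1998", base_url="..", velocity=True)
#
# gold.gif marks >= 50 citations and silver.gif >= 5; with velocity=True,
# ups.gif appears when 2 < cites/(age+1) < 10 and upb.gif when it is >= 10.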
def TestScholarFormat():
# We need to ensure that Google Scholar does not change its page format under our feet
# Use some cases to check if all is good
print "Checking google scholar formats..."
stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0]
dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0]
if stopAndGoCites in (0, None):
print """OOPS.\n
It looks like Google Scholar changed their URL format or their output format.
I went to count the cites for the Stop-and-Go MIXes paper, and got nothing."""
sys.exit(1)
if dragonCites != None:
print """OOPS.\n
It looks like Google Scholar changed their URL format or their output format.
I went to count the cites for a fictitious paper, and found some."""
sys.exit(1)
def urlIsUseless(u):
if u.find("freehaven.net/anonbib/") >= 0:
# Our own cache is not the primary citation for anything.
return True
elif u.find("owens.mit.edu") >= 0:
# These citations only work for 'members of the MIT community'.
return True
else:
return False
URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ]
if __name__ == '__main__':
# First download the bibliography file.
import BibTeX
suggest = False
if sys.argv[1] == 'suggest':
suggest = True
del sys.argv[1]
config.load(sys.argv[1])
if config.CACHE_UMASK != None:
os.umask(config.CACHE_UMASK)
bib = BibTeX.parseFile(config.MASTER_BIB)
remove_old()
print "Downloading missing ranks."
for ent in bib.entries:
getCite(ent['title'], cache=True, update=True)
if suggest:
for ent in bib.entries:
haveOne = False
for utype in URLTYPES:
if ent.has_key("www_%s_url"%utype):
haveOne = True
break
if haveOne:
continue
print ent.key, "has no URLs given."
urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ]
for u in urls:
print "\t", u

i2p2www/anonbib/reconcile.py  (new file, 292 lines)

@@ -0,0 +1,292 @@
#!/usr/bin/python2
# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info.
"""Code to determine which entries are new and which are old.
To scan a new file, run "python reconcile.py anonbib.cfg new-file.bib". This
will generate a new bibtex file called 'tmp.bib', with all the new entries
cleaned up a little, and all the duplicate entries commented out.
"""
import sys
import re
assert sys.version_info[:3] >= (2,2,0)
import BibTeX
import config
import metaphone
_MPCACHE = {}
def soundsLike(s1, s2):
c = _MPCACHE
s1 = clean(s1)
s2 = clean(s2)
try:
m1 = c[s1]
except KeyError:
m1 = c[s1] = metaphone.metaphone(s1)
try:
m2 = c[s2]
except KeyError:
m2 = c[s2] = metaphone.metaphone(s2)
return m1 == m2
def mphone(s):
c = _MPCACHE
s = clean(s)
try:
return c[s]
except:
m = c[s] = metaphone.metaphone(s)
return m
def clean(s):
s = re.sub(r'\s+', ' ', s)
s = s.strip()
return s
class MasterBibTeX(BibTeX.BibTeX):
def __init__(self):
BibTeX.BibTeX.__init__(self)
def buildIndex(self):
self.byTitle = {}
for ent in self.entries:
for t in self._titleForms(ent['title']):
self.byTitle.setdefault(t, []).append(ent)
def _titleForms(self, title):
title = title.lower()
title = re.sub(r'\b(an|a|the|of)\b', "", title)
title = clean(title)
res = [ mphone(title) ]
if ':' in title:
for t in title.split(":"):
res.append(mphone(t.strip()))
#print "%r\n => %s" % (title,res)
return res
def _titlesAlike(self, t1, t2):
t1 = clean(t1)
t2 = clean(t2)
if t1 == t2:
return 2
tf1 = self._titleForms(t1)
tf2 = self._titleForms(t2)
for t in tf1:
if t in tf2: return 1
return 0
def _authorsAlike(self, a1, a2):
if not soundsLike(" ".join(a1.last)," ".join(a2.last)):
return 0
if (a1.first == a2.first and a1.von == a2.von
and a1.jr == a2.jr):
return 2
if soundsLike(" ".join(a1.first), " ".join(a2.first)):
return 1
if not a1.first or not a2.first:
return 1
if self._initialize(a1.first) == self._initialize(a2.first):
return 1
return 0
def _initialize(self, name):
name = " ".join(name).lower()
name = re.sub(r'([a-z])[a-z\.]*', r'\1', name)
name = clean(name)
return name
def _authorListsAlike(self, a1, a2):
if len(a1) != len(a2):
return 0
a1 = [ (a.last, a) for a in a1 ]
a2 = [ (a.last, a) for a in a2 ]
a1.sort()
a2.sort()
if len(a1) != len(a2):
return 0
r = 2
for (_, a1), (_, a2) in zip(a1,a2):
x = self._authorsAlike(a1,a2)
if not x:
return 0
elif x == 1:
r = 1
return r
def _entryDatesAlike(self, e1, e2):
try:
if clean(e1['year']) == clean(e2['year']):
return 2
else:
return 0
except KeyError:
return 1
def includes(self, ent, all=0):
title = ent['title']
candidates = []
for form in self._titleForms(title):
try:
candidates.extend(self.byTitle[form])
except KeyError:
pass
goodness = []
for knownEnt in candidates:
match = (self._entryDatesAlike(ent, knownEnt) *
self._titlesAlike(ent['title'], knownEnt['title']) *
self._authorListsAlike(ent.parsedAuthor,
knownEnt.parsedAuthor) )
if match:
goodness.append((match, knownEnt))
goodness.sort()
if all:
return goodness
if goodness:
return goodness[-1]
else:
return None, None
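# Scoring recap (behavior as written above): _entryDatesAlike,
# _titlesAlike and _authorListsAlike each return 0 (mismatch),
# 1 (plausible) or 2 (exact). includes() multiplies the three scores, so
# any hard mismatch zeroes a candidate and the best match is the highest
# product (at most 2*2*2 = 8).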
def demo(self):
for e in self.entries:
matches = self.includes(e, 1)
m2 = []
mids = []
for g,m in matches:
if id(m) not in mids:
mids.append(id(m))
m2.append((g,m))
matches = m2
if not matches:
print "No match for %s"%e.key
if matches[-1][1] is e:
print "%s matches for %s: OK."%(len(matches), e.key)
else:
print "%s matches for %s: %s is best!" %(len(matches), e.key,
matches[-1][1].key)
if len(matches) > 1:
for g, m in matches:
print "%%%% goodness", g
print m
def noteToURL(note):
" returns tp, url "
note = note.replace("\n", " ")
m = re.match(r'\s*(?:\\newline\s*)*\s*\\url{(.*)}\s*(?:\\newline\s*)*',
note)
if not m:
return None
url = m.group(1)
for suffix, tp in ((".html", "html"),
(".ps", "ps"),
(".ps.gz", "ps_gz"),
(".pdf", "pdf"),
(".txt", "txt")):
if url.endswith(suffix):
return tp,url
return "???", url
all_ok = 1
def emit(f,ent):
global all_ok
errs = ent._check()
if master.byKey.has_key(ent.key.strip().lower()):
errs.append("ERROR: Key collision with master file")
if errs:
all_ok = 0
note = ent.get("note")
if ent.getURL() and not note:
ent['note'] = "\url{%s}"%ent.getURL()
elif note:
m = re.match(r'\\url{(.*)}', note)
if m:
url = m.group(0)
tp = None
if url.endswith(".txt"):
tp = "txt"
elif url.endswith(".ps.gz"):
tp = "ps_gz"
elif url.endswith(".ps"):
tp = "ps_gz"
elif url.endswith(".pdf"):
tp = "pdf"
elif url.endswith(".html"):
tp = "html"
if tp:
ent['www_%s_url'%tp] = url
if errs:
all_ok = 0
for e in errs:
print >>f, "%%%%", e
print >>f, ent.format(77, 4, v=1, invStrings=invStrings)
def emitKnown(f, ent, matches):
print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches])
print >>f, "%%"
print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%"))
if __name__ == '__main__':
if len(sys.argv) != 3:
print "reconcile.py expects 2 arguments"
sys.exit(1)
config.load(sys.argv[1])
print "========= Scanning master =========="
master = MasterBibTeX()
master = BibTeX.parseFile(config.MASTER_BIB, result=master)
master.buildIndex()
print "========= Scanning new file ========"
try:
fn = sys.argv[2]
input = BibTeX.parseFile(fn)
except BibTeX.ParseError, e:
print "Error parsing %s: %s"%(fn,e)
sys.exit(1)
f = open('tmp.bib', 'w')
keys = input.newStrings.keys()
keys.sort()
for k in keys:
v = input.newStrings[k]
print >>f, "@string{%s = {%s}}"%(k,v)
invStrings = input.invStrings
for e in input.entries:
if not (e.get('title') and e.get('author')):
print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%"
emit(f, e)
continue
matches = master.includes(e, all=1)
if not matches:
print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%"
emit(f, e)
else:
print >>f, "%%"
print >>f, "%%%% Possible match found for this entry; max goodness",\
matches[-1][0], "\n%%"
emitKnown(f, e, matches)
if not all_ok:
print >>f, "\n\n\nErrors remain; not finished.\n"
f.close()

i2p2www/anonbib/silver.gif  (new binary file, 539 B)

(file diff suppressed because it is too large)

i2p2www/anonbib/tests.py  (new file, 86 lines)

@@ -0,0 +1,86 @@
#!/usr/bin/python2
# Copyright 2004-2008, Nick Mathewson. See LICENSE for licensing info.
"""Unit tests for anonbib."""
import BibTeX
import metaphone
#import reconcile
#import writeHTML
#import updateCache
import unittest
class MetaphoneTests(unittest.TestCase):
def testMetaphone(self):
pass
class BibTeXTests(unittest.TestCase):
def testTranslation(self):
ut = BibTeX.url_untranslate
self.assertEquals(ut("Fred"),"Fred")
self.assertEquals(ut("Hello, World."), "Hello_2c_20World.")
te = BibTeX.TeXescapeURL
ute = BibTeX.unTeXescapeURL
self.assertEquals(te("http://example/~me/my_file"),
r"http://example/\{}~me/my\_file")
self.assertEquals(ute(r"http:{}//example/\{}~me/my\_file"),
"http://example/~me/my_file")
h = BibTeX.htmlize
self.assertEquals(h("Hello, world"), "Hello, world")
self.assertEquals(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
"&aacute;&egrave;&iacute;(&iacute;)&ouml;&amp;"
"&ucirc;")
self.assertEquals(h(r"\~n and \c{c}"), "&ntilde; and &ccedil;")
self.assertEquals(h(r"\AE---a ligature"), "&AElig;&mdash;a ligature")
self.assertEquals(h(r"{\it 33}"), " 33")
self.assertEquals(h(r"Pages 33--99 or vice--versa?"),
"Pages 33-99 or vice&ndash;versa?")
t = BibTeX.txtize
self.assertEquals(t("Hello, world"), "Hello, world")
self.assertEquals(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
"aei(i)o&u")
self.assertEquals(t(r"\~n and \c{c}"), "n and c")
self.assertEquals(t(r"\AE---a ligature"), "AE---a ligature")
self.assertEquals(t(r"{\it 33}"), " 33")
self.assertEquals(t(r"Pages 33--99 or vice--versa?"),
"Pages 33--99 or vice--versa?")
def authorsParseTo(self,authors,result):
pa = BibTeX.parseAuthor(authors)
self.assertEquals(["|".join(["+".join(item) for item in
[a.first,a.von,a.last,a.jr]])
for a in pa],
result)
def testAuthorParsing(self):
pa = BibTeX.parseAuthor
PA = BibTeX.ParsedAuthor
apt = self.authorsParseTo
apt("Nick A. Mathewson and Roger Dingledine",
["Nick+A.||Mathewson|", "Roger||Dingledine|"])
apt("John van Neumann", ["John|van|Neumann|"])
apt("P. Q. Z. de la Paz", ["P.+Q.+Z.|de+la|Paz|"])
apt("Cher", ["||Cher|"])
apt("Smith, Bob", ["Bob||Smith|"])
apt("de Smith, Bob", ["Bob|de|Smith|"])
apt("de Smith, Bob Z", ["Bob+Z|de|Smith|"])
#XXXX Fix this.
#apt("Roberts Smith Wilkins, Bob Z", ["Bob+Z||Smith+Wilkins|"])
apt("Smith, Jr, Bob", ["Bob||Smith|Jr"])
#XXXX Fix this.
#apt("R Jones, Jr.", ["R||Jones|Jr."])
apt("Smith, Bob and John Smith and Last,First",
["Bob||Smith|", "John||Smith|", "First||Last|"])
apt("Bob Smith and John Smith and John Doe",
["Bob||Smith|", "John||Smith|", "John||Doe|"])
if __name__ == '__main__':
unittest.main()

i2p2www/anonbib/upb.gif  (new binary file, 555 B)

i2p2www/anonbib/updateCache.py  (new executable file, 169 lines)

@@ -0,0 +1,169 @@
#!/usr/bin/python
# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info.
"""Download files in bibliography into a local cache.
"""
import os
import sys
import signal
import time
import gzip
import BibTeX
import config
import urllib2
import getopt
import socket
import errno
import httplib
FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ]
BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ]
class UIError(Exception):
pass
def tryUnlink(fn):
try:
os.unlink(fn)
except OSError:
pass
def getCacheFname(key, ftype, section):
return BibTeX.smartJoin(config.OUTPUT_DIR,config.CACHE_DIR,
section,
"%s.%s"%(key,ftype))
def downloadFile(key, ftype, section, url,timeout=None):
if timeout is None:
timeout = config.DOWNLOAD_CONNECT_TIMEOUT
fname = getCacheFname(key, ftype, section)
parent = os.path.split(fname)[0]
if not os.path.exists(parent):
os.makedirs(parent)
fnameTmp = fname+".tmp"
fnameURL = fname+".url"
tryUnlink(fnameTmp)
def sigalrmHandler(sig,_):
pass
signal.signal(signal.SIGALRM, sigalrmHandler)
signal.alarm(timeout)
try:
try:
infile = urllib2.urlopen(url)
except httplib.InvalidURL, e:
raise UIError("Invalid URL %s: %s"%(url,e))
except IOError, e:
raise UIError("Cannot connect to url %s: %s"%(url,e))
except socket.error, e:
if getattr(e,"errno",-1) == errno.EINTR:
raise UIError("Connection timed out to url %s"%url)
else:
raise UIError("Error connecting to %s: %s"%(url, e))
finally:
signal.alarm(0)
mode = 'w'
if ftype in BIN_FILE_TYPES:
mode = 'wb'
outfile = open(fnameTmp, mode)
try:
while 1:
s = infile.read(1<<16)
if not s: break
outfile.write(s)
finally:
infile.close()
outfile.close()
urlfile = open(fnameURL, 'w')
print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if "\n" in url: url = url.replace("\n", " ")
print >>urlfile, url
urlfile.close()
os.rename(fnameTmp, fname)
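# Note on the timeout above (behavior as written): signal.alarm(timeout)
# is armed before urlopen(), and the no-op SIGALRM handler makes the
# blocked connect/read fail with EINTR, which the socket.error branch
# reports as "Connection timed out". This relies on Unix signals and the
# main thread.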
def getURLs(entry):
r = {}
for ftype in FILE_TYPES:
ftype2 = ftype.replace(".", "_")
url = entry.get("www_%s_url"%ftype2)
if url:
r[ftype] = url.strip().replace("\n", " ")
return r
def getCachedURL(key, ftype, section):
fname = getCacheFname(key, ftype, section)
urlFname = fname+".url"
if not os.path.exists(fname) or not os.path.exists(urlFname):
return None
f = open(urlFname, 'r')
lines = f.readlines()
f.close()
if len(lines) != 2:
print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname
return lines[1].strip()
def downloadAll(bibtex, missingOnly=0):
"""returns list of tuples of key, ftype, url, error"""
errors = []
for e in bibtex.entries:
urls = getURLs(e)
key = e.key
section = e.get("www_cache_section", ".")
for ftype, url in urls.items():
if missingOnly:
cachedURL = getCachedURL(key, ftype, section)
if cachedURL == url:
print >>sys.stderr,"Skipping",url
continue
elif cachedURL is not None:
print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype)
else:
print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype)
try:
downloadFile(key, ftype, section, url)
print "Downloaded",url
except UIError, e:
print >>sys.stderr, str(e)
errors.append((key,ftype,url,str(e)))
except (IOError, socket.error), e:
msg = "Error downloading %s: %s"%(url,str(e))
print >>sys.stderr, msg
errors.append((key,ftype,url,msg))
if urls.has_key("ps") and not urls.has_key("ps.gz"):
# Say, this is something we'd like to have gzipped locally.
psFname = getCacheFname(key, "ps", section)
psGzFname = getCacheFname(key, "ps.gz", section)
if os.path.exists(psFname) and not os.path.exists(psGzFname):
# This is something we haven't gzipped yet.
print "Compressing a copy of",psFname
outf = gzip.GzipFile(psGzFname, "wb")
inf = open(psFname, "rb")
while 1:
s = inf.read(4096)
if not s:
break
outf.write(s)
outf.close()
inf.close()
return errors
if __name__ == '__main__':
if len(sys.argv) == 2:
print "Loading from %s"%sys.argv[1]
else:
print >>sys.stderr, "Expected a single configuration file as an argument"
sys.exit(1)
config.load(sys.argv[1])
if config.CACHE_UMASK != None:
os.umask(config.CACHE_UMASK)
bib = BibTeX.parseFile(config.MASTER_BIB)
downloadAll(bib,missingOnly=1)

i2p2www/anonbib/ups.gif  (new binary file, 536 B)


@@ -0,0 +1,41 @@
This file is to keep track of which volumes of which publications have
been combed for anonymity papers and which we still have to add.
=== DONE:
ExampleConference (through 2008)
PETS 2000-2003
=== CLAIMED:
PETS 2000-2010 -- Nick (claimed 6/16)
ESORICS 1990-2010 -- Nick (claimed 6/16)
CCS -- George (claimed 6/17)
USENIX Security ("Oakland") -- George (claimed 6/17)
=== SHOULD DO:
Infohiding
IEEE Security and privacy
NDSS
WPES
WEIS
Financial Crypto
Eurocrypt
Asiacrypt
Search: Papers that cite Chaum's paper
Search: Papers that cite the Tor paper
Search: Papers that cite the original onion routing papers
Search: Papers mentioning "anonymity" or "anonymous"
Search: Papers mentioning "mixnet" or "mix-net"
=== UNDERSERVED CONTENT; PLEASE SUGGEST SEARCHES AND VENUES
Private information retrieval; PIR
Anti-censorship; censorship
Location privacy
Anonymous credentials
Anonymizing data
Secure multiparty computation

i2p2www/anonbib/views.py  (new file, 70 lines)

@@ -0,0 +1,70 @@
from flask import render_template
from i2p2www import ANONBIB_CFG, ANONBIB_FILE
from i2p2www.anonbib import BibTeX, config
def papers_list(tag='', choice='date'):
config.load(ANONBIB_CFG)
rbib = BibTeX.parseFile(ANONBIB_FILE)
if tag:
rbib = [ b for b in rbib.entries if tag in b.get('www_tags', '').split() ]
else:
rbib = rbib.entries
if choice == 'topic':
sectionType = 'Topics'
rbib = BibTeX.sortEntriesBy(rbib, 'www_section', 'ZZZZZZZZZZZZZZ')
rbib = BibTeX.splitSortedEntriesBy(rbib, 'www_section')
if rbib[-1][0].startswith("<span class='bad'>"):
rbib[-1] = ("Miscellaneous", rbib[-1][1])
rbib = [ (s, BibTeX.sortEntriesByDate(ents))
for s, ents in rbib
]
elif choice == 'author':
sectionType = 'Authors'
rbib, url_map = BibTeX.splitEntriesByAuthor(rbib)
else:
sectionType = 'Years'
choice = 'date'
rbib = BibTeX.sortEntriesByDate(rbib)
rbib = BibTeX.splitSortedEntriesBy(rbib, 'year')
bib = {
'tags': config.ALL_TAGS,
'tag_titles': config.TAG_TITLES,
'tag_short_titles': config.TAG_SHORT_TITLES,
'tag': tag,
'sectiontypes': sectionType,
'field': choice,
}
sections = []
for section, entries in rbib:
s = {
'name': section,
'slug': BibTeX.url_untranslate(section),
'entries': entries,
}
sections.append(s)
bib['sections'] = sections
return render_template('papers/list.html', bib=bib)
def papers_bibtex(tag=None):
config.load(ANONBIB_CFG)
rbib = BibTeX.parseFile(ANONBIB_FILE)
if tag:
rbib = [ b for b in rbib.entries if tag in b.get('www_tags', '').split() ]
else:
rbib = rbib.entries
entries = [ (ent.key, ent) for ent in rbib ]
entries.sort()
entries = [ ent[1] for ent in entries ]
bib = {
'title': 'Papers on I2P',
'entries': entries,
}
return render_template('papers/bibtex.html', bib=bib)
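# Hypothetical routing sketch -- illustrative only; the real URL map for
# these views lives elsewhere in i2p2www:
#
#     from i2p2www.anonbib.views import papers_list, papers_bibtex
#     app.add_url_rule('/papers/<choice>', 'papers_list', papers_list)
#     app.add_url_rule('/papers/bibtex', 'papers_bibtex', papers_bibtex)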

i2p2www/anonbib/writeHTML.py  (new executable file, 246 lines)

@@ -0,0 +1,246 @@
#!/usr/bin/python
# Copyright 2003-2008, Nick Mathewson. See LICENSE for licensing info.
"""Generate indices by author, topic, date, and BibTeX key."""
import sys
import re
import os
import json
assert sys.version_info[:3] >= (2,2,0)
os.umask(022)
import BibTeX
import config
def getTemplate(name):
f = open(name)
template = f.read()
f.close()
template_s, template_e = template.split("%(entries)s")
return template_s, template_e
def pathLength(s):
n = 0
while s:
parent, leaf = os.path.split(s)
if leaf != '' and leaf != '.':
n += 1
s = parent
return n
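# e.g. pathLength("a/b/c") == 3. writePageSet() below uses this to build
# the "../" * n prefix that climbs from a tag directory back to the
# output root (see cache_url_path and root).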
def writeBody(f, sections, section_urls, cache_path, base_url):
'''f: an open file
sections: list of (sectionname, [list of BibTeXEntry])
section_urls: map from sectionname to external url'''
for s, entries in sections:
u = section_urls.get(s)
sDisp = re.sub(r'\s+', ' ', s.strip())
sDisp = sDisp.replace(" ", "&nbsp;")
if u:
print >>f, ('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
(BibTeX.url_untranslate(s), u, sDisp)))
else:
print >>f, ('<li><h3><a name="%s">%s</a></h3>'%(
BibTeX.url_untranslate(s),sDisp))
print >>f, "<ul class='expand'>"
for e in entries:
print >>f, e.to_html(cache_path=cache_path, base_url=base_url)
print >>f, "</ul></li>"
def writeHTML(f, sections, sectionType, fieldName, choices,
tag, config, cache_url_path, section_urls={}):
"""sections: list of (sectionname, [list of BibTeXEntry])'''
sectionType: str
fieldName: str
choices: list of (choice, url)"""
title = config.TAG_TITLES[tag]
short_title = config.TAG_SHORT_TITLES[tag]
#
secStr = []
for s, _ in sections:
hts = re.sub(r'\s+', ' ', s.strip())
hts = hts.replace(" ", "&nbsp;")
secStr.append("<p class='l2'><a href='#%s'>%s</a></p>\n"%
((BibTeX.url_untranslate(s),hts)))
secStr = "".join(secStr)
#
tagListStr = []
st = config.TAG_SHORT_TITLES.keys()
st.sort()
root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
if root == "": root = "."
for t in st:
name = config.TAG_SHORT_TITLES[t]
if t == tag:
tagListStr.append(name)
else:
url = BibTeX.smartJoin(root, config.TAG_DIRECTORIES[t], "date.html")
tagListStr.append("<a href='%s'>%s</a>"%(url, name))
tagListStr = "&nbsp;|&nbsp;".join(tagListStr)
#
choiceStr = []
for choice, url in choices:
if url:
choiceStr.append("<a href='%s'>%s</a>"%(url, choice))
else:
choiceStr.append(choice)
choiceStr = ("&nbsp;|&nbsp;".join(choiceStr))
fields = { 'command_line' : "",
'sectiontypes' : sectionType,
'choices' : choiceStr,
'field': fieldName,
'sections' : secStr,
'otherbibs' : tagListStr,
'title': title,
'short_title': short_title,
"root" : root,
}
header, footer = getTemplate(config.TEMPLATE_FILE)
print >>f, header%fields
writeBody(f, sections, section_urls, cache_path=cache_url_path,
base_url=root)
print >>f, footer%fields
def jsonDumper(obj):
if isinstance(obj, BibTeX.BibTeXEntry):
e = obj.entries.copy()
e['key'] = obj.key
return e
else:
raise TypeError("Do not know how to serialize %s"%(obj.__class__,))
def writePageSet(config, bib, tag):
if tag:
bib_entries = [ b for b in bib.entries
if tag in b.get('www_tags', "").split() ]
else:
bib_entries = bib.entries[:]
if not bib_entries:
print >>sys.stderr, "No entries with tag %r; skipping"%tag
return
tagdir = config.TAG_DIRECTORIES[tag]
outdir = os.path.join(config.OUTPUT_DIR, tagdir)
cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir),
config.CACHE_DIR)
if not os.path.exists(outdir):
os.makedirs(outdir, 0755)
##### Sorted views:
## By topic.
entries = BibTeX.sortEntriesBy(bib_entries, "www_section", "ZZZZZZZZZZZZZZ")
entries = BibTeX.splitSortedEntriesBy(entries, "www_section")
if entries[-1][0].startswith("<span class='bad'>"):
entries[-1] = ("Miscellaneous", entries[-1][1])
entries = [ (s, BibTeX.sortEntriesByDate(ents))
for s, ents in entries
]
f = open(os.path.join(outdir,"topic.html"), 'w')
writeHTML(f, entries, "Topics", "topic",
(("By topic", None),
("By date", "./date.html"),
("By author", "./author.html")
),
tag=tag, config=config,
cache_url_path=cache_url_path)
f.close()
## By date.
entries = BibTeX.sortEntriesByDate(bib_entries)
entries = BibTeX.splitSortedEntriesBy(entries, 'year')
for idx in -1, -2:
if entries[idx][0].startswith("<span class='bad'>"):
entries[idx] = ("Unknown", entries[idx][1])
elif entries[idx][0].startswith("forthcoming"):
entries[idx] = ("Forthcoming", entries[idx][1])
sections = [ ent[0] for ent in entries ]
first_year = int(entries[0][1][0]['year'])
try:
last_year = int(entries[-1][1][0].get('year'))
except ValueError:
last_year = int(entries[-2][1][0].get('year'))
years = map(str, range(first_year, last_year+1))
if entries[-1][0] == 'Unknown':
years.append("Unknown")
f = open(os.path.join(outdir,"date.html"), 'w')
writeHTML(f, entries, "Years", "date",
(("By topic", "./topic.html"),
("By date", None),
("By author", "./author.html")
),
tag=tag, config=config,
cache_url_path=cache_url_path)
f.close()
## By author
entries, url_map = BibTeX.splitEntriesByAuthor(bib_entries)
f = open(os.path.join(outdir,"author.html"), 'w')
writeHTML(f, entries, "Authors", "author",
(("By topic", "./topic.html"),
("By date", "./date.html"),
("By author", None),
),
tag=tag, config=config,
cache_url_path=cache_url_path,
section_urls=url_map)
f.close()
## The big BibTeX file
entries = bib_entries[:]
entries = [ (ent.key, ent) for ent in entries ]
entries.sort()
entries = [ ent[1] for ent in entries ]
## Finding the root directory is done by writeHTML(), but
## the BibTeX file doesn't use that, so repeat the code here
root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
if root == "": root = "."
header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE)
f = open(os.path.join(outdir,"bibtex.html"), 'w')
print >>f, header % { 'command_line' : "",
'title': config.TAG_TITLES[tag],
'root': root }
for ent in entries:
print >>f, (
("<tr><td class='bibtex'><a name='%s'>%s</a>"
"<pre class='bibtex'>%s</pre></td></tr>")
%(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1)))
print >>f, footer
f.close()
f = open(os.path.join(outdir,"bibtex.json"), 'w')
json.dump(entries, f, default=jsonDumper)
f.close()
if __name__ == '__main__':
if len(sys.argv) == 2:
print "Loading from %s"%sys.argv[1]
else:
print >>sys.stderr, "Expected a single configuration file as an argument"
sys.exit(1)
config.load(sys.argv[1])
bib = BibTeX.parseFile(config.MASTER_BIB)
for tag in config.TAG_DIRECTORIES.keys():
writePageSet(config, bib, tag)


@@ -1,7 +0,0 @@
[python: **.py]
[jinja2: **/pages/**.html]
extensions=jinja2.ext.autoescape,jinja2.ext.with_,i2p2www.extensions.HighlightExtension
[jinja2: **/pages/global/macros]
extensions=jinja2.ext.autoescape,jinja2.ext.with_
[jinja2: **/blog/**.rst]
extensions=jinja2.ext.autoescape,jinja2.ext.with_


@@ -20,7 +20,7 @@ This work was the idea of Jan-Erik Fiske of `ViaEuropa`_ and Peter Sunde of `fla
Current information can be found on `zzz's forum`_ and #i2p-dev on chat.freenode.net
-.. _`zzz's forum`: http://zzz.i2p.to/topics/888
+.. _`zzz's forum`: http://{{ i2pconv('zzz.i2p') }}/topics/888
Cheers


@@ -16,11 +16,9 @@ The aim of itoopie is to provide an interface that is simpler and has a lower lu
I2PControl is an I2P plugin providing a JSONRPC interface for the I2P router. The interface supports setting basic settings (bandwidth, ports etc.), reading many stats and is provided over an SSL encrypted HTTP connection.
-More information and instructions can be found at `itoopie.net`_, `itoopie.i2p.to`_ (via proxy) and `itoopie.i2p`_ (anonymously).
+More information and instructions can be found at `{{ i2pconv('itoopie.i2p') }}`_.
-.. _`itoopie.net`: http://itoopie.net
-.. _`itoopie.i2p.to`: http://itoopie.i2p.to
-.. _`itoopie.i2p`: http://itoopie.i2p
+.. _`{{ i2pconv('itoopie.i2p') }}`: http://{{ i2pconv('itoopie.i2p') }}
This project has been funded by the VPN services `Relakks`_ & `Ipredator`_ and was initiated by Jan-Erik Fiske and `Peter Sunde`_.


@@ -0,0 +1,88 @@
=============
{% trans %}0.9.7 Release{% endtrans %}
=============
.. meta::
:date: 2013-07-15
:category: release
:excerpt: {% trans %}0.9.7 includes significant bug fixes and improvements.{% endtrans %}
{% trans %}0.9.7 includes significant bug fixes and improvements.{% endtrans %}
{% trans -%}
For the first time, class 'N' routers (those with a minimum of 128 KBytes/sec of shared bandwidth)
will automatically become floodfill (previously it was only 'O' routers with 256 KBps). This will
increase the floodfill population for additional resistance to certain attacks (see below). Floodfill routers
don't consume much additional bandwidth, but they do tend to use additional memory and concurrent
connections. If you do not wish your router to become floodfill, set the advanced configuration
router.floodfillParticipant=false .
{%- endtrans %}
{% trans -%}
As we think the last release fixed the experimental update-via-torrent bugs, 3% of routers should
update over in-network bittorrent this cycle.
{%- endtrans %}
{% trans -%}
Plugin update checks, possibly broken for several releases, are fixed. Your plugins should once again
auto-update after updating the router.
{%- endtrans %}
{% trans -%}
We fixed a major streaming timer bug that contributed to frequent IRC disconnects.
{%- endtrans %}
{% trans -%}
This release contains additional mitigations for the `"practical attacks" paper`_.
However, we have a lot more work to do to resist Sybil attacks on the floodfills, and resist
traffic analysis at the gateways and endpoints of exploratory tunnels.
It's a good reminder for everybody that our network is still relatively small and vulnerable.
We don't currently recommend any uses that would put anybody in serious jeopardy.
We'll keep working to improve it... please keep working to spread the word. A bigger network is a better network.
{%- endtrans %}
.. _{% trans %}`"practical attacks" paper`{% endtrans %}: http://wwwcip.informatik.uni-erlangen.de/~spjsschl/i2p.pdf
**{% trans %}RELEASE DETAILS{% endtrans %}**
**{% trans %}Anonymity Improvements{% endtrans %}**
- {% trans %}End-to-end encryption of responses to leaseset lookups{% endtrans %}
- {% trans %}Expand floodfill pool by enabling class 'N' floodfills{% endtrans %}
- {% trans %}Randomize padding inside encrypted SSU packets{% endtrans %}
- {% trans %}Preparation for better SSU protocol obfuscation{% endtrans %}
**{% trans %}Bug Fixes{% endtrans %}**
- {% trans %}Fix newer lease sets not getting stored or published{% endtrans %}
- {% trans %}Fix classpath bug when used with 4-year-old installations, causing the console not to start{% endtrans %}
- {% trans %}Fix addressbook database bug preventing update of the reverse index{% endtrans %}
- {% trans %}Fix i2psnark bug that changed the infohash of torrents created by Robert and fetched via magnet link{% endtrans %}
- {% trans %}Fix version checking for plugins{% endtrans %}
- {% trans %}Fix a streaming timer bug causing frequent IRC disconnects (also affects other close-on-idle tunnels){% endtrans %}
**{% trans %}Other{% endtrans %}**
- {% trans %}Don't install as a service on Windows by default{% endtrans %}
- {% trans %}Reduce transport idle timeouts{% endtrans %}
- {% trans %}Reduce tunnels on idle in i2psnark{% endtrans %}
- {% trans %}Change default in i2ptunnel GUI to 3 hops{% endtrans %}
- {% trans %}IE 10 support{% endtrans %}
- {% trans %}Individual expiration times in leases, for efficiency on destinations with a high number of tunnels{% endtrans %}
- {% trans %}Low-level encryption and XOR speedups{% endtrans %}
- {% trans %}Jetty 7.6.11{% endtrans %}
- {% trans %}Tomcat 6.0.37{% endtrans %}
- {% trans %}Translation updates: Chinese, French, German, Portuguese, Russian, Spanish{% endtrans %}
- {% trans %}New Turkish translation{% endtrans %}
- {% trans %}Wrapper 3.5.19 (new installs and PPA only){% endtrans %}
- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %}
**{% trans %}SHA256 Checksums:{% endtrans %}**
::
48c10fe5c7455e134df44717215ed66ff79cfb2dd03fbfd64c14b3d5c179eab7 i2pinstall_0.9.7_windows.exe
c0a67051bb0c6f1e4ce3ac8a04257063c4b606b76309b39a6c3daeaaa3888e04 i2pinstall_0.9.7.jar
497e2601ab7594c93e0866e7f6ad9c445f05f7660efcea596ab255677056b1cb i2psource_0.9.7.tar.bz2
d0b8f0a2491c5ad401f87c94b3b805c03bccd69f9f1b57177a53287f29f85959 i2pupdate_0.9.7.zip
a620eafff86e8eb919acb5b8cd42578df68928b122dc3e715c0b431cdd4c0ef2 i2pupdate.su2
0d5723c361059a60431f3275ad5c0978c3b66097ecda1d1b8f5310c594f0a1ea i2pupdate.sud


@@ -0,0 +1,39 @@
===============
{% trans %}0.9.7.1 Release{% endtrans %}
===============
.. meta::
:date: 2013-08-10
:category: release
:excerpt: {% trans %}This unscheduled release disables the RouterInfo verification messages that were used in the attack published in the UCSB paper, which should make correlating a LeaseSet and a Router much more difficult. We have also included a limited number of other fixes listed below. Our 0.9.8 release, which will include IPv6 support, is still on-schedule for late September.{% endtrans %}
{% trans %}This unscheduled release disables the RouterInfo verification messages that were used in the attack published in the UCSB paper, which should make correlating a LeaseSet and a Router much more difficult. We have also included a limited number of other fixes listed below. Our 0.9.8 release, which will include IPv6 support, is still on-schedule for late September.{% endtrans %}
{% trans %}As usual, we recommend that all users update to this release.{% endtrans %}
**{% trans %}RELEASE DETAILS{% endtrans %}**
**{% trans %}Anonymity Improvements{% endtrans %}**
- {% trans %}Disable RouterInfo verification messages{% endtrans %}
**{% trans %}Other{% endtrans %}**
- {% trans %}Extend inbound tunnel expiration{% endtrans %}
- {% trans %}i2prouter: bashism fix{% endtrans %}
- {% trans %}i2psnark: increase max piece size, mime type updates{% endtrans %}
- {% trans %}New reseed host{% endtrans %}
- {% trans %}New update hosts, thanks Meeh and dg{% endtrans %}
- {% trans %}Streaming: RTO changes{% endtrans %}
- {% trans %}Updater: Increase update-via-torrent to 30 percent{% endtrans %}
- {% trans %}UPnP fix for some hardware{% endtrans %}
**{% trans %}SHA256 Checksums:{% endtrans %}**
::
293f445196a2f35c4d580f65b548135399e1f4443450b5ecf1cc53b1203fdad1 i2pinstall_0.9.7.1_windows.exe
9fae874a4d680f50f5efd7be70cfcf55f2f4687e011bde9c4b4899bafb002e97 i2pinstall_0.9.7.1.jar
7b73bdb23c53798054741cbaa4e7d8cce832ee566fbb17df0c803d0c22d099e1 i2psource_0.9.7.1.tar.bz2
69ca22a77a2de87f726d86555317f8688891d31f5312cf71d5a43febe2729b38 i2pupdate_0.9.7.1.zip
f59c9c80349c328b3e912113a3842146f647ff22ae323cef6b1e56a23f8c8cf1 i2pupdate.su2
52d1f32e2a72091da10312853e5df6bced12cb97770ba20732f2d9d6c4d2f5fe i2pupdate.sud


@@ -0,0 +1,138 @@
=============
{% trans %}0.9.8 Release{% endtrans %}
=============
.. meta::
:date: 2013-09-30
:category: release
:excerpt: {% trans %}0.9.8 includes the long-awaited support for IPv6. It's enabled by default, but of course you need a public IPv6 address to use it. Configuration is on the 'network' configuration tab in your console. We also have anonymity improvements including padding of SSU packets and longer router private keys.{% endtrans %}
{% trans %}0.9.8 includes the long-awaited support for IPv6. It's enabled by default, but of course you need a public IPv6 address to use it. Configuration is on the 'network' configuration tab in your console. We also have anonymity improvements including padding of SSU packets and longer router private keys.{% endtrans %}
{% trans %}30% of you will update via in-network torrent in this update cycle.{% endtrans %}
**{% trans %}IPv6 Details{% endtrans %}**
{% trans -%}
IPv6 is enabled and preferred by default. If you have a public IPv6 address
and you are connecting to another router with a published IPv6 address, it will
connect via IPv6. There is a new IPv6 configuration section on /confignet in
the router console. If IPv6 is causing problems you may disable it there.
{%- endtrans %}
{% trans -%}
As a part of the IPv6 development effort, I2P now supports multiple
published IP addresses. If you have multiple public IP addresses (IPv4, IPv6,
or both), you may enable or disable them individually on /confignet. The
default is to use the first IPv4 and IPv6 addresses it discovers. If you have
multiple addresses you should review the configuration on /confignet and adjust
it if necessary.
Note that while you may enable multiple IPv4 and IPv6 addresses on /confignet,
we recommend that you use only one IPv4 and one IPv6 address. There are
bugs still to be fixed with multiple addresses of each type.
{%- endtrans %}
{% trans -%}
While IPv6 support was designed and developed over several years, it has
only been tested by a limited number of users and is still beta. If you do have
a public IPv6 address, please monitor your router and the logs for problems,
and disable it if necessary. Please report any bugs on
http://trac.i2p2.i2p.
{%- endtrans %}
**{% trans %}Rekeying Details{% endtrans %}**
{% trans -%}
For those of you running I2P on faster hardware (generally, 64-bit x86) the
router will generate a new identity using longer keys. This will substantially
reduce your participating traffic for 48 hours or more, while your router
re-integrates into the network. Due to the new keys, the large number of
torrent updates, and the recent network growth, we expect substantial
disruption to the network for a week or more after the update is released.
Please be patient and things should start to improve after a few days.
{%- endtrans %}
{% trans -%}
These changes may result in higher CPU usage for some of you. We're doing
our best to increase efficiency, but stronger security generally requires more
computation. Performance may also be poor during the first week
due to the network churn.
We will evaluate the network performance before deciding whether to
change the key length on slower hardware in a future release.
{%- endtrans %}
{% trans -%}
We are experiencing rapid network growth in the last few weeks, which is
causing a bit of a bumpy ride for some, especially on weekends. However, the
network is still performing fairly well, so keep spreading the word.
{%- endtrans %}
**{% trans %}More Changes Coming{% endtrans %}**
{% trans -%}
We're in the initial stages of designing major changes to strengthen our
crypto. Stronger crypto will use more CPU and may
require a Java 7 JRE at a minimum. We understand your desire to run I2P on low-power
and/or older hardware. We're working hard to minimize the impacts, but some
loss of performance is inevitable. In addition, Java 5 and 6 are no longer
supported by Oracle. Now is a good time to upgrade to Java 7. Any change in
minimum requirements will be announced well in advance.
{%- endtrans %}
**{% trans %}New Website{% endtrans %}**
{% trans -%}
After a heroic effort by str4d, the new website preview is available at
http://i2hq.srv.i2p2.de. We hope to see it go live at
https://geti2p.net and http://www.i2p2.i2p soon. Please
contribute to the new website translations on Transifex, especially the
website_priority resource.
{%- endtrans %}
**{% trans %}Community Participation{% endtrans %}**
{% trans -%}
In early August, hottuna and zzz attended DEFCON 21 in Las Vegas.
Last weekend, echelon attended the CTS IV conference in Berlin and
psi attended the Tahoe-LAFS hackfest at GNU 30 in Cambridge, Mass.
Several of us will be at 30C3 in Hamburg late this year.
It's great to see people participating at these events and representing I2P.
{%- endtrans %}
**{% trans %}RELEASE DETAILS{% endtrans %}**
**{% trans %}Major Changes{% endtrans %}**
- {% trans %}IPv6 support for both NTCP and SSU{% endtrans %}
**{% trans %}Anonymity Improvements{% endtrans %}**
- {% trans %}SSU protocol obfuscation by adding random padding{% endtrans %}
- {% trans %}Longer encryption and DH private keys for users on faster platforms{% endtrans %}
**{% trans %}Bug Fixes{% endtrans %}**
- {% trans %}Fix I2PTunnel / I2CP locking and duplicates (partial){% endtrans %}
- {% trans %}Fix translation of HTTP proxy error pages{% endtrans %}
- {% trans %}Fix occasional runtime exception in NTCP{% endtrans %}
**{% trans %}Other{% endtrans %}**
- {% trans %}Big rework of transport code to accommodate multiple addresses and IPv6{% endtrans %}
- {% trans %}Streaming: Improved recovery from lost acks, other fixes{% endtrans %}
- {% trans %}Use Transifex for translation of initial news and HTTP proxy error pages{% endtrans %}
- {% trans %}Translation updates: Chinese, French, German, Portuguese, Russian, Swedish, Turkish{% endtrans %}
- {% trans %}New Romanian translation{% endtrans %}
- Jetty 7.6.12.v20130726
- {% trans %}Wrapper 3.5.20 (new installs and PPA only){% endtrans %}
- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %}
**{% trans %}SHA256 Checksums:{% endtrans %}**
::
5a863c43dc986087e5a5facd02b8ede32e1903bad1f4531bff95e61eab0facaf i2pinstall_0.9.8_windows.exe
8af3f933346d76ac67ce814d7f991bbc00fa31c23124313841dbef9ae7bcf908 i2pinstall_0.9.8.jar
787d1fe113398dfcec25d7daaca4e4093f309cb3e622b80757bcdf0558472041 i2psource_0.9.8.tar.bz2
24a08305228b817f87e251af74c4b5e9d1726de8d7d64c17bc2ede5511d42e58 i2pupdate_0.9.8.zip
76b049da4e02b96e9e05eaf69b2e8214a6d6874385ab2d82c2885379ccd65278 i2pupdate.su2
dba0f8e4660cb9147c50b7b3c4a0f95d342cfc65a51e0d37e445bc72026ed05f i2pupdate.sud
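To check a download against this list, a minimal Python sketch using only the
standard library's hashlib will do. The file name and digest below are copied
from the i2pinstall_0.9.8.jar line above; the helper itself is illustrative,
not part of the release tooling::

    import hashlib

    def sha256_of(path, bufsize=65536):
        """Hex SHA256 digest of the file at `path`, read in chunks."""
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(bufsize), b''):
                digest.update(chunk)
        return digest.hexdigest()

    expected = '8af3f933346d76ac67ce814d7f991bbc00fa31c23124313841dbef9ae7bcf908'
    if sha256_of('i2pinstall_0.9.8.jar') == expected:
        print('checksum OK')
    else:
        print('checksum MISMATCH - do not run this installer')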

View File

@@ -0,0 +1,36 @@
===============
{% trans %}0.9.8.1 Release{% endtrans %}
===============
.. meta::
:date: 2013-10-02
:category: release
:excerpt: {% trans %}0.9.8.1 fixes a problem with updating to 0.9.8 on Windows for some people. New installs and non-Windows platforms are not affected, however all platforms will automatically update even if running 0.9.8.{% endtrans %}
{% trans %}0.9.8.1 fixes a problem with updating to 0.9.8 on Windows for some people. New installs and non-Windows platforms are not affected, however all platforms will automatically update even if running 0.9.8.{% endtrans %}
{% trans -%}
See the `Trac ticket`_ for details and workarounds. See
`the 0.9.8 release notes`_ for information on IPv6 and other changes.
{%- endtrans %}
{% trans -%}
Due to recent attacks, logins are disabled on `Trac`_ and new registrations are
disabled on `zzz.i2p`_. Until those services are restored, please report all
bugs in #i2p-dev on IRC (freenode or IRC2P).
{%- endtrans %}
.. _{% trans %}`Trac ticket`{% endtrans %}: http://{{ i2pconv('trac.i2p2.i2p') }}/ticket/1056
.. _{% trans %}`the 0.9.8 release notes`{% endtrans %}: {{ url_for('blog_post', slug='2013/09/30/0.9.8-Release') }}
.. _`Trac`: http://{{ i2pconv('trac.i2p2.i2p') }}/
.. _`zzz.i2p`: http://{{ i2pconv('zzz.i2p') }}/
**{% trans %}SHA256 Checksums:{% endtrans %}**
::
e4a0a5929f20a5e176aad1ba4fe85d6c321c06fbc802cd715970ec380bb9e4fe i2pinstall_0.9.8.1_windows.exe
8b933d55622743e3692585d09a1393a898dfd3d8c8f4c7f489adc23981273d30 i2pinstall_0.9.8.1.jar
315072afc19b254a67062affe8b4515198ff64ecfcb4292b5f58b83975b3a1c3 i2psource_0.9.8.1.tar.bz2
a340f84b5893ba0f193ec86e09f15c0ef724735eafb4c67c090f23be020b24ab i2pupdate_0.9.8.1.zip
15d135f9923337df2092e42b9c5aa6ba5904b39c5ff403eef235843b1957b942 i2pupdate.su2
d9902504d63556fa63a503fd088185dbbf3ace8b80e14dd4482b30e56b11f8d6 i2pupdate.sud

View File

@@ -0,0 +1,86 @@
=============
{% trans %}0.9.9 Release{% endtrans %}
=============
.. meta::
:date: 2013-12-07
:category: release
:excerpt: {% trans %}0.9.9 fixes a number of bugs in the netdb, streaming, and i2ptunnel, and starts work on a year-long plan to increase the strength of the cryptographic signing algorithms used in the router, and support multiple algorithms and key lengths simultaneously. Automatic update files will now be signed with 4096-bit RSA keys.{% endtrans %}
{% trans %}0.9.9 fixes a number of bugs in the netdb, streaming, and i2ptunnel, and starts work on a year-long plan to increase the strength of the cryptographic signing algorithms used in the router, and support multiple algorithms and key lengths simultaneously. Automatic update files will now be signed with 4096-bit RSA keys.{% endtrans %}
{% trans -%}
We now support SSL between your router and your servers for added security.
See `this development thread`_ for more information.
{%- endtrans %}
.. _{% trans %}`this development thread`{% endtrans %}: http://{{ i2pconv('zzz.i2p') }}/topics/1495
{% trans -%}
As usual, we recommend that you update to this release.
The best way to maintain security and help the network is to run the latest release.
Several members of the I2P team will be at 30C3 in Hamburg this year.
Come say hello and ask for an I2P sticker.
Thanks to everyone for their support this year.
{%- endtrans %}
**{% trans %}RELEASE DETAILS{% endtrans %}**
**{% trans %}Anonymity Improvements{% endtrans %}**
- {% trans %}Don't build client tunnels through zero-hop exploratory tunnels{% endtrans %}
- {% trans %}New "su3" file support using stronger keys{% endtrans %}
- {% trans %}Use su3 for updates{% endtrans %}
**{% trans %}Bug Fixes{% endtrans %}**
- {% trans %}Issues with losing data when closing streams{% endtrans %}
- {% trans %}Fix various streaming connection limit issues{% endtrans %}
- {% trans %}Issues with resource usage of closed connections{% endtrans %}
- {% trans %}Clean up timer threads in close-on-idle tunnels{% endtrans %}
- {% trans %}Several other streaming fixes{% endtrans %}
- {% trans %}Reject more non-public IPv6 addresses{% endtrans %}
- {% trans %}Fix IPv6 GeoIP{% endtrans %}
- {% trans %}Fix peer selection in first minutes after startup{% endtrans %}
- {% trans %}Several I2PTunnel bug fixes{% endtrans %}
- {% trans %}Fix major i2psnark DHT bug that prevented magnets from working well{% endtrans %}
- {% trans %}Fix client tunnels that fail due to name resolution failure at startup, particularly with b32 hostnames{% endtrans %}
- {% trans %}Fix changing client i2ptunnel target list{% endtrans %}
- {% trans %}Fix major bugs preventing reception of encrypted responses to leaseset lookups and verifies{% endtrans %}
- {% trans %}Fix bad links on some i2psnark buttons in Opera and text-mode browsers{% endtrans %}
- {% trans %}Fix NPE in Susimail{% endtrans %}
**{% trans %}Other{% endtrans %}**
- {% trans %}Start work on supporting stronger signing keys in the router{% endtrans %}
- {% trans %}Reduce thread usage for HTTP Server tunnels{% endtrans %}
- {% trans %}Auto-stop update torrent after some time{% endtrans %}
- {% trans %}Add ability to stop webapp via console{% endtrans %}
- {% trans %}New POST throttler in HTTP server tunnel{% endtrans %}
- {% trans %}Improve connection throttling{% endtrans %}
- {% trans %}More work to reduce number of connections{% endtrans %}
- {% trans %}Re-enable router info expiration job{% endtrans %}
- {% trans %}Extend router info expiration and other changes to reduce load on floodfills{% endtrans %}
- {% trans %}Support multiple servers through a single server tunnel{% endtrans %}
- {% trans %}Support specification of server port in i2ptunnel clients{% endtrans %}
- {% trans %}Add support for SSL connections from i2ptunnel to external server{% endtrans %}
- {% trans %}SSL and crypto code refactoring{% endtrans %}
- {% trans %}i2psnark storage code refactoring{% endtrans %}
- {% trans %}New destination cache{% endtrans %}
- {% trans %}Lots of code cleanup and resolution of findbugs warnings{% endtrans %}
- {% trans %}New Japanese translation (partial){% endtrans %}
- {% trans %}Translation updates: French, German, Italian, Romanian, Russian, Spanish, Swedish, and others{% endtrans %}
- Jetty 7.6.13.v20130916
- {% trans %}Wrapper 3.5.22 (new installs and PPA only){% endtrans %}
- {% trans %}Update GeoIP data (new installs and PPA only){% endtrans %}
**{% trans %}SHA256 Checksums:{% endtrans %}**
::
97dd4326ad8afdea0e78ffcb053b23793dfa79d89458be3fe3a1ed62a2d988e9 i2pinstall_0.9.9_windows.exe
5028910d3fb9747a6724e39f8eccb6d9ebe0530ef017102c372871badfbf6d9f i2pinstall_0.9.9.jar
cbbf25dea50a717c3376eb7af226f0b2a653e0372db8782ef37aa8b3d275436c i2psource_0.9.9.tar.bz2
533b0ce2d9e1bfc8762ba17eef3572ae7355ed8f21d5d3557b718a14b05794f2 i2pupdate_0.9.9.zip
77824eb6f754f8b40301b7d260701eb2211ee51105d5f4b43d2c328f71736e0f i2pupdate.su2
78769de16a997730468e2e66c7519e2923d533cd96259dac352e04b07d80486c i2pupdate.su3
81b89ed00705668003b2715b930519bbeb939b9623c7e6e8d63c2762aa748bd8 i2pupdate.sud

View File

@@ -45,10 +45,12 @@ def downloads_list():
# TODO: read mirror list or list of available files
return render_template('downloads/list.html', def_mirror=DEFAULT_MIRROR)
# Debian-specific page
def downloads_debian():
return render_template('downloads/debian.html')
# Specific file downloader
def downloads_select(version, file):
if (file == 'debian'):
return render_template('downloads/debian.html', file=file)
mirrors=read_mirrors()
obj=[]
for protocol in mirrors.keys():
@@ -71,4 +73,5 @@ def downloads_redirect(version, protocol, domain, file):
if not domain in mirrors:
abort(404)
return redirect(mirrors[domain]['url'] % data)
return redirect(mirrors[randint(0, len(mirrors) - 1)]['url'] % data)
randomain = mirrors.keys()[randint(0, len(mirrors) - 1)]
return redirect(mirrors[randomain]['url'] % data)
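The two-line fix above is needed because read_mirrors() yields a dict keyed by
mirror domain, so indexing it with a random integer raises a KeyError; a random
key has to be drawn first. A self-contained sketch of the same idea, using
random.choice instead of randint (equivalent here; the mirror entries are
illustrative stand-ins):

    from random import choice

    # Stand-in for read_mirrors()[protocol]: domain -> mirror details.
    mirrors = {
        'mirror-a.example': {'url': 'http://mirror-a.example/%(file)s'},
        'mirror-b.example': {'url': 'http://mirror-b.example/%(file)s'},
    }
    data = {'file': 'i2pinstall_0.9.9.jar'}

    domain = choice(list(mirrors))        # random *key*, not random index
    print(mirrors[domain]['url'] % data)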

View File

@@ -6,12 +6,16 @@ from flask import g, redirect, url_for
LEGACY_FUNCTIONS_MAP={
'announcements': {'function': 'blog_index', 'params': {}},
'debian': {'function': 'downloads_select', 'params': {'file': 'debian'}},
'debian': {'function': 'downloads_debian', 'params': {}},
'download': {'function': 'downloads_list', 'params': {}},
'installation': {'function': 'downloads_list', 'params': {}},
'meetings': {'function': 'meetings_index', 'params': {}},
'papers': {'function': 'papers_list', 'params': {}},
'statusnotes': {'function': 'blog_index', 'params': {}},
}
LEGACY_PAGES_MAP={
'api': 'docs',
'applications': 'get-involved/develop/applications',
'benchmarks': 'misc/benchmarks',
'bittorrent': 'docs/applications/bittorrent',
@@ -33,28 +37,27 @@ LEGACY_PAGES_MAP={
'clt': 'misc/clt',
'common_structures_spec': 'docs/spec/common-structures',
'configuration': 'docs/spec/configuration',
'contact': 'about/contact',
'contact': 'contact',
'cvs': 'misc/cvs',
'datagrams': 'docs/api/datagrams',
'dev-guidelines': 'get-involved/guides/dev-guidelines',
'developerskeys': 'get-involved/develop/developers-keys',
'donate': 'get-involved/donate',
'faq': 'support/faq',
'getinvolved': 'get-involved',
'geoip': 'docs/spec/geoip',
'glossary': 'support/glossary',
'glossary': 'about/glossary',
'halloffame': 'about/hall-of-fame',
'how': 'docs',
'how_cryptography': 'docs/how/cryptography',
'how_elgamalaes': 'docs/how/elgamal-aes',
'how_garlicrouting': 'docs/how/garlic-routing',
'how_intro': 'docs/how/intro',
'how_networkcomparisons': 'about/comparison',
'how_networkcomparisons': 'comparison',
'how_networkdatabase': 'docs/how/network-database',
'how_peerselection': 'docs/how/peer-selection',
'how_threatmodel': 'docs/how/threat-model',
'how_tunnelrouting': 'docs/how/tunnel-routing',
'htproxyports': 'support/browser-config',
'htproxyports': 'about/browser-config',
'i2cp': 'docs/protocol/i2cp',
'i2cp_spec': 'docs/spec/i2cp',
'i2np': 'docs/protocol/i2np',
@@ -83,13 +86,13 @@ LEGACY_PAGES_MAP={
'newtranslators': 'get-involved/guides/new-translators',
'ntcp': 'docs/transport/ntcp',
'ntcp_discussion': 'docs/discussions/ntcp',
'othernetworks': 'about/comparison/other-networks',
'papers': 'research/papers',
'performance-history': 'support/performance/history',
'performance': 'support/performance/future',
'othernetworks': 'comparison/other-networks',
'performance-history': 'about/performance/history',
'performance': 'about/performance/future',
'plugin_spec': 'docs/spec/plugin',
'plugins': 'docs/plugins',
'ports': 'docs/ports',
'pressetext-0.7': 'misc/pressetext-0.7',
'protocols': 'docs/protocol',
'ratestats': 'misc/ratestats',
'release-signing-key': 'get-involved/develop/release-signing-key',
@@ -103,6 +106,7 @@ LEGACY_PAGES_MAP={
'supported_applications': 'docs/applications/supported',
'team': 'about/team',
'techintro': 'docs/how/tech-intro',
'ticket1056': 'misc/ticket1056',
'ticket919': 'misc/ticket919',
'todo': 'get-involved/todo',
'transition-guide': 'misc/transition-guide',
@@ -120,14 +124,75 @@ LEGACY_PAGES_MAP={
'upgrade-0.6.1.30': 'misc/upgrade-0.6.1.30',
}
LEGACY_BLOG_POSTS_MAP={
'statnotes0108': {'date': (2008, 2, 1), 'title': 'status'},
'summerofcode-2011': {'date': (2011, 6, 6), 'title': 'Ipredator-SoC'},
'summerofcode-2011-end': {'date': (2011, 9, 3), 'title': 'Ipredator-SoC-itoopie-released'},
}
LEGACY_RELEASES_MAP={
'0.6.1.30': (2007, 10, 7),
'0.6.1.31': (2008, 2, 10),
'0.6.1.32': (2008, 3, 9),
'0.6.1.33': (2008, 4, 26),
'0.6.2': (2008, 6, 7),
'0.6.3': (2008, 8, 26),
'0.6.4': (2008, 10, 6),
'0.6.5': (2008, 12, 1),
'0.7': (2009, 1, 25),
'0.7.1': (2009, 3, 29),
'0.7.2': (2009, 4, 19),
'0.7.3': (2009, 5, 18),
'0.7.4': (2009, 6, 13),
'0.7.5': (2009, 6, 29),
'0.7.6': (2009, 7, 31),
'0.7.7': (2009, 10, 12),
'0.7.8': (2009, 12, 8),
'0.7.9': (2010, 1, 12),
'0.7.10': (2010, 1, 22),
'0.7.11': (2010, 2, 15),
'0.7.12': (2010, 3, 15),
'0.7.13': (2010, 4, 27),
'0.7.14': (2010, 6, 7),
'0.8': (2010, 7, 12),
'0.8.1': (2010, 11, 15),
'0.8.2': (2010, 12, 22),
'0.8.3': (2011, 1, 24),
'0.8.4': (2011, 3, 2),
'0.8.5': (2011, 4, 18),
'0.8.6': (2011, 5, 16),
'0.8.7': (2011, 6, 27),
'0.8.8': (2011, 8, 23),
'0.8.9': (2011, 10, 11),
'0.8.10': (2011, 10, 20),
'0.8.11': (2011, 11, 8),
'0.8.12': (2012, 1, 6),
'0.8.13': (2012, 2, 27),
'0.9': (2012, 5, 2),
'0.9.1': (2012, 7, 30),
'0.9.2': (2012, 9, 21),
'0.9.3': (2012, 10, 27),
'0.9.4': (2012, 12, 17),
'0.9.5': (2013, 3, 8),
'0.9.6': (2013, 5, 28),
'0.9.7': (2013, 7, 15),
'0.9.7.1': (2013, 8, 10),
'0.9.8': (2013, 9, 30),
'0.9.8.1': (2013, 10, 2),
}
def legacy_show(f):
lang = 'en'
if hasattr(g, 'lang') and g.lang:
lang = g.lang
if lang == 'zh':
lang = 'zh_CN'
if f in LEGACY_FUNCTIONS_MAP:
return redirect(url_for(LEGACY_FUNCTIONS_MAP[f]['function'], lang=lang, **LEGACY_FUNCTIONS_MAP[f]['params']))
elif f in LEGACY_PAGES_MAP:
return redirect(url_for('site_show', lang=lang, page=LEGACY_PAGES_MAP[f]))
elif f in LEGACY_BLOG_POSTS_MAP:
return legacy_blog(lang, LEGACY_BLOG_POSTS_MAP[f]['date'], LEGACY_BLOG_POSTS_MAP[f]['title'])
else:
return redirect(url_for('site_show', lang=lang, page=f))
@@ -135,4 +200,16 @@ def legacy_meeting(id):
return redirect(url_for('meetings_show', id=id, lang='en'))
def legacy_status(year, month, day):
return redirect(url_for('blog_post', lang='en', slug=('%s/%s/%s/status' % (year, month, day))))
return legacy_blog('en', (year, month, day), 'status')
def legacy_release(version):
lang = 'en'
if hasattr(g, 'lang') and g.lang:
lang = g.lang
if version in LEGACY_RELEASES_MAP:
return legacy_blog(lang, LEGACY_RELEASES_MAP[version], '%s-Release' % version)
else:
return legacy_show('release-%s' % version)
def legacy_blog(lang, (year, month, day), title):
return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))))
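Tracing one request through these maps shows how the pieces fit: a legacy URL
such as /release-0.9.8 is looked up in LEGACY_RELEASES_MAP and rebuilt as a
dated blog slug with zero-padded month and day. A minimal sketch of that slug
construction (release_slug is a hypothetical helper; Flask's url_for and the
redirect are elided):

    LEGACY_RELEASES_MAP = {'0.9.8': (2013, 9, 30)}

    def release_slug(version):
        year, month, day = LEGACY_RELEASES_MAP[version]
        # Same format string used by legacy_blog() above.
        return '%d/%02d/%02d/%s-Release' % (year, month, day, version)

    print(release_slug('0.9.8'))  # -> 2013/09/30/0.9.8-Release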

View File

@@ -29,7 +29,9 @@ class DataSpecLexer(RegexLexer):
(r'(-*)(//)(-+\+-)', bygroups(Text, Generic.Strong, Text)),
],
'content': [
(r'(\s*)(\+-)', bygroups(Text, Text), ('#pop', 'boundary')),
(r'(\s*)([\+|])$', bygroups(Text, Text), '#pop'),
(r'(\s*)(\.\.\.)(\s)', bygroups(Text, Generic.Strong, Text)),
(r'(\s*)(\.\.\.)$', bygroups(Text, Generic.Strong), '#pop'),
(r'(\s*)(~)$', bygroups(Text, Generic.Strong), '#pop'),
(r'(\s*)([\w=;]+)$', bygroups(Text, Name.Tag), '#pop'),
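For readers unfamiliar with Pygments state stacks: a rule's third element
names the state transition, and a tuple such as ('#pop', 'boundary') pops the
current state and then pushes another. A toy lexer using the same mechanics
(MiniSpecLexer is a simplified illustration, not the real DataSpecLexer):

    from pygments.lexer import RegexLexer, bygroups
    from pygments.token import Text, Name, Generic

    class MiniSpecLexer(RegexLexer):
        """Toy lexer demonstrating #pop / push transitions."""
        name = 'MiniSpec'
        tokens = {
            'root': [
                (r'\+-+\+\n', Generic.Strong, 'content'),   # push 'content'
                (r'.*\n', Text),
            ],
            'content': [
                # Pop back to 'root', then push 'boundary'.
                (r'(\s*)(\+-)', bygroups(Text, Text), ('#pop', 'boundary')),
                (r'(\s*)([\w=;]+)(\n)', bygroups(Text, Name.Tag, Text), '#pop'),
                (r'.*\n', Text),
            ],
            'boundary': [
                (r'-+\+\n', Generic.Strong, '#pop'),
                (r'.*\n', Text),
            ],
        }

    # Example: tokenize a two-line snippet.
    for tok in MiniSpecLexer().get_tokens('+---+\nsize\n'):
        print(tok)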

View File

@@ -56,7 +56,10 @@ def get_meetings_ids(num=0):
# ignore all non-.rst files
if not f.endswith('.rst'):
continue
meetings.append(int(f[:-4]))
try:
meetings.append(int(f[:-4]))
except ValueError:
continue
meetings.sort()
meetings.reverse()
if (num > 0):
@@ -65,7 +68,7 @@ def get_meetings_ids(num=0):
def render_meeting_rst(id):
# check if that file actually exists
name = str(id) + '.rst'
name = '%03d.rst' % id
path = safe_join(MEETINGS_DIR, name)
if not os.path.exists(path):
abort(404)
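These two changes belong together: meeting logs are stored under zero-padded
names like 062.rst, so ids parsed from file names must tolerate stray
non-numeric files and be padded again on lookup. A self-contained sketch of
the same scan (the directory path here is illustrative):

    import os

    MEETINGS_DIR = 'meetings/logs'   # illustrative path

    def get_meeting_ids():
        """Numeric ids from files like '062.rst', newest first."""
        ids = []
        for f in os.listdir(MEETINGS_DIR):
            if not f.endswith('.rst'):
                continue
            try:
                ids.append(int(f[:-4]))    # '062.rst' -> 62
            except ValueError:             # skip e.g. 'template.rst'
                continue
        return sorted(ids, reverse=True)

    print('%03d.rst' % 62)   # id back to its on-disk name: '062.rst'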

View File

@@ -1,5 +1,5 @@
I2P dev meeting, July 3 2002
============================
I2P dev meeting, July 3, 2002
=============================
(Courtesy of the wayback machine http://www.archive.org/)

View File

@@ -0,0 +1,7 @@
I2P dev meeting, September 4, 2002
==================================
Quick recap
-----------
TODO

Some files were not shown because too many files have changed in this diff.