diff --git a/i2p2www/__init__.py b/i2p2www/__init__.py
index 052c16dc..14f16fb1 100644
--- a/i2p2www/__init__.py
+++ b/i2p2www/__init__.py
@@ -67,32 +67,32 @@ SUPPORTED_LANGS = [
 ]
 
 SUPPORTED_LANG_NAMES = {
-    'ar': u'Arabic العربية',
-    'id': u'Bahasa Indonesia',
-    'zh': u'Chinese 中文',
-    'zh_TW': u'Chinese 中文 (繁體中文, 台灣)',
-    'de': u'Deutsch',
-    'en': u'English',
-    'es': u'Castellano',
-    'fr': u'Français',
-    'el': u'Greek Ελληνικά',
-    'he': u'Hebrew עברית',
-    'hu': u'Hungarian',
-    'it': u'Italiano',
-    'ja': u'Japanese 日本語',
-    'ko': u'Korean 한국말',
-    'mg': u'Fiteny Malagasy',
-    'nl': u'Nederlands',
-    'fa': u'Persian فارسی',
-    'pl': u'Polski',
-    'pt': u'Português',
-    'pt_BR': u'Português do Brasil',
-    'ro': u'Română',
-    'ru': u'Russian Русский язык',
-    'fi': u'Suomi',
-    'sv': u'Svenska',
-    'tr': u'Türkçe',
-    'uk': u'Ukrainian Українська',
+    'ar': 'Arabic العربية',
+    'id': 'Bahasa Indonesia',
+    'zh': 'Chinese 中文',
+    'zh_TW': 'Chinese 中文 (繁體中文, 台灣)',
+    'de': 'Deutsch',
+    'en': 'English',
+    'es': 'Castellano',
+    'fr': 'Français',
+    'el': 'Greek Ελληνικά',
+    'he': 'Hebrew עברית',
+    'hu': 'Hungarian',
+    'it': 'Italiano',
+    'ja': 'Japanese 日本語',
+    'ko': 'Korean 한국말',
+    'mg': 'Fiteny Malagasy',
+    'nl': 'Nederlands',
+    'fa': 'Persian فارسی',
+    'pl': 'Polski',
+    'pt': 'Português',
+    'pt_BR': 'Português do Brasil',
+    'ro': 'Română',
+    'ru': 'Russian Русский язык',
+    'fi': 'Suomi',
+    'sv': 'Svenska',
+    'tr': 'Türkçe',
+    'uk': 'Ukrainian Українська',
 }
 
 RTL_LANGS = [
@@ -213,7 +213,7 @@ def detect_theme():
     theme = 'duck'
     if 'style' in request.cookies:
         theme = request.cookies['style']
-    if 'theme' in request.args.keys():
+    if 'theme' in request.args:
         theme = request.args['theme']
     # TEMPORARY: enable external themes
     # TODO: Remove this (and the corresponding lines in global/layout.html
@@ -263,5 +263,5 @@ def server_error(error):
     return render_template('global/error_500.html'), 500
 
 # Import these to ensure they get loaded
-import templatevars
-import urls
+from . import templatevars
+from . import urls
diff --git a/i2p2www/anonbib/BibTeX.py b/i2p2www/anonbib/BibTeX.py
index 6d1df089..4c0b4658 100644
--- a/i2p2www/anonbib/BibTeX.py
+++ b/i2p2www/anonbib/BibTeX.py
@@ -6,14 +6,14 @@
 Based on perl code by Eddie Kohler; heavily modified.
 """
 
-import cStringIO
+import io
 import re
 import sys
 import os
 
-import config
+from . import config
 
-import rank
+from . import rank
 
 __all__ = [ 'ParseError', 'BibTeX', 'BibTeXEntry', 'htmlize',
             'ParsedAuthor', 'FileIter', 'Parser', 'parseFile',
@@ -66,7 +66,7 @@ class BibTeX:
         """Add a BibTeX entry to this file."""
         k = ent.key
         if self.byKey.get(ent.key.lower()):
-            print >> sys.stderr, "Already have an entry named %s"%k
+            print("Already have an entry named %s"%k, file=sys.stderr)
             return
         self.entries.append(ent)
         self.byKey[ent.key.lower()] = ent
@@ -79,7 +79,7 @@ class BibTeX:
             try:
                 cr = self.byKey[ent['crossref'].lower()]
             except KeyError:
-                print "No such crossref: %s"% ent['crossref']
+                print("No such crossref: %s"% ent['crossref'])
                 break
             if seen.get(cr.key):
                 raise ParseError("Circular crossref at %s" % ent.key)
@@ -87,12 +87,12 @@ class BibTeX:
             del ent.entries['crossref']
 
             if cr.entryLine < ent.entryLine:
-                print "Warning: crossref %s used after declaration"%cr.key
+                print("Warning: crossref %s used after declaration"%cr.key)
 
-            for k in cr.entries.keys():
-                if ent.entries.has_key(k):
-                    print "ERROR: %s defined both in %s and in %s"%(
-                        k,ent.key,cr.key)
+            for k in list(cr.entries.keys()):
+                if k in ent.entries:
+                    print("ERROR: %s defined both in %s and in %s"%(
+                        k,ent.key,cr.key))
                 else:
                     ent.entries[k] = cr.entries[k]
 
@@ -105,7 +105,7 @@ class BibTeX:
             rk = "title"
 
         for ent in self.entries:
-            if ent.type in config.OMIT_ENTRIES or not ent.has_key(rk):
+            if ent.type in config.OMIT_ENTRIES or rk not in ent:
                 ent.check()
                 del self.byKey[ent.key.lower()]
             else:
@@ -122,7 +122,7 @@ def buildAuthorTable(entries):
             authorsByLast.setdefault(tuple(a.last), []).append(a)
     # map from author to collapsed author.
     result = {}
-    for k,v in config.COLLAPSE_AUTHORS.items():
+    for k,v in list(config.COLLAPSE_AUTHORS.items()):
         a = parseAuthor(k)[0]
         c = parseAuthor(v)[0]
         result[c] = c
@@ -130,7 +130,7 @@ def buildAuthorTable(entries):
 
     for e in entries:
         for author in e.parsedAuthor:
-            if result.has_key(author):
+            if author in result:
                 continue
 
             c = author
@@ -141,16 +141,16 @@ def buildAuthorTable(entries):
             result[author] = c
 
     if 0:
-        for a,c in result.items():
+        for a,c in list(result.items()):
             if a != c:
-                print "Collapsing authors: %s => %s" % (a,c)
+                print("Collapsing authors: %s => %s" % (a,c))
     if 0:
-        print parseAuthor("Franz Kaashoek")[0].collapsesTo(
-            parseAuthor("M. Franz Kaashoek")[0])
-        print parseAuthor("Paul F. Syverson")[0].collapsesTo(
-            parseAuthor("Paul Syverson")[0])
-        print parseAuthor("Paul Syverson")[0].collapsesTo(
-            parseAuthor("Paul F. Syverson")[0])
+        print(parseAuthor("Franz Kaashoek")[0].collapsesTo(
+            parseAuthor("M. Franz Kaashoek")[0]))
+        print(parseAuthor("Paul F. Syverson")[0].collapsesTo(
+            parseAuthor("Paul Syverson")[0]))
+        print(parseAuthor("Paul Syverson")[0].collapsesTo(
+            parseAuthor("Paul F. Syverson")[0]))
Syverson")[0])) return result @@ -221,7 +221,7 @@ def splitEntriesByAuthor(entries): htmlResult[sortkey] = secname result.setdefault(sortkey, []).append(ent) - sortnames = result.keys() + sortnames = list(result.keys()) sortnames.sort() sections = [ (htmlResult[n], result[n]) for n in sortnames ] return sections, url_map @@ -255,13 +255,13 @@ def sortEntriesByDate(entries): monthname = match.group(1) mon = MONTHS.index(monthname) except ValueError: - print "Unknown month %r in %s"%(ent.get("month"), ent.key) + print("Unknown month %r in %s"%(ent.get("month"), ent.key)) mon = 0 try: date = int(ent['year'])*13 + mon except KeyError: - print "ERROR: No year field in %s"%ent.key + print("ERROR: No year field in %s"%ent.key) date = 10000*13 except ValueError: date = 10000*13 @@ -286,7 +286,7 @@ class BibTeXEntry: def get(self, k, v=None): return self.entries.get(k,v) def has_key(self, k): - return self.entries.has_key(k) + return k in self.entries def __getitem__(self, k): return self.entries[k] def __setitem__(self, k, v): @@ -312,13 +312,13 @@ class BibTeXEntry: d = ["@%s{%s,\n" % (self.type, self.key)] if v: df = DISPLAYED_FIELDS[:] - for k in self.entries.keys(): + for k in list(self.entries.keys()): if k not in df: df.append(k) else: df = DISPLAYED_FIELDS for f in df: - if not self.entries.has_key(f): + if f not in self.entries: continue v = self.entries[f] if v.startswith(""): @@ -330,7 +330,7 @@ class BibTeXEntry: d.append("%%%%% "+("ERROR: Non-ASCII characters: '%r'\n"%np)) d.append(" ") v = v.replace("&", "&") - if invStrings.has_key(v): + if v in invStrings: s = "%s = %s,\n" %(f, invStrings[v]) else: s = "%s = {%s},\n" % (f, v) @@ -359,7 +359,7 @@ class BibTeXEntry: none.""" errs = self._check() for e in errs: - print e + print(e) return not errs def _check(self): @@ -396,14 +396,14 @@ class BibTeXEntry: not self['booktitle'].startswith("{Proceedings of"): errs.append("ERROR: %s's booktitle (%r) doesn't start with 'Proceedings of'" % (self.key, self['booktitle'])) - if self.has_key("pages") and not re.search(r'\d+--\d+', self['pages']): + if "pages" in self and not re.search(r'\d+--\d+', self['pages']): errs.append("ERROR: Misformed pages in %s"%self.key) if self.type == 'proceedings': if self.get('title'): errs.append("ERROR: %s is a proceedings: it should have a booktitle, not a title." % self.key) - for field, value in self.entries.items(): + for field, value in list(self.entries.items()): if value.translate(ALLCHARS, PRINTINGCHARS): errs.append("ERROR: %s.%s has non-ASCII characters"%( self.key, field)) @@ -551,8 +551,8 @@ class BibTeXEntry: cache_section = self.get('www_cache_section', ".") if cache_section not in config.CACHE_SECTIONS: if cache_section != ".": - print >>sys.stderr, "Unrecognized cache section %s"%( - cache_section) + print("Unrecognized cache section %s"%( + cache_section), file=sys.stderr) cache_section="." for key, name, ext in (('www_abstract_url', 'abstract','abstract'), @@ -766,13 +766,13 @@ class ParsedAuthor: short = o.first; long = self.first initials_s = "".join([n[0] for n in short]) - initials_l = "".join([n[0] for n in long]) + initials_l = "".join([n[0] for n in int]) idx = initials_l.find(initials_s) if idx < 0: return self - n = long[:idx] + n = int[:idx] for i in range(idx, idx+len(short)): - a = long[i]; b = short[i-idx] + a = int[i]; b = short[i-idx] if a == b: n.append(a) elif len(a) == 2 and a[1] == '.' 
and a[0] == b[0]: @@ -781,7 +781,7 @@ class ParsedAuthor: n.append(a) else: return self - n += long[idx+len(short):] + n += int[idx+len(short):] if n == self.first: return self @@ -842,7 +842,7 @@ def _split(s,w=79,indent=8): first = 1 indentation = "" while len(s) > w: - for i in xrange(w-1, 20, -1): + for i in range(w-1, 20, -1): if s[i] == ' ': r.append(indentation+s[:i]) s = s[i+1:] @@ -864,14 +864,14 @@ class FileIter: if fname: file = open(fname, 'r') if string: - file = cStringIO.StringIO(string) + file = io.StringIO(string) if file: - it = iter(file.xreadlines()) + it = iter(file) self.iter = it assert self.iter self.lineno = 0 - self._next = it.next - def next(self): + self._next = it.__next__ + def __next__(self): self.lineno += 1 return self._next() @@ -880,7 +880,7 @@ def parseAuthor(s): try: return _parseAuthor(s) except: - print >>sys.stderr, "Internal error while parsing author %r"%s + print("Internal error while parsing author %r"%s, file=sys.stderr) raise def _parseAuthor(s): @@ -891,7 +891,7 @@ def _parseAuthor(s): while s: s = s.strip() bracelevel = 0 - for i in xrange(len(s)): + for i in range(len(s)): if s[i] == '{': bracelevel += 1 elif s[i] == '}': @@ -947,8 +947,8 @@ def _parseAuthor(s): return parsedAuthors -ALLCHARS = "".join(map(chr,range(256))) -PRINTINGCHARS = "\t\n\r"+"".join(map(chr,range(32, 127))) +ALLCHARS = "".join(map(chr,list(range(256)))) +PRINTINGCHARS = "\t\n\r"+"".join(map(chr,list(range(32, 127)))) LC_CHARS = "abcdefghijklmnopqrstuvwxyz" SV_DELCHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" @@ -995,7 +995,7 @@ class Parser: self.strings.update(initial_strings) self.newStrings = {} self.invStrings = {} - for k,v in config.INITIAL_STRINGS.items(): + for k,v in list(config.INITIAL_STRINGS.items()): self.invStrings[v]=k self.fileiter = fileiter if result is None: @@ -1049,7 +1049,7 @@ class Parser: continue data.append(line) data.append(" ") - line = it.next() + line = next(it) self.litStringLine = 0 elif line[0] == '{': bracelevel += 1 @@ -1076,13 +1076,13 @@ class Parser: #print bracelevel, "C", repr(line) data.append(line) data.append(" ") - line = it.next() + line = next(it) elif line[0] == '#': - print >>sys.stderr, "Weird concat on line %s"%it.lineno + print("Weird concat on line %s"%it.lineno, file=sys.stderr) elif line[0] in "},": if not data: - print >>sys.stderr, "No data after field on line %s"%( - it.lineno) + print("No data after field on line %s"%( + it.lineno), file=sys.stderr) else: m = RAW_DATA_RE.match(line) if m: @@ -1170,7 +1170,7 @@ class Parser: else: key = v[0] d = {} - for i in xrange(1,len(v),2): + for i in range(1,len(v),2): d[v[i].lower()] = v[i+1] ent = BibTeXEntry(self.curEntType, key, d) ent.entryLine = self.entryLine @@ -1197,11 +1197,11 @@ class Parser: def _parse(self): it = self.fileiter - line = it.next() + line = next(it) while 1: # Skip blank lines. while not line or line.isspace() or OUTER_COMMENT_RE.match(line): - line = it.next() + line = next(it) # Get the first line of an entry. m = ENTRY_BEGIN_RE.match(line) if m: @@ -1215,7 +1215,7 @@ class Parser: def _advance(it,line): while not line or line.isspace() or COMMENT_RE.match(line): - line = it.next() + line = next(it) return line # Matches a comment line outside of an entry. 
@@ -1265,5 +1265,5 @@ if __name__ == '__main__':
 
         for e in r.entries:
             if e.type in ("proceedings", "journal"): continue
-            print e.to_html()
+            print(e.to_html())
diff --git a/i2p2www/anonbib/config.py b/i2p2www/anonbib/config.py
index c1b1b6ec..095a1e62 100644
--- a/i2p2www/anonbib/config.py
+++ b/i2p2www/anonbib/config.py
@@ -19,7 +19,7 @@ del _k
 
 def load(cfgFile):
     mod = {}
-    execfile(cfgFile, mod)
+    exec(compile(open(cfgFile, "rb").read(), cfgFile, 'exec'), mod)
     for _k in _KEYS:
         try:
             globals()[_k]=mod[_k]
@@ -28,7 +28,7 @@ def load(cfgFile):
     INITIAL_STRINGS.update(_EXTRA_INITIAL_STRINGS)
 
     AUTHOR_RE_LIST[:] = [
-        (re.compile(k, re.I), v,) for k, v in AUTHOR_URLS.items()
+        (re.compile(k, re.I), v,) for k, v in list(AUTHOR_URLS.items())
     ]
 
     NO_COLLAPSE_AUTHORS_RE_LIST[:] = [
@@ -36,7 +36,7 @@ def load(cfgFile):
     ]
 
     ALPHABETIZE_AUTHOR_AS_RE_LIST[:] = [
-        (re.compile(k, re.I), v,) for k,v in ALPHABETIZE_AUTHOR_AS.items()
+        (re.compile(k, re.I), v,) for k,v in list(ALPHABETIZE_AUTHOR_AS.items())
     ]
 
 _EXTRA_INITIAL_STRINGS = {
diff --git a/i2p2www/anonbib/metaphone.py b/i2p2www/anonbib/metaphone.py
index f57135d7..7fa3dec3 100644
--- a/i2p2www/anonbib/metaphone.py
+++ b/i2p2www/anonbib/metaphone.py
@@ -45,7 +45,7 @@ SINGLETONS = {
     'z': 's',
 }
 
-ALLCHARS = "".join(map(chr, range(256)))
+ALLCHARS = "".join(map(chr, list(range(256))))
 NONLCCHARS = "".join([c for c in ALLCHARS if not c.islower()])
 def metaphone(s):
     """Return the metaphone equivalent of a provided string"""
@@ -182,7 +182,7 @@ def metaphone(s):
     return "".join(result)
 
 def demo(a):
-    print a, "=>", metaphone(a)
+    print(a, "=>", metaphone(a))
 
 if __name__ == '__main__':
     demo("Nick. Mathewson")
diff --git a/i2p2www/anonbib/rank.py b/i2p2www/anonbib/rank.py
index 175a10d6..a8a572a9 100644
--- a/i2p2www/anonbib/rank.py
+++ b/i2p2www/anonbib/rank.py
@@ -7,7 +7,7 @@ cache_expire = 60*60*24*30 # 30 days
 
 # Checks
-import config
+from . import config
 import os
 import sys
 from os.path import exists, isdir, join, getmtime
@@ -32,8 +32,8 @@ def cache_folder():
     return r
 
 import re
-from urllib2 import urlopen, build_opener
-from urllib import quote
+from urllib.request import urlopen, build_opener
+from urllib.parse import quote
 from datetime import date
 import hashlib
@@ -66,17 +66,17 @@ def getPageForTitle(title, cache=True, update=True, save=True):
     if exists(join(cache_folder(), md5h(url))) and cache:
-        return url, file(join(cache_folder(), md5h(url)),'r').read()
+        return url, open(join(cache_folder(), md5h(url)),'r').read()
     elif update:
-        print "Downloading rank for %r."%title
+        print("Downloading rank for %r."%title)
 
         # Make a custom user agent (so that we are not filtered by Google)!
         opener = build_opener()
         opener.addheaders = [('User-agent', 'Anon.Bib.0.1')]
 
-        print "connecting..."
+        print("connecting...")
         connection = opener.open(url)
-        print "reading"
+        print("reading")
         page = connection.read()
-        print "done"
+        print("done")
         if save:
-            file(join(cache_folder(), md5h(url)),'w').write(page)
+            open(join(cache_folder(), md5h(url)),'w').write(page)
         return url, page
@@ -140,20 +140,20 @@ def get_rank_html(title, years=None, base_url=".", update=True,
 
 def TestScholarFormat():
     # We need to ensure that Google Scholar does not change its page format under our feet
     # Use some cases to check if all is good
-    print "Checking google scholar formats..."
+ print("Checking google scholar formats...") stopAndGoCites = getCite("Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System", False)[0] dragonCites = getCite("Mixes protected by Dragons and Pixies: an empirical study", False, save=False)[0] if stopAndGoCites in (0, None): - print """OOPS.\n + print("""OOPS.\n It looks like Google Scholar changed their URL format or their output format. -I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""" +I went to count the cites for the Stop-and-Go MIXes paper, and got nothing.""") sys.exit(1) if dragonCites != None: - print """OOPS.\n + print("""OOPS.\n It looks like Google Scholar changed their URL format or their output format. -I went to count the cites for a fictitious paper, and found some.""" +I went to count the cites for a fictitious paper, and found some.""") sys.exit(1) def urlIsUseless(u): @@ -170,7 +170,7 @@ URLTYPES=[ "pdf", "ps", "txt", "ps_gz", "html" ] if __name__ == '__main__': # First download the bibliography file. - import BibTeX + from . import BibTeX suggest = False if sys.argv[1] == 'suggest': suggest = True @@ -182,7 +182,7 @@ if __name__ == '__main__': bib = BibTeX.parseFile(config.MASTER_BIB) remove_old() - print "Downloading missing ranks." + print("Downloading missing ranks.") for ent in bib.entries: getCite(ent['title'], cache=True, update=True) @@ -190,13 +190,13 @@ if __name__ == '__main__': for ent in bib.entries: haveOne = False for utype in URLTYPES: - if ent.has_key("www_%s_url"%utype): + if "www_%s_url"%utype in ent: haveOne = True break if haveOne: continue - print ent.key, "has no URLs given." + print(ent.key, "has no URLs given.") urls = [ u for u in getPaperURLs(ent['title']) if not urlIsUseless(u) ] for u in urls: - print "\t", u + print("\t", u) diff --git a/i2p2www/anonbib/reconcile.py b/i2p2www/anonbib/reconcile.py index e601af48..69c24afd 100644 --- a/i2p2www/anonbib/reconcile.py +++ b/i2p2www/anonbib/reconcile.py @@ -13,9 +13,9 @@ import re assert sys.version_info[:3] >= (2,2,0) -import BibTeX -import config -import metaphone +from . import BibTeX +from . import config +from . import metaphone _MPCACHE = {} def soundsLike(s1, s2): @@ -168,16 +168,16 @@ class MasterBibTeX(BibTeX.BibTeX): matches = m2 if not matches: - print "No match for %s"%e.key + print("No match for %s"%e.key) if matches[-1][1] is e: - print "%s matches for %s: OK."%(len(matches), e.key) + print("%s matches for %s: OK."%(len(matches), e.key)) else: - print "%s matches for %s: %s is best!" %(len(matches), e.key, - matches[-1][1].key) + print("%s matches for %s: %s is best!" 
%(len(matches), e.key, + matches[-1][1].key)) if len(matches) > 1: for g, m in matches: - print "%%%% goodness", g - print m + print("%%%% goodness", g) + print(m) def noteToURL(note): @@ -202,7 +202,7 @@ def emit(f,ent): global all_ok errs = ent._check() - if master.byKey.has_key(ent.key.strip().lower()): + if ent.key.strip().lower() in master.byKey: errs.append("ERROR: Key collision with master file") if errs: @@ -210,7 +210,7 @@ def emit(f,ent): note = ent.get("note") if ent.getURL() and not note: - ent['note'] = "\url{%s}"%ent.getURL() + ent['note'] = "\\url{%s}"%ent.getURL() elif note: m = re.match(r'\\url{(.*)}', note) if m: @@ -232,61 +232,61 @@ def emit(f,ent): if errs: all_ok = 0 for e in errs: - print >>f, "%%%%", e + print("%%%%", e, file=f) - print >>f, ent.format(77, 4, v=1, invStrings=invStrings) + print(ent.format(77, 4, v=1, invStrings=invStrings), file=f) def emitKnown(f, ent, matches): - print >>f, "%% Candidates are:", ", ".join([e.key for g,e in matches]) - print >>f, "%%" - print >>f, "%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")) + print("%% Candidates are:", ", ".join([e.key for g,e in matches]), file=f) + print("%%", file=f) + print("%"+(ent.format(77,4,1,invStrings).replace("\n", "\n%")), file=f) if __name__ == '__main__': if len(sys.argv) != 3: - print "reconcile.py expects 2 arguments" + print("reconcile.py expects 2 arguments") sys.exit(1) config.load(sys.argv[1]) - print "========= Scanning master ==========" + print("========= Scanning master ==========") master = MasterBibTeX() master = BibTeX.parseFile(config.MASTER_BIB, result=master) master.buildIndex() - print "========= Scanning new file ========" + print("========= Scanning new file ========") try: fn = sys.argv[2] input = BibTeX.parseFile(fn) - except BibTeX.ParseError, e: - print "Error parsing %s: %s"%(fn,e) + except BibTeX.ParseError as e: + print("Error parsing %s: %s"%(fn,e)) sys.exit(1) f = open('tmp.bib', 'w') - keys = input.newStrings.keys() + keys = list(input.newStrings.keys()) keys.sort() for k in keys: v = input.newStrings[k] - print >>f, "@string{%s = {%s}}"%(k,v) + print("@string{%s = {%s}}"%(k,v), file=f) invStrings = input.invStrings for e in input.entries: if not (e.get('title') and e.get('author')): - print >>f, "%%\n%%%% Not enough information to search for a match: need title and author.\n%%" + print("%%\n%%%% Not enough information to search for a match: need title and author.\n%%", file=f) emit(f, e) continue matches = master.includes(e, all=1) if not matches: - print >>f, "%%\n%%%% This entry is probably new: No match found.\n%%" + print("%%\n%%%% This entry is probably new: No match found.\n%%", file=f) emit(f, e) else: - print >>f, "%%" - print >>f, "%%%% Possible match found for this entry; max goodness",\ - matches[-1][0], "\n%%" + print("%%", file=f) + print("%%%% Possible match found for this entry; max goodness",\ + matches[-1][0], "\n%%", file=f) emitKnown(f, e, matches) if not all_ok: - print >>f, "\n\n\nErrors remain; not finished.\n" + print("\n\n\nErrors remain; not finished.\n", file=f) f.close() diff --git a/i2p2www/anonbib/tests.py b/i2p2www/anonbib/tests.py index 3caa1674..915a783a 100644 --- a/i2p2www/anonbib/tests.py +++ b/i2p2www/anonbib/tests.py @@ -3,8 +3,8 @@ """Unit tests for anonbib.""" -import BibTeX -import metaphone +from . import BibTeX +from . 
 #import reconcile
 #import writeHTML
 #import updateCache
@@ -18,40 +18,40 @@ class MetaphoneTests(unittest.TestCase):
 
 class BibTeXTests(unittest.TestCase):
     def testTranslation(self):
         ut = BibTeX.url_untranslate
-        self.assertEquals(ut("Fred"),"Fred")
-        self.assertEquals(ut("Hello, World."), "Hello_2c_20World.")
+        self.assertEqual(ut("Fred"),"Fred")
+        self.assertEqual(ut("Hello, World."), "Hello_2c_20World.")
 
         te = BibTeX.TeXescapeURL
         ute = BibTeX.unTeXescapeURL
-        self.assertEquals(te("http://example/~me/my_file"),
+        self.assertEqual(te("http://example/~me/my_file"),
                           r"http://example/\{}~me/my\_file")
-        self.assertEquals(ute(r"http:{}//example/\{}~me/my\_file"),
+        self.assertEqual(ute(r"http:{}//example/\{}~me/my\_file"),
                           "http://example/~me/my_file")
 
         h = BibTeX.htmlize
-        self.assertEquals(h("Hello, world"), "Hello, world")
-        self.assertEquals(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
+        self.assertEqual(h("Hello, world"), "Hello, world")
+        self.assertEqual(h(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
                           "&aacute;&egrave;&iacute;(&iacute;)&ouml;&amp;"
                           "&ucirc;")
-        self.assertEquals(h(r"\~n and \c{c}"), "&ntilde; and &ccedil;")
-        self.assertEquals(h(r"\AE---a ligature"), "&AElig;&mdash;a ligature")
-        self.assertEquals(h(r"{\it 33}"), " 33")
-        self.assertEquals(h(r"Pages 33--99 or vice--versa?"),
+        self.assertEqual(h(r"\~n and \c{c}"), "&ntilde; and &ccedil;")
+        self.assertEqual(h(r"\AE---a ligature"), "&AElig;&mdash;a ligature")
+        self.assertEqual(h(r"{\it 33}"), " 33")
+        self.assertEqual(h(r"Pages 33--99 or vice--versa?"),
                           "Pages 33&ndash;99 or vice&ndash;versa?")
 
         t = BibTeX.txtize
-        self.assertEquals(t("Hello, world"), "Hello, world")
-        self.assertEquals(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
+        self.assertEqual(t("Hello, world"), "Hello, world")
+        self.assertEqual(t(r"\'a\`e\'{i}(\'\i)\"o&\^u"),
                           "aei(i)o&u")
-        self.assertEquals(t(r"\~n and \c{c}"), "n and c")
-        self.assertEquals(t(r"\AE---a ligature"), "AE---a ligature")
-        self.assertEquals(t(r"{\it 33}"), " 33")
-        self.assertEquals(t(r"Pages 33--99 or vice--versa?"),
+        self.assertEqual(t(r"\~n and \c{c}"), "n and c")
+        self.assertEqual(t(r"\AE---a ligature"), "AE---a ligature")
+        self.assertEqual(t(r"{\it 33}"), " 33")
+        self.assertEqual(t(r"Pages 33--99 or vice--versa?"),
                           "Pages 33--99 or vice--versa?")
 
     def authorsParseTo(self,authors,result):
         pa = BibTeX.parseAuthor(authors)
-        self.assertEquals(["|".join(["+".join(item) for item in
+        self.assertEqual(["|".join(["+".join(item) for item in
                             [a.first,a.von,a.last,a.jr]])
                            for a in pa],
                           result)
diff --git a/i2p2www/anonbib/updateCache.py b/i2p2www/anonbib/updateCache.py
index 7b7fe645..ef70d21e 100755
--- a/i2p2www/anonbib/updateCache.py
+++ b/i2p2www/anonbib/updateCache.py
@@ -10,13 +10,13 @@ import signal
 import time
 import gzip
 
-import BibTeX
-import config
-import urllib2
+from . import BibTeX
+from . import config
+import urllib.request, urllib.error, urllib.parse
 import getopt
 import socket
 import errno
-import httplib
+import http.client
 
 FILE_TYPES = [ "txt", "html", "pdf", "ps", "ps.gz", "abstract" ]
 BIN_FILE_TYPES = [ 'pdf', 'ps.gz' ]
@@ -53,12 +53,12 @@ def downloadFile(key, ftype, section, url,timeout=None):
         signal.alarm(timeout)
     try:
         try:
-            infile = urllib2.urlopen(url)
-        except httplib.InvalidURL, e:
+            infile = urllib.request.urlopen(url)
+        except http.client.InvalidURL as e:
             raise UIError("Invalid URL %s: %s"%(url,e))
-        except IOError, e:
+        except IOError as e:
             raise UIError("Cannot connect to url %s: %s"%(url,e))
-        except socket.error, e:
+        except socket.error as e:
             if getattr(e,"errno",-1) == errno.EINTR:
                 raise UIError("Connection timed out to url %s"%url)
             else:
@@ -80,9 +80,9 @@ def downloadFile(key, ftype, section, url,timeout=None):
     outfile.close()
 
     urlfile = open(fnameURL, 'w')
-    print >>urlfile, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=urlfile)
     if "\n" in url: url = url.replace("\n", " ")
-    print >>urlfile, url
+    print(url, file=urlfile)
     urlfile.close()
 
     os.rename(fnameTmp, fname)
@@ -105,7 +105,7 @@ def getCachedURL(key, ftype, section):
     lines = f.readlines()
     f.close()
     if len(lines) != 2:
-        print >>sys.stderr, "ERROR: unexpected number of lines in", urlFname
+        print("ERROR: unexpected number of lines in", urlFname, file=sys.stderr)
     return lines[1].strip()
 
 def downloadAll(bibtex, missingOnly=0):
@@ -115,33 +115,33 @@ def downloadAll(bibtex, missingOnly=0):
     for e in bibtex.entries:
         urls = getURLs(e)
         key = e.key
         section = e.get("www_cache_section", ".")
-        for ftype, url in urls.items():
+        for ftype, url in list(urls.items()):
             if missingOnly:
                 cachedURL = getCachedURL(key, ftype, section)
                 if cachedURL == url:
-                    print >>sys.stderr,"Skipping",url
+                    print("Skipping",url, file=sys.stderr)
                     continue
                 elif cachedURL is not None:
-                    print >>sys.stderr,"URL for %s.%s has changed"%(key,ftype)
+                    print("URL for %s.%s has changed"%(key,ftype), file=sys.stderr)
                 else:
-                    print >>sys.stderr,"I have no copy of %s.%s"%(key,ftype)
+                    print("I have no copy of %s.%s"%(key,ftype), file=sys.stderr)
             try:
                 downloadFile(key, ftype, section, url)
-                print "Downloaded",url
-            except UIError, e:
-                print >>sys.stderr, str(e)
+                print("Downloaded",url)
+            except UIError as e:
+                print(str(e), file=sys.stderr)
                 errors.append((key,ftype,url,str(e)))
-            except (IOError, socket.error), e:
+            except (IOError, socket.error) as e:
                 msg = "Error downloading %s: %s"%(url,str(e))
-                print >>sys.stderr, msg
+                print(msg, file=sys.stderr)
                 errors.append((key,ftype,url,msg))
-        if urls.has_key("ps") and not urls.has_key("ps.gz"):
+        if "ps" in urls and "ps.gz" not in urls:
             # Say, this is something we'd like to have gzipped locally.
             psFname = getCacheFname(key, "ps", section)
             psGzFname = getCacheFname(key, "ps.gz", section)
             if os.path.exists(psFname) and not os.path.exists(psGzFname):
                 # This is something we haven't gzipped yet.
-                print "Compressing a copy of",psFname
+                print("Compressing a copy of",psFname)
                 outf = gzip.GzipFile(psGzFname, "wb")
                 inf = open(psFname, "rb")
                 while 1:
@@ -156,9 +156,9 @@ if __name__ == '__main__':
 
     if len(sys.argv) == 2:
-        print "Loading from %s"%sys.argv[1]
+        print("Loading from %s"%sys.argv[1])
     else:
-        print >>sys.stderr, "Expected a single configuration file as an argument"
+        print("Expected a single configuration file as an argument", file=sys.stderr)
         sys.exit(1)
 
     config.load(sys.argv[1])
diff --git a/i2p2www/anonbib/writeHTML.py b/i2p2www/anonbib/writeHTML.py
index 19a7c146..e2c4f1f7 100755
--- a/i2p2www/anonbib/writeHTML.py
+++ b/i2p2www/anonbib/writeHTML.py
@@ -9,10 +9,10 @@ import os
 import json
 
 assert sys.version_info[:3] >= (2,2,0)
-os.umask(022)
+os.umask(0o22)
 
-import BibTeX
-import config
+from . import BibTeX
+from . import config
 
 def getTemplate(name):
     f = open(name)
@@ -39,15 +39,15 @@ def writeBody(f, sections, section_urls, cache_path, base_url):
         sDisp = re.sub(r'\s+', ' ', s.strip())
         sDisp = sDisp.replace(" ", "&nbsp;")
         if u:
-            print >>f, ('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
-                (BibTeX.url_untranslate(s), u, sDisp)))
+            print(('<li><h3><a name="%s"></a><a href="%s">%s</a></h3>'%(
+                (BibTeX.url_untranslate(s), u, sDisp))), file=f)
         else:
-            print >>f, ('<li><h3><a name="%s"></a>%s</h3>'%(
-                BibTeX.url_untranslate(s),sDisp))
-        print >>f, "<ul class='expand'>"
+            print(('<li><h3><a name="%s"></a>%s</h3>'%(
+                BibTeX.url_untranslate(s),sDisp)), file=f)
+        print("<ul class='expand'>", file=f)
 
 def writeHTML(f, sections, sectionType, fieldName, choices,
               tag, config, cache_url_path, section_urls={}):
@@ -69,7 +69,7 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
     #
     tagListStr = []
-    st = config.TAG_SHORT_TITLES.keys()
+    st = list(config.TAG_SHORT_TITLES.keys())
     st.sort()
     root = "../"*pathLength(config.TAG_DIRECTORIES[tag])
     if root == "": root = "."
@@ -104,10 +104,10 @@ def writeHTML(f, sections, sectionType, fieldName, choices,
         }
 
     header, footer = getTemplate(config.TEMPLATE_FILE)
-    print >>f, header%fields
+    print(header%fields, file=f)
     writeBody(f, sections, section_urls, cache_path=cache_url_path,
               base_url=root)
-    print >>f, footer%fields
+    print(footer%fields, file=f)
 
 def jsonDumper(obj):
     if isinstance(obj, BibTeX.BibTeXEntry):
@@ -125,7 +125,7 @@ def writePageSet(config, bib, tag):
     bib_entries = bib.entries[:]
 
     if not bib_entries:
-        print >>sys.stderr, "No entries with tag %r; skipping"%tag
+        print("No entries with tag %r; skipping"%tag, file=sys.stderr)
         return
 
     tagdir = config.TAG_DIRECTORIES[tag]
@@ -133,7 +133,7 @@ def writePageSet(config, bib, tag):
     cache_url_path = BibTeX.smartJoin("../"*pathLength(tagdir),
                                       config.CACHE_DIR)
     if not os.path.exists(outdir):
-        os.makedirs(outdir, 0755)
+        os.makedirs(outdir, 0o755)
 
     ##### Sorted views:
 
     ## By topic.
@@ -174,7 +174,7 @@ def writePageSet(config, bib, tag):
         except ValueError:
             last_year = int(entries[-2][1][0].get('year'))
 
-        years = map(str, range(first_year, last_year+1))
+        years = list(map(str, range(first_year, last_year+1)))
         if entries[-1][0] == 'Unknown':
             years.append("Unknown")
 
@@ -216,15 +216,15 @@ def writePageSet(config, bib, tag):
 
     header,footer = getTemplate(config.BIBTEX_TEMPLATE_FILE)
     f = open(os.path.join(outdir,"bibtex.html"), 'w')
-    print >>f, header % { 'command_line' : "",
+    print(header % { 'command_line' : "",
                           'title': config.TAG_TITLES[tag],
-                          'root': root }
+                          'root': root }, file=f)
     for ent in entries:
-        print >>f, (
+        print((
             ("<tr><td class='bibtex'><a name='%s'>%s</a>"
    ") - %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))) - print >>f, footer + %(BibTeX.url_untranslate(ent.key), ent.key, ent.format(90,8,1))), file=f) + print(footer, file=f) f.close() f = open(os.path.join(outdir,"bibtex.json"), 'w') @@ -234,13 +234,13 @@ def writePageSet(config, bib, tag): if __name__ == '__main__': if len(sys.argv) == 2: - print "Loading from %s"%sys.argv[1] + print("Loading from %s"%sys.argv[1]) else: - print >>sys.stderr, "Expected a single configuration file as an argument" + print("Expected a single configuration file as an argument", file=sys.stderr) sys.exit(1) config.load(sys.argv[1]) bib = BibTeX.parseFile(config.MASTER_BIB) - for tag in config.TAG_DIRECTORIES.keys(): + for tag in list(config.TAG_DIRECTORIES.keys()): writePageSet(config, bib, tag) diff --git a/i2p2www/blog/helpers.py b/i2p2www/blog/helpers.py index 4507caea..2294fb58 100644 --- a/i2p2www/blog/helpers.py +++ b/i2p2www/blog/helpers.py @@ -10,10 +10,10 @@ from i2p2www import helpers BLOG_METATAGS = { - 'author': u'I2P devs', + 'author': 'I2P devs', 'category': None, 'date': None, - 'excerpt': u'', + 'excerpt': '', } BLOG_LIST_METATAGS = [ diff --git a/i2p2www/downloads.py b/i2p2www/downloads.py index 848512e0..7c280e3e 100644 --- a/i2p2www/downloads.py +++ b/i2p2www/downloads.py @@ -141,12 +141,12 @@ def downloads_config(): def downloads_select(version, file): mirrors=read_mirrors() obj=[] - for net in mirrors.keys(): + for net in list(mirrors.keys()): a={} a['key']=net a['name']=net a['protocols']=[] - for protocol in mirrors[net].keys(): + for protocol in list(mirrors[net].keys()): b={} b['key']=protocol b['name']=protocol @@ -166,13 +166,13 @@ def downloads_redirect(version, net, protocol, domain, file): } if not protocol: - protocol = mirrors.keys()[randint(0, len(mirrors) - 1)] + protocol = list(mirrors.keys())[randint(0, len(mirrors) - 1)] if not protocol in mirrors: abort(404) mirrors=mirrors[protocol] if not domain: - domain = mirrors.keys()[randint(0, len(mirrors) - 1)] + domain = list(mirrors.keys())[randint(0, len(mirrors) - 1)] if not domain in mirrors: abort(404) return render_template('downloads/redirect.html', diff --git a/i2p2www/extensions.py b/i2p2www/extensions.py index 27d6f677..4eaef1c8 100644 --- a/i2p2www/extensions.py +++ b/i2p2www/extensions.py @@ -29,8 +29,8 @@ def we_are_frozen(): def module_path(): encoding = sys.getfilesystemencoding() if we_are_frozen(): - return os.path.dirname(unicode(sys.executable, encoding)) - return os.path.dirname(unicode(__file__, encoding)) + return os.path.dirname(str(sys.executable, encoding)) + return os.path.dirname(str(__file__, encoding)) class HighlightExtension(Extension): diff --git a/i2p2www/formatters.py b/i2p2www/formatters.py index 8cd36825..dd4f8a00 100644 --- a/i2p2www/formatters.py +++ b/i2p2www/formatters.py @@ -12,7 +12,7 @@ import os import sys import os.path -import StringIO +import io from pygments.formatter import Formatter from pygments.token import Token, Text, STANDARD_TYPES @@ -27,11 +27,11 @@ __all__ = ['I2PHtmlFormatter', 'TextSpecFormatter'] _escape_html_table = { - ord('&'): u'&', - ord('<'): u'<', - ord('>'): u'>', - ord('"'): u'"', - ord("'"): u''', + ord('&'): '&', + ord('<'): '<', + ord('>'): '>', + ord('"'): '"', + ord("'"): ''', } kinds = { @@ -459,7 +459,7 @@ class I2PHtmlFormatter(Formatter): """ if arg is None: arg = ('cssclass' in self.options and '.'+self.cssclass or '') - if isinstance(arg, basestring): + if isinstance(arg, str): args = [arg] else: args = list(arg) @@ -473,7 +473,7 
@@ class I2PHtmlFormatter(Formatter): return ', '.join(tmp) styles = [(level, ttype, cls, style) - for cls, (style, ttype, level) in self.class2style.iteritems() + for cls, (style, ttype, level) in self.class2style.items() if cls and style] styles.sort() lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:]) @@ -511,8 +511,8 @@ class I2PHtmlFormatter(Formatter): cssfilename = os.path.join(os.path.dirname(filename), self.cssfile) except AttributeError: - print >>sys.stderr, 'Note: Cannot determine output file name, ' \ - 'using current directory as base for the CSS file name' + print('Note: Cannot determine output file name, ' \ + 'using current directory as base for the CSS file name', file=sys.stderr) cssfilename = self.cssfile # write CSS file only if noclobber_cssfile isn't given as an option. try: @@ -521,7 +521,7 @@ class I2PHtmlFormatter(Formatter): cf.write(CSSFILE_TEMPLATE % {'styledefs': self.get_style_defs('body')}) cf.close() - except IOError, err: + except IOError as err: err.strerror = 'Error writing CSS file: ' + err.strerror raise @@ -540,7 +540,7 @@ class I2PHtmlFormatter(Formatter): yield 0, DOC_FOOTER def _wrap_tablelinenos(self, inner): - dummyoutfile = StringIO.StringIO() + dummyoutfile = io.StringIO() lncount = 0 for t, line in inner: if t: @@ -884,7 +884,7 @@ class TextSpecFormatter(Formatter): else: outfile.write(value) - for ref in refs.values(): + for ref in list(refs.values()): if enc: outfile.write(ref.encode(enc)) else: diff --git a/i2p2www/helpers.py b/i2p2www/helpers.py index 41e6c6a8..07901b63 100644 --- a/i2p2www/helpers.py +++ b/i2p2www/helpers.py @@ -56,7 +56,7 @@ class Pagination(object): def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2): last = 0 - for num in xrange(1, self.pages + 1): + for num in range(1, self.pages + 1): if num <= left_edge or \ (num > self.page - left_current - 1 and \ num < self.page + right_current) or \ diff --git a/i2p2www/legacy.py b/i2p2www/legacy.py index 013c9d5e..cc6a2a43 100644 --- a/i2p2www/legacy.py +++ b/i2p2www/legacy.py @@ -197,7 +197,7 @@ LEGACY_RELEASES_MAP={ '0.9.8': (2013, 9, 30), '0.9.8.1': (2013, 10, 2), '0.9.9': (2013, 12, 7), - '0.9.10': (2014, 01, 22), + '0.9.10': (2014, 0o1, 22), } def legacy_show(f): @@ -232,5 +232,6 @@ def legacy_release(version): else: return legacy_show('release-%s' % version) -def legacy_blog(lang, (year, month, day), title): +def legacy_blog(lang, xxx_todo_changeme, title): + (year, month, day) = xxx_todo_changeme return redirect(url_for('blog_post', lang=lang, slug=('%d/%02d/%02d/%s' % (year, month, day, title))), 301) diff --git a/i2p2www/meetings/helpers.py b/i2p2www/meetings/helpers.py index 2dc58582..8362a64e 100644 --- a/i2p2www/meetings/helpers.py +++ b/i2p2www/meetings/helpers.py @@ -54,7 +54,7 @@ def get_meetings_ids(num=0): # iterate over all files for f in v[2]: # ignore all non-.rst files - print("Meeting file found", f) + print(("Meeting file found", f)) if not f.endswith('.rst'): continue try: diff --git a/i2p2www/spec/views.py b/i2p2www/spec/views.py index b32ccb2e..6327dff0 100644 --- a/i2p2www/spec/views.py +++ b/i2p2www/spec/views.py @@ -40,12 +40,12 @@ SPEC_CATEGORY_SORT = defaultdict(lambda: 999, { }) PROPOSAL_METATAGS = { - 'author': u'I2P devs', + 'author': 'I2P devs', 'created': None, 'editor': None, 'implementedin': None, 'lastupdated': None, - 'status': u'Draft', + 'status': 'Draft', 'supercededby': None, 'supercedes': None, 'target': None, @@ -70,18 +70,18 @@ PROPOSAL_STATUS_SORT = defaultdict(lambda: 999, { }) 
 METATAG_LABELS = {
-    'accuratefor': u'Accurate for',
-    'author': u'Author',
-    'category': u'Category',
-    'created': u'Created',
-    'editor': u'Editor',
-    'implementedin': u'Implemented in',
-    'lastupdated': u'Last updated',
-    'status': u'Status',
-    'supercededby': u'Superceded by',
-    'supercedes': u'Supercedes',
-    'target': u'Target',
-    'thread': u'Thread',
+    'accuratefor': 'Accurate for',
+    'author': 'Author',
+    'category': 'Category',
+    'created': 'Created',
+    'editor': 'Editor',
+    'implementedin': 'Implemented in',
+    'lastupdated': 'Last updated',
+    'status': 'Status',
+    'supercededby': 'Superceded by',
+    'supercedes': 'Supercedes',
+    'target': 'Target',
+    'thread': 'Thread',
 }
 
 
@@ -150,7 +150,7 @@ def render_rst(directory, name, meta_parser, template):
     # Change highlight formatter
     content = content.replace('{% highlight', "{% highlight formatter='textspec'")
     # Metatags
-    for (metatag, label) in METATAG_LABELS.items():
+    for (metatag, label) in list(METATAG_LABELS.items()):
         content = content.replace(' :%s' % metatag, label)
 
     # render the post with Jinja2 to handle URLs etc.
diff --git a/i2p2www/templatevars.py b/i2p2www/templatevars.py
index 3c342a85..dbdc8f2b 100644
--- a/i2p2www/templatevars.py
+++ b/i2p2www/templatevars.py
@@ -1,7 +1,7 @@
 import ctags
 from flask import g, request, safe_join, url_for
 import os.path
-from urlparse import urlsplit, urlunsplit
+from urllib.parse import urlsplit, urlunsplit
 
 from i2p2www import (
     CANONICAL_DOMAIN,
diff --git a/netdb.i2p2/app.py b/netdb.i2p2/app.py
index 7d43d3be..29ba6cdc 100644
--- a/netdb.i2p2/app.py
+++ b/netdb.i2p2/app.py
@@ -23,7 +23,7 @@ def app(environ, start_response):
     path = req.path[1:]
     if path == '':
         # page
-        page = u'<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
+        page = '<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
 
         # generate links
         entries = os.listdir('netdb')
@@ -46,7 +46,7 @@ def app(environ, start_response):
             res += '<li><a href="%s">%s</a></li>' % (entry, entry)
         resp = Response(page % res, mimetype='text/html')
     elif path == 'robots.txt':
-        dat = u"User-agent: *\nDisallow: /routerInfo-*.dat$\n"
+        dat = "User-agent: *\nDisallow: /routerInfo-*.dat$\n"
         resp = Response(dat, mimetype='text/plain')
     else:
         # load file
diff --git a/netdb.i2p2/fixedapp.py b/netdb.i2p2/fixedapp.py
index 1a333729..758b538e 100644
--- a/netdb.i2p2/fixedapp.py
+++ b/netdb.i2p2/fixedapp.py
@@ -122,7 +122,7 @@ def application(environ, start_response):
 
 
     if path == '':
-        page = u'<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
+        page = '<html><head><title>NetDB</title></head><body><ul>%s</ul></body></html>'
 
         if len(info) == 0:
             # tag the ip as new
@@ -136,7 +136,7 @@ def application(environ, start_response):
             new = []
             if len(entries) > 150:
                 # select some randomly
-                for i in xrange(100):
+                for i in range(100):
                     while True:
                         sel = choice(entries)
                         if not sel.startswith('routerInfo-'):
@@ -179,7 +179,7 @@ def application(environ, start_response):
         resp.add_etag()
 
     elif path == 'robots.txt':
-        dat = u"User-agent: *\nDisallow: /routerInfo-*.dat$\n"
+        dat = "User-agent: *\nDisallow: /routerInfo-*.dat$\n"
         resp = Response(dat, mimetype='text/plain')
 
         resp.add_etag()
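
Reviewer note: a couple of Python 2 idioms survive this patch as unchanged context lines and will still fail at runtime on Python 3. The most visible one is the two-argument form of str.translate, e.g. value.translate(ALLCHARS, PRINTINGCHARS) in BibTeX.py's _check(). Below is a minimal sketch of a Python 3 equivalent, under the assumption that the call is only used to detect non-printing/non-ASCII characters; the helper name nonprinting() is illustrative and not part of this patch:

    # Python 2's two-argument translate -- value.translate(ALLCHARS, PRINTINGCHARS)
    # with ALLCHARS as an identity table -- returned the characters of `value`
    # left over after deleting PRINTINGCHARS, i.e. exactly the non-printing or
    # non-ASCII characters. Python 3's str.translate takes a single table, and
    # deletion is expressed by mapping codepoints to None, which
    # str.maketrans('', '', deletechars) builds for us.
    PRINTINGCHARS = "\t\n\r" + "".join(map(chr, range(32, 127)))
    _DELETE_PRINTING = str.maketrans('', '', PRINTINGCHARS)

    def nonprinting(value):
        """Return only the non-printing/non-ASCII characters of value."""
        return value.translate(_DELETE_PRINTING)

    # BibTeX.py's check would then read:
    #     if nonprinting(value):
    #         errs.append("ERROR: %s.%s has non-ASCII characters" % (self.key, field))

Characters above U+00FF are simply not in the table, so they pass through and still get flagged, matching the intent of the original ASCII check. Separately, note that connection.read() in rank.py returns bytes on Python 3, so the cache write there needs a binary mode (or an explicit .decode()) even after file() is replaced with open().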