#!/usr/bin/env python3
"""
darcsweb - A web interface for darcs

Alberto Bertogli (albertito@blitiri.com.ar)

Inspired on gitweb (as of 28/Jun/2005), which is written by
Kay Sievers and Christian Gierke
"""

# Import time first so we can measure how long the remaining imports take.
import time

time_begin = time.time()

import sys
import os
import io
import string
import unicodedata
import re
import stat
import cgi
import cgitb
import xml.sax
import urllib.parse
import xml.sax.saxutils

# For typing annotations.
from typing import List, Tuple, Iterable

# Show nicely-formatted tracebacks in the browser instead of a bare 500.
cgitb.enable()

# Time spent importing modules, reported later by log_times().
time_imports = time.time() - time_begin

# strftime() format for ISO 8601 timestamps (used in the RSS/atom output).
iso_datetime = "%Y-%m-%dT%H:%M:%SZ"

# How many patches each shortlog/log page shows.
PATCHES_PER_PAGE = 50

# In order to be able to store the config file in /etc/darcsweb, it has to be
# added to sys.path. It's mainly used by distributions, which place the
# default configuration there. Add it second place, so it goes after '.' but
# before the normal path. This allows per-directory config files (desirable
# for multiple darcsweb installations on the same machine), and avoids name
# clashing if there's a config.py in the standard path.
sys.path.insert(1, "/etc/darcsweb")

# Similarly, when hosting multiple darcsweb instances on the same
# server, you can just 'SetEnv DARCSWEB_CONFPATH' in the httpd config,
# and this will have a bigger priority than the system-wide
# configuration file.
if "DARCSWEB_CONFPATH" in os.environ:
    sys.path.insert(1, os.environ["DARCSWEB_CONFPATH"])

# Empty configuration class, we will fill it in later depending on the repo.
# The attributes are purely for type checking. Once we have Python 3.7 as the
# minimum supported version, we can turn these into proper annotations.
class config: darcslogo = "" darcsfav = "" cssfile = "" myname = "" myurl = "" darcspath = "" summary = "" footer = "" cachedir = "" searchlimit = 0 logtimes = "" url_links = [] # type: Iterable[Tuple[str, str]] author_links = "" disable_annotate = False reponame = "" # list of run_darcs() invocations, for performance measures darcs_runs = [] # type: List[Tuple[str]] # exception handling def exc_handle(t, v, tb): try: cache.cancel() except: pass cgitb.handler((t, v, tb)) sys.excepthook = exc_handle # # utility functions # def filter_num(s): l = [c for c in s if c in string.digits] return "".join(l) allowed_in_action = string.ascii_letters + string.digits + "_" def filter_act(s): l = [c for c in s if c in allowed_in_action] return "".join(l) allowed_in_hash = string.ascii_letters + string.digits + "-." def filter_hash(s): l = [c for c in s if c in allowed_in_hash] return "".join(l) def filter_file(s): if ".." in s: raise Exception("Invalid file name, contains '..'") if s == "/": return s # remove extra "/"s r = s[0] last = s[0] for c in s[1:]: if c == last and c == "/": continue r += c last = c return r def printd(*params): print(" ".join(params), "
") def xmlescape(s): """Escape string for use inside XML.""" return xml.sax.saxutils.escape(s, entities={'"': """}) def urlescape(s): """Escape string for use inside URLs/links.""" # Using errors=surrogateescape makes invalid UTF8 translate well through # the URL. We have to also use it in the cgi.FieldStorage so the # unescaping works. return urllib.parse.quote(s, errors="surrogateescape") def how_old(epoch): if config.cachedir: # when we have a cache, the how_old() becomes a problem since # the cached entries will have old data; so in this case just # return a nice string t = time.localtime(epoch) currentYear = time.localtime()[0] if t[0] == currentYear: s = time.strftime("%d %b %H:%M", t) else: s = time.strftime("%d %b %Y %H:%M", t) return s age = int(time.time()) - int(epoch) if age > 60 * 60 * 24 * 365 * 2: s = str(int(age / 60 / 60 / 24 / 365)) s += " years ago" elif age > 60 * 60 * 24 * (365 / 12) * 2: s = str(int(age / 60 / 60 / 24 / (365 / 12))) s += " months ago" elif age > 60 * 60 * 24 * 7 * 2: s = str(int(age / 60 / 60 / 24 / 7)) s += " weeks ago" elif age > 60 * 60 * 24 * 2: s = str(int(age / 60 / 60 / 24)) s += " days ago" elif age > 60 * 60 * 2: s = str(int(age / 60 / 60)) s += " hours ago" elif age > 60 * 2: s = str(int(age / 60)) s += " minutes ago" elif age > 2: s = str(int(age)) s += " seconds ago" else: s = "right now" return s def shorten_str(s, max=60): if len(s) > max: s = s[: max - 4] + " ..." 
return s def replace_tabs(s): pos = s.find("\t") while pos != -1: count = 8 - (pos % 8) if count: spaces = " " * count s = s.replace("\t", spaces, 1) pos = s.find("\t") return s def replace_links(s): """Replace user defined strings with links, as specified in the configuration file.""" vardict = { "myreponame": config.myreponame, "reponame": config.reponame, } for link_pat, link_dst in config.url_links: s = re.sub(link_pat, link_dst % vardict, s) return s def strip_ignore_this(s): """Strip out darcs' Ignore-this: metadata if present.""" return re.sub(r"^Ignore-this:[^\n]*\n?", "", s) def highlight(s, l): "Highlights appearences of s in l" # build the regexp by leaving "(s)", replacing '(' and ') first s = s.replace("\\", "\\\\") s = s.replace("(", "\\(") s = s.replace(")", "\\)") s = "(" + xmlescape(s) + ")" try: pat = re.compile(s, re.I) repl = '\\1' l = re.sub(pat, repl, l) except: pass return l def fperms(fname): m = os.stat(fname)[stat.ST_MODE] m = m & 0o777 s = [] if os.path.isdir(fname): s.append("d") else: s.append("-") if m & 0o400: s.append("r") else: s.append("-") if m & 0o200: s.append("w") else: s.append("-") if m & 0o100: s.append("x") else: s.append("-") if m & 0o040: s.append("r") else: s.append("-") if m & 0o020: s.append("w") else: s.append("-") if m & 0o010: s.append("x") else: s.append("-") if m & 0o004: s.append("r") else: s.append("-") if m & 0o002: s.append("w") else: s.append("-") if m & 0o001: s.append("x") else: s.append("-") return "".join(s) def fsize(fname): s = os.stat(fname)[stat.ST_SIZE] if s < 1024: return "%s" % s elif s < 1048576: return "%sK" % int(s / 1024) elif s < 1073741824: return "%sM" % int(s / 1048576) def isbinary(fname): bins = open(config.repodir + "/_darcs/prefs/binaries").readlines() bins = [b[:-1] for b in bins if b and b[0] != "#"] for b in bins: if re.compile(b).search(fname): return 1 return 0 def realpath(fname): realf = filter_file(config.repodir + "/" + fname) return realf def log_times(cache_hit, repo=None, 
event=None): if not config.logtimes: return time_total = time.time() - time_begin processing = time_total - time_imports if not event: event = action if cache_hit: event = event + " (hit)" s = "%s\n" % event if repo: s += "\trepo: %s\n" % repo s += """\ total: %.3f processing: %.3f imports: %.3f\n""" % ( time_total, processing, time_imports, ) if darcs_runs: s += "\truns:\n" for params in darcs_runs: s += "\t\t%s\n" % " ".join(params) s += "\n" lf = open(config.logtimes, "a", errors="surrogateescape") lf.write(s) lf.close() def parse_darcs_time(s): "Try to convert a darcs' time string into a Python time tuple." try: return time.strptime(s, "%Y%m%d%H%M%S") except ValueError: # very old darcs commits use a different format, for example: # "Wed May 21 19:39:10 CEST 2003" or even: # "Sun Sep 21 07:23:57 Pacific Daylight Time 2003" # we can't parse the time zone part reliably, so we ignore it fmt = "%a %b %d %H:%M:%S %Y" parts = s.split() ns = " ".join(parts[0:4]) + " " + parts[-1] return time.strptime(ns, fmt) # # generic html functions # def print_header(): print("Content-type: text/html; charset=utf-8") print( """ darcs - %(reponame)s """ % { "reponame": config.reponame, "css": config.cssfile, "url": config.myurl + "/" + config.myreponame, "fav": config.darcsfav, "logo": config.darcslogo, "myname": config.myname, "myreponame": config.myreponame, "action": action, } ) def print_footer(put_rss=1): print( """ \n\n") def print_navbar(h="", f=""): print( """ ") def print_readme(): if not config.readme: return try: file = open(config.repodir + "/" + config.readme, mode="r") except: return readme_markdown = file.read() file.close() try: import markdown readme_html = markdown.markdown(readme_markdown) except: return print( """

%s
""" % config.readme ) print(readme_html) print("
") def print_plain_header(): print("Content-type: text/plain; charset=utf-8\n") def print_binary_header(fname=None): import mimetypes if fname: (mime, enc) = mimetypes.guess_type(fname) else: mime = None if mime: print("Content-type: %s" % mime) else: print("Content-type: application/octet-stream") if fname: print("Content-Disposition:attachment;filename=%s" % fname) print def gen_authorlink(author, shortauthor=None): if not config.author_links: if shortauthor: return shortauthor else: return author if not shortauthor: shortauthor = author return ( '%s' % shortauthor ) # # basic caching # class Cache: def __init__(self, basedir, url): import hashlib self.basedir = basedir self.url = url self.fname = hashlib.sha1(repr(url)).hexdigest() self.file = None self.mode = None self.real_stdout = sys.stdout def open(self): "Returns 1 on hit, 0 on miss" fname = self.basedir + "/" + self.fname if not os.access(fname, os.R_OK): # the file doesn't exist, direct miss pid = str(os.getpid()) fname = self.basedir + "/." + self.fname + "-" + pid self.file = open(fname, "w") self.mode = "w" os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR) # step over stdout so when "print" tries to write # output, we get it first sys.stdout = self return 0 inv = config.repodir + "/_darcs/patches" cache_lastmod = os.stat(fname).st_mtime repo_lastmod = os.stat(inv).st_mtime dw_lastmod = os.stat(sys.argv[0]).st_mtime if repo_lastmod > cache_lastmod or dw_lastmod > cache_lastmod: # the entry is too old, remove it and return a miss os.unlink(fname) pid = str(os.getpid()) fname = self.basedir + "/." + self.fname + "-" + pid self.file = open(fname, "w") self.mode = "w" sys.stdout = self return 0 # the entry is still valid, hit! 
self.file = open(fname, "r") self.mode = "r" return 1 def dump(self): for l in self.file: self.real_stdout.write(l) def write(self, s): # this gets called from print, because we replaced stdout with # ourselves self.file.write(s) self.real_stdout.write(s) def close(self): if self.file: self.file.close() sys.stdout = self.real_stdout if self.mode == "w": pid = str(os.getpid()) fname1 = self.basedir + "/." + self.fname + "-" + pid fname2 = self.basedir + "/" + self.fname os.rename(fname1, fname2) self.mode = "c" def cancel(self): "Like close() but don't save the entry." if self.file: self.file.close() sys.stdout = self.real_stdout if self.mode == "w": pid = str(os.getpid()) fname = self.basedir + "/." + self.fname + "-" + pid os.unlink(fname) self.mode = "c" # # darcs repo manipulation # def repo_get_owner(): try: fd = open(config.repodir + "/_darcs/prefs/author") except IOError: return None author = None for line in fd: line = line.strip() if not line.startswith("#"): author = line break return author def run_subprocess(params): """Wrapper to subprocess.Popen(), which we use to provide backwards compatibility when it's not available. Returns stdout.""" import subprocess p = subprocess.Popen( params, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) return io.TextIOWrapper( p.stdout, encoding="utf8", errors="backslashreplace" ) def run_darcs(*params): """Runs darcs on the repodir with the given params, return a file object with its output.""" os.chdir(config.repodir) try: original_8bit_setting = os.environ["DARCS_DONT_ESCAPE_8BIT"] except KeyError: original_8bit_setting = None os.environ["DARCS_DONT_ESCAPE_8BIT"] = "1" # Recent darcs versions require the PATH environment variable to be # present in order to find "diff"; if it's not set we give it a # reasonably basic one which would work in most cases, but ideally it # should be properly set by the web server. 
if "PATH" not in os.environ: os.environ["PATH"] = ":".join([config.darcspath, "/bin", "/usr/bin"]) outf = run_subprocess([config.darcspath + "darcs"] + list(params)) darcs_runs.append(params) if original_8bit_setting == None: del os.environ["DARCS_DONT_ESCAPE_8BIT"] else: os.environ["DARCS_DONT_ESCAPE_8BIT"] = original_8bit_setting return outf class Patch: "Represents a single patch/record" def __init__(self): self.hash = "" self.author = "" self.shortauthor = "" self.date = 0 self.local_date = 0 self.name = "" self.comment = "" self.inverted = False self.adds = [] self.removes = [] self.modifies = {} self.diradds = [] self.dirremoves = [] self.replaces = {} self.moves = {} def tostr(self): s = "%s\n\tAuthor: %s\n\tDate: %s\n\tHash: %s\n" % ( self.name, self.author, self.date, self.hash, ) return s def getdiff(self): """Returns a list of lines from the diff -u corresponding with the patch.""" f = run_darcs( "diff", "--quiet", "-u", "--match", "hash %s" % self.hash ) return f.readlines() def matches(self, s): "Defines if the patch matches a given string" if s.lower() in self.comment.lower(): return self.comment elif s.lower() in self.name.lower(): return self.name elif s.lower() in self.author.lower(): return self.author elif s == self.hash: return self.hash s = s.lower() for l in ( self.adds, self.removes, self.modifies, self.diradds, self.dirremoves, self.replaces.keys(), self.moves.keys(), self.moves.keys(), ): for i in l: if s in i.lower(): return i return "" class XmlInputWrapper: def __init__(self, fd): self.fd = fd self._header_done = False self._changelog_done = False self._read = self.read def read(self, *args, **kwargs): if not self._header_done: self._header_done = True return '\n' if not self._changelog_done: # Read lines until we see "". Sometimes the output has # human-readable lines before the XML begins, this is our way to # work around that. 
self._changelog_done = True while True: l = self.fd.readline() if not l: return l if "" in l: return l return self.fd.read(*args, **kwargs) def close(self, *args, **kwargs): return self.fd.close(*args, **kwargs) # patch parsing, we get them through "darcs changes --xml-output" class BuildPatchList(xml.sax.handler.ContentHandler): def __init__(self): self.db = {} self.list = [] self.cur_hash = "" self.cur_elem = None self.cur_val = "" self.cur_file = "" # Ignore the following elements (and sub-elements): # - created_as: When you ask for changes to a given file, the xml # output begins with the patch that creates it is enclosed in a # "created_as" tag; then, later, it gets shown again in its usual # place. Ignore it to avoid double-counting. # - explicit_dependencies: they appear within tag patches as a way of # indicating dependencies, but we don't really use them, and # counting them in could be confusing. self.ignored_elems = ["created_as", "explicit_dependencies"] def startElement(self, name, attrs): # Skip ignored elements and sub-elements. 
for ignore_elem in self.ignored_elems: if name == ignore_elem: self.cur_elem = ignore_elem return if self.cur_elem == ignore_elem: return # now parse the tags normally if name == "patch": p = Patch() p.hash = attrs.get("hash") au = attrs.get("author", None) p.author = xmlescape(au) if au.find("<") != -1: au = au[: au.find("<")].strip() p.shortauthor = xmlescape(au) td = parse_darcs_time(attrs.get("date", None)) p.date = time.mktime(td) p.date_str = time.strftime("%a, %d %b %Y %H:%M:%S", td) td = time.strptime( attrs.get("local_date", None), "%a %b %d %H:%M:%S %Z %Y" ) p.local_date = time.mktime(td) p.local_date_str = time.strftime("%a, %d %b %Y %H:%M:%S", td) inverted = attrs.get("inverted", None) if inverted and inverted == "True": p.inverted = True self.db[p.hash] = p self.current = p.hash self.list.append(p.hash) elif name == "name": self.db[self.current].name = "" self.cur_elem = "name" elif name == "comment": self.db[self.current].comment = "" self.cur_elem = "comment" elif name == "add_file": self.cur_elem = "add_file" elif name == "remove_file": self.cur_elem = "remove_file" elif name == "add_directory": self.cur_elem = "add_directory" elif name == "remove_directory": self.cur_elem = "remove_dir" elif name == "modify_file": self.cur_elem = "modify_file" elif name == "removed_lines": if self.cur_val: self.cur_file = self.cur_val.strip() cf = self.cur_file p = self.db[self.current] # the current value holds the file name at this point if not cf in p.modifies: p.modifies[cf] = {"+": 0, "-": 0} p.modifies[cf]["-"] = int(attrs.get("num", None)) elif name == "added_lines": if self.cur_val: self.cur_file = self.cur_val.strip() cf = self.cur_file p = self.db[self.current] if not cf in p.modifies: p.modifies[cf] = {"+": 0, "-": 0} p.modifies[cf]["+"] = int(attrs.get("num", None)) elif name == "move": src = attrs.get("from", None) dst = attrs.get("to", None) p = self.db[self.current] p.moves[src] = dst elif name == "replaced_tokens": if self.cur_val: self.cur_file = 
self.cur_val.strip() cf = self.cur_file p = self.db[self.current] if not cf in p.replaces: p.replaces[cf] = 0 p.replaces[cf] = int(attrs.get("num", None)) else: self.cur_elem = None def characters(self, s): if not self.cur_elem: return self.cur_val += s def endElement(self, name): # Skip ignored elements and sub-elements. for ignore_elem in self.ignored_elems: if name == ignore_elem: self.cur_elem = None self.cur_val = "" return if self.cur_elem == ignore_elem: return if name == "replaced_tokens": return if name == "name": p = self.db[self.current] p.name = self.cur_val if p.inverted: p.name = "UNDO: " + p.name elif name == "comment": self.db[self.current].comment = strip_ignore_this(self.cur_val) elif name == "add_file": scv = self.cur_val.strip() self.db[self.current].adds.append(scv) elif name == "remove_file": scv = self.cur_val.strip() self.db[self.current].removes.append(scv) elif name == "add_directory": scv = self.cur_val.strip() self.db[self.current].diradds.append(scv) elif name == "remove_directory": scv = self.cur_val.strip() self.db[self.current].dirremoves.append(scv) elif name == "modify_file": if not self.cur_file: # binary modification appear without a line # change summary, so we add it manually here f = self.cur_val.strip() p = self.db[self.current] p.modifies[f] = {"+": 0, "-": 0, "b": 1} self.cur_file = "" self.cur_elem = None self.cur_val = "" def get_list(self): plist = [] for h in self.list: plist.append(self.db[h]) return plist def get_db(self): return self.db def get_list_db(self): return (self.list, self.db) def get_changes_handler(*params): "Returns a handler for the changes output, run with the given params" parser = xml.sax.make_parser() handler = BuildPatchList() parser.setContentHandler(handler) # get the xml output and parse it xmlf = run_darcs("changes", "--xml-output", *params) parser.parse(XmlInputWrapper(xmlf)) xmlf.close() return handler def get_last_patches(last=15, topi=0, fname=None): """Gets the last N patches from the 
repo, returns a patch list. If "topi" is specified, then it will return the N patches that preceeded the patch number topi in the list. It sounds messy but it's quite simple. You can optionally pass a filename and only changes that affect it will be returned. FIXME: there's probably a more efficient way of doing this.""" # darcs calculate last first, and then filters the filename, # so it's not so simple to combine them; that's why we do so much # special casing here toget = last + topi if fname: if fname[0] == "/": fname = fname[1:] params = ("-s", fname) else: params = ("-s", "--last=%d" % toget) handler = get_changes_handler(*params) # return the list of all the patch objects return handler.get_list()[topi:toget] def get_patch(hash): handler = get_changes_handler("-s", "--match", "hash %s" % hash) patch = handler.db[handler.list[0]] return patch def get_diff(hash): return run_darcs("diff", "--quiet", "-u", "--match", "hash %s" % hash) def get_file_diff(hash, fname): return run_darcs( "diff", "--quiet", "-u", "--match", "hash %s" % hash, fname ) def get_file_headdiff(hash, fname): return run_darcs( "diff", "--quiet", "-u", "--from-match", "hash %s" % hash, fname ) def get_patch_headdiff(hash): return run_darcs("diff", "--quiet", "-u", "--from-match", "hash %s" % hash) def get_darcs_diff(hash, fname=None): params = ["changes", "-v", "--matches", "hash %s" % hash] if fname: params.append(fname) return run_darcs(*params) def get_darcs_headdiff(hash, fname=None): params = ["changes", "-v", "--from-match", "hash %s" % hash] if fname: params.append(fname) return run_darcs(*params) class Annotate: def __init__(self): self.fname = "" self.lastchange_hash = "" self.lastchange_author = "" self.lastchange_name = "" self.lastchange_date = None self.lines = [] # hash -> date self.patches = {} class Line: def __init__(self): self.text = "" self.phash = None self.pauthor = None self.pdate = None def parse_annotate(fname, src): annotate = Annotate() annotate.fname = fname # Hash 
-> Patch. patches = {} prevhash = None for l in src: if "|" not in l: continue line = Annotate.Line() line.phash, line.text = l.split("|", 1) # Darcs sometimes generates 00..00 hashes in the middle of the output # due to a bug. Skip these and use the previous hash instead. if int(line.phash, 16) == 0: line.phash = prevhash else: prevhash = line.phash if line.phash: if line.phash not in patches: patch = get_patch(line.phash) patches[line.phash] = patch line.pauthor = patches[line.phash].author line.pdate = time.localtime(patches[line.phash].date) annotate.patches[line.phash] = patch.date annotate.lines.append(line) if not annotate.lines: return # Find the last patch, and update annotate information accordingly. lastp = sorted(patches.values(), key=lambda a: a.date)[-1] annotate.lastchange_hash = lastp.hash annotate.lastchange_author = lastp.author annotate.lastchange_name = lastp.name annotate.lastchange_date = time.localtime(lastp.date) return annotate def get_annotate(fname, hash=None): if config.disable_annotate: return None params = ["annotate", "--machine-readable"] if hash: params += ["--match", "hash %s" % hash] if fname.startswith("/"): # darcs 2 doesn't like files starting with /, and darcs 1 # doesn't really care fname = fname[1:] params.append(fname) return parse_annotate(fname, run_darcs(*params)) # # specific html functions # def print_diff(dsrc): for l in dsrc: # remove the trailing newline if len(l) > 1: l = l[:-1] if l.startswith("diff"): # file lines, they have their own class print('
%s
' % xmlescape(l)) continue color = "" if l[0] == "+": color = 'style="color:#008800;"' elif l[0] == "-": color = 'style="color:#cc0000;"' elif l[0] == "@": color = 'style="color:#990099; ' color += "border: solid #ffe0ff; " color += "border-width: 1px 0px 0px 0px; " color += 'margin-top: 2px;"' elif l.startswith("Files"): # binary differences color = 'style="color:#666;"' print('
' % color + xmlescape(l) + "
") def print_darcs_diff(dsrc): for l in dsrc: if not l.startswith(" "): # comments and normal stuff print('
' + xmlescape(l) + "
") continue l = l.strip() if not l: continue if l[0] == "+": cl = 'class="pre" style="color:#008800;"' elif l[0] == "-": cl = 'class="pre" style="color:#cc0000;"' else: cl = 'class="diff_info"' print("
" % cl + xmlescape(l) + "
") def print_shortlog(last=PATCHES_PER_PAGE, topi=0, fname=None): ps = get_last_patches(last, topi, fname) if fname: title = '' % ( config.myreponame, fname, ) title += "History for path %s" % xmlescape(fname) title += "" else: title = ( 'shortlog' % config.myreponame ) print("
%s
" % title) print('') if topi != 0: # put a link to the previous page ntopi = topi - last if ntopi < 0: ntopi = 0 print("") alt = True for p in ps: if p.name.startswith("TAG "): print('') elif alt: print('') else: print('') alt = not alt print( """ """ % { "age": how_old(p.local_date), "author": gen_authorlink( p.author, shorten_str(p.shortauthor, 26) ), "myrname": config.myreponame, "hash": p.hash, "name": xmlescape(shorten_str(p.name)), "fullname": xmlescape(p.name), } ) print("") if len(ps) >= last: # only show if we've not shown them all already print("") print("
") if fname: print( '...' % (config.myreponame, ntopi, fname) ) else: print( '...' % (config.myreponame, ntopi) ) print("
%(age)s %(author)s %(name)s
") if fname: print( '...' % (config.myreponame, topi + last, fname) ) else: print( '...' % (config.myreponame, topi + last) ) print("
") def print_log(last=PATCHES_PER_PAGE, topi=0): ps = get_last_patches(last, topi) if topi != 0: # put a link to the previous page ntopi = topi - last if ntopi < 0: ntopi = 0 print( '

<- Prev

' % (config.myreponame, ntopi) ) for p in ps: if p.comment: comment = replace_links(xmlescape(p.comment)) fmt_comment = comment.replace("\n", "
") + "\n" fmt_comment += "

" else: fmt_comment = "" print( """

%(age)s%(desc)s
%(author)s [%(date)s]
%(comment)s
""" % { "myreponame": config.myreponame, "age": how_old(p.local_date), "date": p.local_date_str, "author": gen_authorlink(p.author, p.shortauthor), "hash": p.hash, "desc": xmlescape(p.name), "comment": fmt_comment, } ) if len(ps) >= last: # only show if we've not shown them all already print( '

Next ->

' % (config.myreponame, topi + last) ) def print_blob(fname): print('
%s
' % xmlescape(fname)) if isbinary(fname): print( """
This is a binary file and its contents will not be displayed.
""" ) return try: import pygments except ImportError: pygments = False if not pygments: print_blob_simple(fname) return else: try: print_blob_highlighted(fname) except ValueError: # pygments couldn't guess a lexer to highlight the code, try # another method with sampling the file contents. try: print_blob_highlighted(fname, sample_code=True) except ValueError: # pygments really could not find any lexer for this file. print_blob_simple(fname) def print_blob_simple(fname): print('
') f = open(realpath(fname), "r", errors="backslashreplace") count = 1 for l in f: l = xmlescape(l) if l and l[-1] == "\n": l = l[:-1] l = replace_tabs(l) print( """\
\ %(c)4d %(l)s\
\ """ % {"c": count, "l": l} ) count += 1 print("
") def print_blob_highlighted(fname, sample_code=False): import pygments import pygments.lexers import pygments.formatters code = open(realpath(fname), "r").read() if sample_code: lexer = pygments.lexers.guess_lexer( code[:200], encoding=config.repoencoding[0] ) else: lexer = pygments.lexers.guess_lexer_for_filename( fname, code[:200], encoding=config.repoencoding[0] ) formatter = pygments.formatters.HtmlFormatter( linenos="inline", cssclass="page_body", ) print(pygments.highlight(code, lexer, formatter)) def print_annotate(ann, style): print('
') if isbinary(ann.fname): print( """ This is a binary file and its contents will not be displayed.
""" ) return if style == "shade": # here's the idea: we will assign to each patch a shade of # color from its date (newer gets darker) max = 0xFF min = max - 80 # to do that, we need to get a list of the patch hashes # ordered by their dates l = [(date, hash) for (hash, date) in ann.patches.items()] l.sort() l = [hash for (date, hash) in l] # now we have to map each element to a number in the range # min-max, with max being close to l[0] and min l[len(l) - 1] lenn = max - min lenl = len(l) shadetable = {} for i in range(0, lenl): hash = l[i] n = float(i * lenn) / lenl n = max - int(round(n)) shadetable[hash] = n elif style == "zebra": lineclass = "dark" count = 1 prevhash = None for l in ann.lines: text = xmlescape(l.text) text = text.rstrip() text = replace_tabs(text) plongdate = time.strftime("%Y-%m-%d %H:%M:%S", l.pdate) title = "%s by %s" % (plongdate, xmlescape(l.pauthor)) link = "%(myrname)s;a=commit;h=%(hash)s" % { "myrname": config.myreponame, "hash": l.phash, } if style == "shade": linestyle = ( 'style="background-color:#ffff%.2x"' % shadetable[l.phash] ) lineclass = "" elif style == "zebra": linestyle = "" if l.phash != prevhash: if lineclass == "dark": lineclass = "light" else: lineclass = "dark" else: linestyle = "" lineclass = "" if l.phash != prevhash: pdate = time.strftime("%Y-%m-%d", l.pdate) left = l.pauthor.find("<") right = l.pauthor.find("@") if left != -1 and right != -1: shortau = l.pauthor[left + 1 : right] elif l.pauthor.find(" ") != -1: shortau = l.pauthor[: l.pauthor.find(" ")] elif right != -1: shortau = l.pauthor[:right] else: shortau = l.pauthor desc = "%12.12s" % shortau date = "%-10.10s" % pdate prevhash = l.phash line = 1 else: if line == 1 and style in ["shade", "zebra"]: t = "%s " % time.strftime("%H:%M:%S", l.pdate) desc = "%12.12s" % "'" date = "%-10.10s" % t else: desc = "%12.12s" % "'" date = "%-10.10s" % "" line += 1 print( """\ \ """ % { "class": lineclass, "style": linestyle, "date": date, "desc": xmlescape(desc), "c": 
count, "text": text, "title": title, "link": link, } ) count += 1 print("") # # available actions # def do_summary(): print_header() print_navbar() owner = repo_get_owner() # we should optimize this, it's a pity to go in such a mess for just # one hash ps = get_last_patches(1) print('
 
') print('') print( " " % xmlescape(config.repodesc) ) if owner: print(" " % xmlescape(owner)) if len(ps) > 0: print( " " % ps[0].local_date_str ) print( ' ' % {"url": config.repourl} ) if config.repoprojurl: print(" ") print( ' ' % {"url": config.repoprojurl} ) print("
description%s
owner%s
last change%s
url%(url)s
project url%(url)s
") print_shortlog(15) print_readme() print_footer() def do_commitdiff(phash): print_header() print_navbar(h=phash) p = get_patch(phash) print( """
%(name)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), } ) dsrc = p.getdiff() print_diff(dsrc) print_footer() def do_plain_commitdiff(phash): print_plain_header() dsrc = get_diff(phash) for l in dsrc: sys.stdout.write((l)) def do_darcs_commitdiff(phash): print_header() print_navbar(h=phash) p = get_patch(phash) print( """
%(name)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), } ) dsrc = get_darcs_diff(phash) print_darcs_diff(dsrc) print_footer() def do_headdiff(phash): print_header() print_navbar(h=phash) p = get_patch(phash) print( """
%(name)s --> to head
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), } ) dsrc = get_patch_headdiff(phash) print_diff(dsrc) print_footer() def do_plain_headdiff(phash): print_plain_header() dsrc = get_patch_headdiff(phash) for l in dsrc: sys.stdout.write((l)) def do_darcs_headdiff(phash): print_header() print_navbar(h=phash) p = get_patch(phash) print( """
%(name)s --> to head
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), } ) dsrc = get_darcs_headdiff(phash) print_darcs_diff(dsrc) print_footer() def do_filediff(phash, fname): print_header() print_navbar(h=phash, f=fname) p = get_patch(phash) dsrc = get_file_diff(phash, fname) print( """
%(name)s
%(fname)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), "fname": xmlescape(fname), } ) print_diff(dsrc) print_footer() def do_plain_filediff(phash, fname): print_plain_header() dsrc = get_file_diff(phash, fname) for l in dsrc: sys.stdout.write((l)) def do_darcs_filediff(phash, fname): print_header() print_navbar(h=phash, f=fname) p = get_patch(phash) print( """
%(name)s
%(fname)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), "fname": xmlescape(fname), } ) dsrc = get_darcs_diff(phash, fname) print_darcs_diff(dsrc) print_footer() def do_file_headdiff(phash, fname): print_header() print_navbar(h=phash, f=fname) p = get_patch(phash) dsrc = get_file_headdiff(phash, fname) print( """
%(name)s --> to head
%(fname)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), "fname": xmlescape(fname), } ) print_diff(dsrc) print_footer() def do_plain_fileheaddiff(phash, fname): print_plain_header() dsrc = get_file_headdiff(phash, fname) for l in dsrc: sys.stdout.write((l)) def do_darcs_fileheaddiff(phash, fname): print_header() print_navbar(h=phash, f=fname) p = get_patch(phash) print( """
%(name)s --> to head
%(fname)s
""" % { "myreponame": config.myreponame, "hash": p.hash, "name": xmlescape(p.name), "fname": xmlescape(fname), } ) dsrc = get_darcs_headdiff(phash, fname) print_darcs_diff(dsrc) print_footer() print_plain_header() print("Not yet implemented") def do_commit(phash): print_header() print_navbar(h=phash) p = get_patch(phash) print( """
%(name)s
author%(author)s
local date%(local_date)s
date%(date)s
hash%(hash)s
""" % { "myreponame": config.myreponame, "author": gen_authorlink(p.author), "local_date": p.local_date_str, "date": p.date_str, "hash": p.hash, "name": xmlescape(p.name), } ) if p.comment: comment = replace_links(xmlescape(p.comment)) c = comment.replace("\n", "
\n") print('
') print(replace_links(xmlescape(p.name)), "

") print(c) print("
") changed = ( p.adds + p.removes + list(p.modifies.keys()) + list(p.moves.keys()) + p.diradds + p.dirremoves + list(p.replaces.keys()) ) if changed or p.moves: n = len(changed) print('
%d file(s) changed:
' % n) print('') changed.sort() alt = True for f in changed: if alt: print('') else: print('') alt = not alt show_diff = 1 if f in p.moves: # don't show diffs for moves, they're broken as of # darcs 1.0.3 show_diff = 0 if show_diff: print( """ """ % { "myreponame": config.myreponame, "hash": p.hash, "file": urlescape(f), "fname": xmlescape(f), } ) else: print("" % f) show_diff = 1 if f in p.adds: print('") elif f in p.diradds: print('") elif f in p.removes: print('") elif f in p.dirremoves: print('") elif p in p.replaces: print('") elif f in p.replaces: print('") show_diff = 0 else: print('") if show_diff: print( """ """ % { "myreponame": config.myreponame, "hash": p.hash, "file": urlescape(f), } ) print("") print("
%(fname)s %s', end="") print("[added]", end="") print("', end="") print("[added dir]", end="") print("', end="") print("[removed]", end="") print("', end="") print("[removed dir]", end="") print("', end="") print("[replaced %d tokens]" % p.replaces[f], end="") print("', end="") print('[moved to "%s"]' % p.moves[f]) print("', end="") if "b" in p.modifies[f]: # binary modification print("(binary)") else: print("+%(+)d -%(-)d" % p.modifies[f], end="") print("
") print_footer() def do_tree(dname): print_header() print_navbar() # the head print( """
Current tree
""" % config.myreponame ) # and the linked, with links parts = dname.split("/") print("/ ") sofar = "/" for p in parts: if not p: continue sofar += "/" + p print( '%s /' % ( config.myreponame, urlescape(sofar), p, ) ) print( """
""" ) path = realpath(dname) + "/" alt = True files = os.listdir(path) files.sort() # list directories first dlist = [] flist = [] for f in files: if f == "_darcs": continue realfile = path + f if os.path.isdir(realfile): dlist.append(f) else: flist.append(f) files = dlist + flist for f in files: if alt: print('') else: print('') alt = not alt realfile = path + f fullf = filter_file(dname + "/" + f) print('") print('") if f in dlist: print( """ """ % { "myrname": config.myreponame, "f": xmlescape(f), "fullf": urlescape(fullf), } ) else: print( """ """ % { "myrname": config.myreponame, "f": xmlescape(f), "fullf": urlescape(fullf), } ) print("") print("
', fperms(realfile), end="") print("', fsize(realfile), end="") print(" %(f)s/ %(f)s
") print_readme() print_footer() def do_headblob(fname): print_header() print_navbar(f=fname) filepath = os.path.dirname(fname) if filepath == "/": print( '
/
' % (config.myreponame) ) else: print('
') # and the linked, with links parts = filepath.split("/") print("/ ") sofar = "/" for p in parts: if not p: continue sofar += "/" + p print( '%s /' % (config.myreponame, sofar, xmlescape(p)) ) print("
") print_blob(fname) print_footer() def do_plainblob(fname): f = open(realpath(fname), "r") if isbinary(fname): print_binary_header(os.path.basename(fname)) for l in f: sys.stdout.write(l) else: print_plain_header() for l in f: sys.stdout.write((l)) def do_annotate(fname, phash, style): print_header() ann = get_annotate(fname, phash) if not ann: print( """ The annotate feature has been disabled """ ) print_footer() return print_navbar(f=fname, h=ann.lastchange_hash) print( """
%(name)s
Annotate for file %(fname)s
""" % { "myreponame": config.myreponame, "hash": ann.lastchange_hash, "name": xmlescape(ann.lastchange_name), "fname": xmlescape(fname), } ) print_annotate(ann, style) print_footer() def do_annotate_plain(fname, phash): print_plain_header() ann = get_annotate(fname, phash) for l in ann.lines: sys.stdout.write(l.text) def do_shortlog(topi, last=PATCHES_PER_PAGE): print_header() print_navbar() print_shortlog(topi=topi, last=last) print_footer() def do_filehistory(topi, f, last=PATCHES_PER_PAGE): print_header() print_navbar(f=fname) print_shortlog(topi=topi, fname=fname, last=last) print_footer() def do_log(topi, last=PATCHES_PER_PAGE): print_header() print_navbar() print_log(topi=topi, last=last) print_footer() def do_atom(): print("Content-type: application/atom+xml; charset=utf-8\n") print('') inv = config.repodir + "/_darcs/patches" repo_lastmod = os.stat(inv).st_mtime str_lastmod = time.strftime(iso_datetime, time.localtime(repo_lastmod)) print( """ %(reponame)s darcs repository %(url)s darcs repository (several authors) darcsweb.cgi %(lastmod)s %(desc)s """ % { "reponame": config.reponame, "url": config.myurl + "/" + config.myreponame, "desc": xmlescape(config.repodesc), "lastmod": str_lastmod, } ) ps = get_last_patches(20) for p in ps: title = time.strftime("%d %b %H:%M", time.localtime(p.date)) title += " - " + p.name pdate = time.strftime(iso_datetime, time.localtime(p.date)) link = "%s/%s;a=commit;h=%s" % ( config.myurl, config.myreponame, p.hash, ) import email.utils addr, author = email.utils.parseaddr(p.author) if not addr: addr = "unknown_email@example.com" if not author: author = addr print( """ %(title)s %(author)s %(email)s %(pdate)s %(link)s %(desc)s

""" % { "title": xmlescape(title), "author": author, "email": addr, "url": config.myurl + "/" + config.myreponame, "pdate": pdate, "myrname": config.myreponame, "hash": p.hash, "pname": xmlescape(p.name), "link": link, "desc": xmlescape(p.name), } ) # TODO: allow to get plain text, not HTML? print(xmlescape(p.name) + "
") if p.comment: print("
") print(xmlescape(p.comment).replace("\n", "
\n")) print("
") print("
") changed = ( p.adds + p.removes + list(p.modifies.keys()) + list(p.moves.keys()) + p.diradds + p.dirremoves + list(p.replaces.keys()) ) for i in changed: # TODO: link to the file print("%s
" % i) print("

") print("
") print("
") def do_rss(): print("Content-type: text/xml; charset=utf-8\n") print('') print( """ %(reponame)s %(url)s %(desc)s en """ % { "reponame": config.reponame, "url": config.myurl + "/" + config.myreponame, "desc": xmlescape(config.repodesc), } ) ps = get_last_patches(20) for p in ps: title = time.strftime("%d %b %H:%M", time.localtime(p.date)) title += " - " + p.name pdate = time.strftime( "%a, %d %b %Y %H:%M:%S +0000", time.localtime(p.date) ) link = "%s/%s;a=commit;h=%s" % ( config.myurl, config.myreponame, p.hash, ) # the author field is tricky because the standard requires it # has an email address; so we need to check that and lie # otherwise; there's more info at # http://feedvalidator.org/docs/error/InvalidContact.html if "@" in p.author: author = p.author else: author = "%s <unknown@email>" % p.author print( """ %(title)s %(author)s %(pdate)s %(link)s %(desc)s """ % { "title": xmlescape(title), "author": author, "pdate": pdate, "link": link, "desc": xmlescape(p.name), } ) print(" ") if p.comment: print("
") print(xmlescape(p.comment).replace("\n", "
\n")) print("
") print("
") changed = ( p.adds + p.removes + list(p.modifies.keys()) + list(p.moves.keys()) + p.diradds + p.dirremoves + list(p.replaces.keys()) ) for i in changed: print("%s
" % i) print("]]>") print("
") print("
") def do_search(s): print_header() print_navbar() ps = get_last_patches(config.searchlimit) print( '
Search last %d commits for "%s"
' % (config.searchlimit, xmlescape(s)) ) print('') alt = True for p in ps: match = p.matches(s) if not match: continue if alt: print('') else: print('') alt = not alt print( """ """ % { "age": how_old(p.local_date), "author": gen_authorlink( p.author, shorten_str(p.shortauthor, 26) ), "myrname": config.myreponame, "hash": p.hash, "name": xmlescape(shorten_str(p.name)), "fullname": xmlescape(p.name), "match": highlight(s, shorten_str(match)), } ) print("") print("
%(age)s %(author)s %(name)s
%(match)s
") print_footer() def do_die(): print_header() print("

Error! Malformed query

") print_footer() def do_listrepos(): import config as all_configs expand_multi_config(all_configs) # the header here is special since we don't have a repo print("Content-type: text/html; charset=utf-8\n") print('') print( """ darcs - Repositories
%(summary)s
""" % { "myname": config.myname, "css": config.cssfile, "fav": config.darcsfav, "logo": config.darcslogo, "summary": config.summary, } ) # some python magic alt = True for conf in dir(all_configs): if conf.startswith("__"): continue c = all_configs.__getattribute__(conf) if "reponame" not in dir(c): continue name = xmlescape(c.reponame) desc = xmlescape(c.repodesc) if alt: print('') else: print('') alt = not alt print( """ """ % { "myname": config.myname, "dname": name, "name": urlescape(name), "desc": shorten_str(desc, 60), } ) print("
Project Description
%(dname)s %(desc)s
") print_footer(put_rss=0) def expand_multi_config(config): """Expand configuration entries that serve as "template" to others; this make it easier to have a single directory with all the repos, because they don't need specific entries in the configuration anymore. """ for conf in dir(config): if conf.startswith("__"): continue c = config.__getattribute__(conf) if "multidir" not in dir(c): continue if not os.path.isdir(c.multidir): continue if "exclude" not in dir(c): c.exclude = [] entries = [] if "multidir_deep" in dir(c) and c.multidir_deep: for (root, dirs, files) in os.walk(c.multidir): # do not visit hidden directories dirs[:] = [d for d in dirs if not d.startswith(".")] if "_darcs" in dirs: p = root[1 + len(c.multidir) :] entries.append(p) else: entries = os.listdir(c.multidir) entries.sort() for name in entries: name = name.replace("\\", "/") if name.startswith("."): continue fulldir = c.multidir + "/" + name if not os.path.isdir(fulldir + "/_darcs"): continue if name in c.exclude: continue # set the display name at the beginning, so it can be # used by the other replaces if "displayname" in dir(c): dname = c.displayname % {"name": name} else: dname = name rep_dict = {"name": name, "dname": dname} if "autoexclude" in dir(c) and c.autoexclude: dpath = fulldir + "/_darcs/third_party/darcsweb" if not os.path.isdir(dpath): continue if "autodesc" in dir(c) and c.autodesc: dpath = fulldir + "/_darcs/third_party/darcsweb/desc" if os.access(dpath, os.R_OK): desc = open(dpath).readline().rstrip("\n") else: desc = c.repodesc % rep_dict else: desc = c.repodesc % rep_dict if "autourl" in dir(c) and c.autourl: dpath = fulldir + "/_darcs/third_party/darcsweb/url" if os.access(dpath, os.R_OK): url = open(dpath).readline().rstrip("\n") else: url = c.repourl % rep_dict else: url = c.repourl % rep_dict if "autoprojurl" in dir(c) and c.autoprojurl: dpath = fulldir + "/_darcs/third_party/darcsweb/projurl" if os.access(dpath, os.R_OK): projurl = 
open(dpath).readline().rstrip("\n") elif "repoprojurl" in dir(c): projurl = c.repoprojurl % rep_dict else: projurl = None elif "repoprojurl" in dir(c): projurl = c.repoprojurl % rep_dict else: projurl = None rdir = fulldir class tmp_config: reponame = dname repodir = rdir repodesc = desc repourl = url repoencoding = c.repoencoding repoprojurl = projurl if "footer" in dir(c): footer = c.footer # index by display name to avoid clashes config.__setattr__(dname, tmp_config) def fill_config(name=None): import config as all_configs expand_multi_config(all_configs) if name: # we only care about setting some configurations if a repo was # specified; otherwise we only set the common configuration # directives for conf in dir(all_configs): if conf.startswith("__"): continue c = all_configs.__getattribute__(conf) if "reponame" not in dir(c): continue if c.reponame == name: break else: # not found raise Exception("Repo not found: " + repr(name)) # fill the configuration base = all_configs.base if "myname" not in dir(base): # SCRIPT_NAME has the full path, we only take the file name config.myname = os.path.basename(os.environ["SCRIPT_NAME"]) else: config.myname = base.myname if "myurl" not in dir(base) and "cachedir" not in dir(base): n = os.environ["SERVER_NAME"] p = os.environ["SERVER_PORT"] s = os.path.dirname(os.environ["SCRIPT_NAME"]) u = os.environ.get("HTTPS", "off") in ("on", "1") if not u and p == "80" or u and p == "443": p = "" else: p = ":" + p config.myurl = "http%s://%s%s%s" % (u and "s" or "", n, p, s) else: config.myurl = base.myurl config.darcslogo = base.darcslogo config.darcsfav = base.darcsfav config.cssfile = base.cssfile if name: config.myreponame = config.myname + "?r=" + urlescape(name) config.reponame = c.reponame config.repodesc = c.repodesc config.repodir = c.repodir config.repourl = c.repourl config.repoprojurl = None if "repoprojurl" in dir(c): config.repoprojurl = c.repoprojurl # repoencoding must be a tuple if isinstance(c.repoencoding, str): 
config.repoencoding = (c.repoencoding,) else: config.repoencoding = c.repoencoding # optional parameters if "darcspath" in dir(base): config.darcspath = base.darcspath + "/" else: config.darcspath = "" if "summary" in dir(base): config.summary = base.summary else: config.summary = """ This is the repository index for a darcsweb site.
These are all the available repositories.
""" if "cachedir" in dir(base): config.cachedir = base.cachedir else: config.cachedir = None if "searchlimit" in dir(base): config.searchlimit = base.searchlimit else: config.searchlimit = 100 if "logtimes" in dir(base): config.logtimes = base.logtimes else: config.logtimes = None if "url_links" in dir(base): config.url_links = base.url_links else: config.url_links = () if name and "footer" in dir(c): config.footer = c.footer elif "footer" in dir(base): config.footer = base.footer else: config.footer = ( "Crece desde el pueblo el futuro / " + "crece desde el pie" ) if "author_links" in dir(base): config.author_links = base.author_links else: config.author_links = None if "disable_annotate" in dir(base): config.disable_annotate = base.disable_annotate else: config.disable_annotate = False if name and "readme" in dir(c): config.readme = c.readme elif "readme" in dir(base): config.readme = base.readme else: config.readme = None # # main # # In Python 3.9.2, the ';' is no longer a valid separator by default, which # breaks the links we use. A new "separator" parameter is introduced to allow # to adjust this behaviour. 
#
# main
#

# Since Python 3.9.2, ';' is no longer accepted as a query-string separator
# by default, which would break all of darcsweb's links; the new "separator"
# parameter restores the old behaviour where it is available.
if sys.version_info >= (3, 9, 2):
    form = cgi.FieldStorage(errors="surrogateescape", separator=";")
else:
    form = cgi.FieldStorage(errors="surrogateescape")


def _hash_arg():
    # Sanitized patch hash taken from the "h" query parameter.
    return filter_hash(form["h"].value)


def _opt_hash_arg():
    # Like _hash_arg(), but None when no "h" parameter was given.
    if "h" in form:
        return filter_hash(form["h"].value)
    return None


def _file_arg():
    # Sanitized file name taken from the "f" query parameter.
    return filter_file(form["f"].value)


def _int_arg(key, default):
    # Numeric query parameter, or the given default when absent.
    if key in form:
        return int(filter_num(form[key].value))
    return default


# With no repository selected, all we can do is show the index and leave.
if "r" not in form:
    fill_config()
    do_listrepos()
    log_times(cache_hit=0, event="index")
    sys.exit(0)

# Load the configuration matching the requested repository.
current_repo = urllib.parse.unquote(form["r"].value)
fill_config(current_repo)

# The requested action; default to the summary page.
action = filter_act(form["a"].value) if "a" in form else "summary"

# Try the page cache first, when one is configured.
if config.cachedir:
    # Canonicalize the request from the recognized parameters only;
    # ignoring everything else avoids cache-flooding DoS.
    known = ("r", "a", "f", "h", "topi", "last")
    url_request = sorted((k, form[k].value) for k in form.keys() if k in known)
    cache = Cache(config.cachedir, url_request)
    if cache.open():
        # Hit: replay the stored page and we are done.
        cache.dump()
        cache.close()
        log_times(cache_hit=1, repo=config.reponame)
        sys.exit(0)
    # Miss: the cache now intercepts stdout, so everything the handlers
    # print below is stored automatically.

# Dispatch on the requested action.
if action == "summary":
    do_summary()
elif action == "commit":
    do_commit(_hash_arg())
elif action == "commitdiff":
    do_commitdiff(_hash_arg())
elif action == "plain_commitdiff":
    do_plain_commitdiff(_hash_arg())
elif action == "darcs_commitdiff":
    do_darcs_commitdiff(_hash_arg())
elif action == "headdiff":
    do_headdiff(_hash_arg())
elif action == "plain_headdiff":
    do_plain_headdiff(_hash_arg())
elif action == "darcs_headdiff":
    do_darcs_headdiff(_hash_arg())
elif action == "filediff":
    do_filediff(_hash_arg(), _file_arg())
elif action == "plain_filediff":
    do_plain_filediff(_hash_arg(), _file_arg())
elif action == "darcs_filediff":
    do_darcs_filediff(_hash_arg(), _file_arg())
elif action == "headfilediff":
    do_file_headdiff(_hash_arg(), _file_arg())
elif action == "plain_headfilediff":
    do_plain_fileheaddiff(_hash_arg(), _file_arg())
elif action == "darcs_headfilediff":
    do_darcs_fileheaddiff(_hash_arg(), _file_arg())
elif action == "annotate_normal":
    do_annotate(_file_arg(), _opt_hash_arg(), "normal")
elif action == "annotate_plain":
    do_annotate_plain(_file_arg(), _opt_hash_arg())
elif action == "annotate_zebra":
    do_annotate(_file_arg(), _opt_hash_arg(), "zebra")
elif action == "annotate_shade":
    do_annotate(_file_arg(), _opt_hash_arg(), "shade")
elif action == "shortlog":
    do_shortlog(topi=_int_arg("topi", 0), last=_int_arg("last", PATCHES_PER_PAGE))
elif action == "filehistory":
    topi = _int_arg("topi", 0)
    # NOTE(review): do_filehistory() accidentally reads the module-level
    # "fname" (its parameter is named "f"), so this global assignment
    # must be kept under this exact name.
    fname = _file_arg()
    last = _int_arg("last", PATCHES_PER_PAGE)
    do_filehistory(topi, fname, last=last)
elif action == "log":
    do_log(_int_arg("topi", 0), last=_int_arg("last", PATCHES_PER_PAGE))
elif action == "headblob":
    do_headblob(_file_arg())
elif action == "plainblob":
    do_plainblob(_file_arg())
elif action == "tree":
    do_tree(_file_arg() if "f" in form else "/")
elif action == "rss":
    do_rss()
elif action == "atom":
    do_atom()
elif action == "search":
    do_search(form["s"].value if "s" in form else "")
    # Search results are not worth caching.
    if config.cachedir:
        cache.cancel()
else:
    action = "invalid query"
    do_die()
    # Do not cache error pages either.
    if config.cachedir:
        cache.cancel()

if config.cachedir:
    cache.close()

log_times(cache_hit=0, repo=config.reponame)