#!/usr/bin/env python
"""
darcsweb - A web interface for darcs
Alberto Bertogli (albertogli@telpin.com.ar)

Inspired by gitweb (as of 28/Jun/2005), which is written by Kay Sievers
and Christian Gierke
"""

import sys
import os
import string
import time
import stat
import sha
import cgi
import cgitb; cgitb.enable()
import urllib
import xml.sax
from xml.sax.saxutils import escape as xml_escape
import email.Utils

iso_datetime = '%Y-%m-%dT%H:%M:%SZ'

# empty configuration class, we will fill it in later depending on the repo
class config:
	pass

#
# utility functions
#

def filter_num(s):
	l = [c for c in s if c in string.digits]
	return string.join(l, "")

allowed_in_action = string.ascii_letters + string.digits + '_'
def filter_act(s):
	l = [c for c in s if c in allowed_in_action]
	return string.join(l, "")

allowed_in_hash = string.ascii_letters + string.digits + '-.'
def filter_hash(s):
	l = [c for c in s if c in allowed_in_hash]
	return string.join(l, "")

def filter_file(s):
	if '..' in s:
		raise Exception('FilterFile FAILED')
	if s == '/':
		return s

	# remove extra "/"s
	r = s[0]
	last = s[0]
	for c in s[1:]:
		if c == last and c == '/':
			continue
		r += c
		last = c
	return r

def printd(*params):
	print string.join(params), '<br/>'

# I _hate_ this.
# (a standalone sketch of this escape decoding appears at the end of this file)
def fixu8(s):
	openpos = s.find('[_')
	if openpos < 0:
		# small optimization to avoid the conversion to utf8 and
		# entering the loop
		if type(s) == unicode:
			# workaround for python < 2.4
			return s.encode('utf8')
		else:
			return s.decode(config.repoencoding).encode('utf8')

	s = s.encode(config.repoencoding).decode('raw_unicode_escape')
	while openpos >= 0:
		closepos = s.find('_]', openpos)
		if closepos < 0:
			# not closed, probably just luck
			break
		# middle should be something like 'c3', so we get it by
		# removing the first three characters ("[_\")
		middle = s[openpos + 3:closepos]
		if len(middle) == 2:
			# now we turn middle into the character "\xc3"
			char = chr(int(middle, 16))

			# finally, replace s with our new improved string,
			# and repeat the ugly procedure
			char = char.decode(config.repoencoding)
			mn = '[_\\' + middle + '_]'
			s = s.replace(mn, char, 1)
		openpos = s.find('[_', openpos + 1)

	if config.repoencoding != 'utf8':
		s = s.encode('utf8')
	else:
		s = s.encode('raw_unicode_escape', 'replace')
	return s

def escape(s):
	s = xml_escape(s)
	s = s.replace('"', '&quot;')
	return s

def how_old(epoch):
	if config.cachedir:
		# when we have a cache, how_old() becomes a problem since
		# the cached entries will have old data; so in this case just
		# return a nice string
		t = time.localtime(epoch)
		s = time.strftime("%d %b %H:%M", t)
		return s

	age = int(time.time()) - int(epoch)
	if age > 60*60*24*365*2:
		s = str(age/60/60/24/365)
		s += " years ago"
	elif age > 60*60*24*(365/12)*2:
		s = str(age/60/60/24/(365/12))
		s += " months ago"
	elif age > 60*60*24*7*2:
		s = str(age/60/60/24/7)
		s += " weeks ago"
	elif age > 60*60*24*2:
		s = str(age/60/60/24)
		s += " days ago"
	elif age > 60*60*2:
		s = str(age/60/60)
		s += " hours ago"
	elif age > 60*2:
		s = str(age/60)
		s += " minutes ago"
	elif age > 2:
		s = str(age)
		s += " seconds ago"
	else:
		s = "right now"
	return s

def shorten_str(s, max = 60):
	if len(s) > max:
		s = s[:max - 4] + ' ...'
return s def replace_tabs(s): pos = s.find("\t") while pos != -1: count = 8 - (pos % 8) if count: spaces = ' ' * count s = s.replace('\t', spaces, 1) pos = s.find("\t") return s def fperms(fname): m = os.stat(fname)[stat.ST_MODE] m = m & 0777 s = [] if os.path.isdir(fname): s.append('d') else: s.append('-') if m & 0400: s.append('r') else: s.append('-') if m & 0200: s.append('w') else: s.append('-') if m & 0100: s.append('x') else: s.append('-') if m & 0040: s.append('r') else: s.append('-') if m & 0020: s.append('w') else: s.append('-') if m & 0010: s.append('x') else: s.append('-') if m & 0004: s.append('r') else: s.append('-') if m & 0002: s.append('w') else: s.append('-') if m & 0001: s.append('x') else: s.append('-') return string.join(s, '') def isbinary(fname): import re bins = open(config.repodir + '/_darcs/prefs/binaries').readlines() bins = [b[:-1] for b in bins if b and b[0] != '#'] for b in bins: if re.compile(b).search(fname): return 1 return 0 def realpath(fname): realf = filter_file(config.repodir + '/_darcs/current/' + fname) if not os.path.exists(realf): realf = filter_file(config.repodir + '/' + fname) return realf # # generic html functions # def print_header(): print "Content-type: text/html; charset=utf-8" print """ darcs - %(reponame)s " def print_footer(put_rss = 1): print """ \n\n" def print_navbar(h = "", f = ""): print """ ' def print_plain_header(): print "Content-type: text/plain; charset=utf-8\n" # # basic caching # class Cache: def __init__(self, basedir, url): self.basedir = basedir self.url = url self.fname = sha.sha(url).hexdigest() self.file = None self.mode = None self.real_stdout = sys.stdout def open(self): "Returns 1 on hit, 0 on miss" fname = self.basedir + '/' + self.fname if not os.access(fname, os.R_OK): # the file doesn't exist, direct miss pid = str(os.getpid()) fname = self.basedir + '/.' + self.fname + '-' + pid self.file = open(fname, 'w') self.mode = 'w' # step over stdout so when "print" tries to write # output, we get it first sys.stdout = self return 0 inv = config.repodir + '/_darcs/inventory' cache_lastmod = os.stat(fname).st_mtime repo_lastmod = os.stat(inv).st_mtime dw_lastmod = os.stat(sys.argv[0]).st_mtime if repo_lastmod > cache_lastmod or dw_lastmod > cache_lastmod: # the entry is too old, remove it and return a miss os.unlink(fname) pid = str(os.getpid()) fname = self.basedir + '/.' + self.fname + '-' + pid self.file = open(fname, 'w') self.mode = 'w' sys.stdout = self return 0 # the entry is still valid, hit! self.file = open(fname, 'r') self.mode = 'r' return 1 def dump(self): for l in self.file: self.real_stdout.write(l) def write(self, s): # this gets called from print, because we replaced stdout with # ourselves self.file.write(s) self.real_stdout.write(s) def close(self): if self.file: self.file.close() sys.stdout = self.real_stdout if self.mode == 'w': pid = str(os.getpid()) fname1 = self.basedir + '/.' + self.fname + '-' + pid fname2 = self.basedir + '/' + self.fname os.rename(fname1, fname2) self.mode = 'c' def cancel(self): "Like close() but don't save the entry." if self.file: self.file.close() sys.stdout = self.real_stdout if self.mode == 'w': pid = str(os.getpid()) fname = self.basedir + '/.' 
+ self.fname + '-' + pid os.unlink(fname) self.mode = 'c' # # darcs repo manipulation # def repo_get_owner(): try: fd = open(config.repodir + '/_darcs/prefs/author') author = fd.readlines()[0].strip() except: author = None return author def run_darcs(params): """Runs darcs on the repodir with the given params, return a file object with its output.""" os.chdir(config.repodir) cmd = config.darcspath + "darcs " + params inf, outf = os.popen4(cmd, 't') return outf class Patch: "Represents a single patch/record" def __init__(self): self.hash = '' self.author = '' self.shortauthor = '' self.date = 0 self.local_date = 0 self.name = '' self.comment = '' self.inverted = False; self.adds = [] self.removes = [] self.modifies = {} self.diradds = [] self.dirremoves = [] self.replaces = {} self.moves = {} def tostr(self): s = "%s\n\tAuthor: %s\n\tDate: %s\n\tHash: %s\n" % \ (self.name, self.author, self.date, self.hash) return s def getdiff(self): """Returns a list of lines from the diff -u corresponding with the patch.""" params = 'diff -u --match "hash %s"' % self.hash f = run_darcs(params) return f.readlines() # patch parsing, we get them through "darcs changes --xml-output" class BuildPatchList(xml.sax.handler.ContentHandler): def __init__(self): self.db = {} self.list = [] self.cur_hash = '' self.cur_elem = None self.cur_val = '' self.cur_file = '' def startElement(self, name, attrs): # When you ask for changes to a given file, the xml output # begins with the patch that creates it is enclosed in a # "created_as" tag; then, later, it gets shown again in its # usual place. The following two "if"s take care of ignoring # everything inside the "created_as" tag, since we don't care. if name == 'created_as': self.cur_elem = 'created_as' return if self.cur_elem == 'created_as': return # now parse the tags normally if name == 'patch': p = Patch() p.hash = fixu8(attrs.get('hash')) au = attrs.get('author', None) p.author = fixu8(escape(au)) if au.find('<') != -1: au = au[:au.find('<')].strip() p.shortauthor = fixu8(escape(au)) td = time.strptime(attrs.get('date', None), "%Y%m%d%H%M%S") p.date = time.mktime(td) p.date_str = time.strftime("%a, %d %b %Y %H:%M:%S", td) td = time.strptime(attrs.get('local_date', None), "%a %b %d %H:%M:%S %Z %Y") p.local_date = time.mktime(td) p.local_date_str = \ time.strftime("%a, %d %b %Y %H:%M:%S", td) inverted = attrs.get('inverted', None) if inverted and inverted == 'True': p.inverted = True self.db[p.hash] = p self.current = p.hash self.list.append(p.hash) elif name == 'name': self.db[self.current].name = '' self.cur_elem = 'name' elif name == 'comment': self.db[self.current].comment = '' self.cur_elem = 'comment' elif name == 'add_file': self.cur_elem = 'add_file' elif name == 'remove_file': self.cur_elem = 'remove_file' elif name == 'add_directory': self.cur_elem = 'add_directory' elif name == 'remove_directory': self.cur_elem = 'remove_dir' elif name == 'modify_file': self.cur_elem = 'modify_file' elif name == 'removed_lines': if self.cur_val: self.cur_file = fixu8(self.cur_val.strip()) cf = self.cur_file p = self.db[self.current] # the current value holds the file name at this point if not p.modifies.has_key(cf): p.modifies[cf] = { '+': 0, '-': 0 } p.modifies[cf]['-'] = int(attrs.get('num', None)) elif name == 'added_lines': if self.cur_val: self.cur_file = fixu8(self.cur_val.strip()) cf = self.cur_file p = self.db[self.current] if not p.modifies.has_key(cf): p.modifies[cf] = { '+': 0, '-': 0 } p.modifies[cf]['+'] = int(attrs.get('num', None)) elif name == 'move': src 
= fixu8(attrs.get('from', None))
			dst = fixu8(attrs.get('to', None))
			p = self.db[self.current]
			p.moves[src] = dst
		elif name == 'replaced_tokens':
			if self.cur_val:
				self.cur_file = fixu8(self.cur_val.strip())
			cf = self.cur_file
			p = self.db[self.current]
			if not p.replaces.has_key(cf):
				p.replaces[cf] = 0
			p.replaces[cf] = int(attrs.get('num', None))
		else:
			self.cur_elem = None

	def characters(self, s):
		if not self.cur_elem:
			return
		self.cur_val += s

	def endElement(self, name):
		# See the comment in startElement()
		if name == 'created_as':
			self.cur_elem = None
			self.cur_val = ''
			return
		if self.cur_elem == 'created_as':
			return

		if name == 'replaced_tokens':
			return

		if name == 'name':
			p = self.db[self.current]
			p.name = fixu8(self.cur_val)
			if p.inverted:
				p.name = 'UNDO: ' + p.name
		elif name == 'comment':
			self.db[self.current].comment = fixu8(self.cur_val)
		elif name == 'add_file':
			scv = fixu8(self.cur_val.strip())
			self.db[self.current].adds.append(scv)
		elif name == 'remove_file':
			scv = fixu8(self.cur_val.strip())
			self.db[self.current].removes.append(scv)
		elif name == 'add_directory':
			scv = fixu8(self.cur_val.strip())
			self.db[self.current].diradds.append(scv)
		elif name == 'remove_directory':
			scv = fixu8(self.cur_val.strip())
			self.db[self.current].dirremoves.append(scv)
		elif name == 'modify_file':
			if not self.cur_file:
				# binary modifications appear without a line
				# change summary, so we add it manually here
				f = fixu8(self.cur_val.strip())
				p = self.db[self.current]
				p.modifies[f] = { '+': 0, '-': 0, 'b': 1 }
			self.cur_file = ''

		self.cur_elem = None
		self.cur_val = ''

	def get_list(self):
		plist = []
		for h in self.list:
			plist.append(self.db[h])
		return plist

	def get_db(self):
		return self.db

	def get_list_db(self):
		return (self.list, self.db)

def get_changes_handler(params):
	"Returns a handler for the changes output, run with the given params"
	parser = xml.sax.make_parser()
	handler = BuildPatchList()
	parser.setContentHandler(handler)

	# get the xml output and parse it
	xmlf = run_darcs("changes --xml-output " + params)
	parser.parse(xmlf)
	xmlf.close()
	return handler

def get_last_patches(last = 15, topi = 0, fname = None):
	"""Gets the last N patches from the repo and returns a patch list.
	If "topi" is specified, it will return the N patches that preceded
	patch number topi in the list. It sounds messy but it's quite simple.
	You can optionally pass a filename, and only changes that affect it
	will be returned.
	FIXME: there's probably a more efficient way of doing this."""

	# darcs calculates --last first, and then filters by filename, so
	# it's not so simple to combine them; that's why we do so much
	# special casing here
	toget = last + topi

	if fname:
		if fname[0] == '/':
			fname = fname[1:]
		s = "-s " + fname
	else:
		s = "-s --last=%d" % toget

	handler = get_changes_handler(s)

	# return the list of all the patch objects
	return handler.get_list()[topi:toget]

def get_patch(hash):
	handler = get_changes_handler('-s --match "hash %s"' % hash)
	patch = handler.db[handler.list[0]]
	return patch

def get_diff(hash):
	return run_darcs('diff -u --match "hash %s"' % hash)

def get_file_diff(hash, fname):
	return run_darcs('diff -u --match "hash %s" "%s"' % (hash, fname))

def get_file_headdiff(hash, fname):
	return run_darcs('diff -u --from-match "hash %s" "%s"' % (hash, fname))

def get_patch_headdiff(hash):
	return run_darcs('diff -u --from-match "hash %s"' % hash)

def get_raw_diff(hash):
	import gzip
	realf = filter_file(config.repodir + '/_darcs/patches/' + hash)
	if not os.path.isfile(realf):
		return None
	dsrc = gzip.open(realf)
	return dsrc

def get_darcs_diff(hash, fname = None):
	cmd = 'changes -v --matches "hash %s"' % hash
	if fname:
		cmd += ' "%s"' % fname
	return run_darcs(cmd)

def get_darcs_headdiff(hash, fname = None):
	cmd = 'changes -v --from-match "hash %s"' % hash
	if fname:
		cmd += ' "%s"' % fname
	return run_darcs(cmd)

class Annotate:
	def __init__(self):
		self.fname = ""
		self.creator_hash = ""
		self.created_as = ""
		self.lastchange_hash = ""
		self.lastchange_author = ""
		self.lastchange_name = ""
		self.lastchange_date = None
		self.firstdate = None
		self.lastdate = None
		self.lines = []
		self.patches = {}

	class Line:
		def __init__(self):
			self.text = ""
			self.phash = None
			self.pauthor = None
			self.pdate = None

def parse_annotate(src):
	import xml.dom.minidom

	annotate = Annotate()

	# FIXME: convert the source to UTF8; there _has_ to be a way to let
	# minidom know the source encoding
	s = ""
	for i in src:
		s += fixu8(i)

	dom = xml.dom.minidom.parseString(s)

	file = dom.getElementsByTagName("file")[0]
	annotate.fname = fixu8(file.getAttribute("name"))

	createinfo = dom.getElementsByTagName("created_as")[0]
	annotate.created_as = fixu8(createinfo.getAttribute("original_name"))

	creator = createinfo.getElementsByTagName("patch")[0]
	annotate.creator_hash = fixu8(creator.getAttribute("hash"))

	mod = dom.getElementsByTagName("modified")[0]
	lastpatch = mod.getElementsByTagName("patch")[0]
	annotate.lastchange_hash = fixu8(lastpatch.getAttribute("hash"))
	annotate.lastchange_author = fixu8(lastpatch.getAttribute("author"))

	lastname = lastpatch.getElementsByTagName("name")[0]
	lastname = lastname.childNodes[0].wholeText
	annotate.lastchange_name = fixu8(lastname)

	lastdate = lastpatch.getAttribute("date")
	lastdate = time.strptime(lastdate, "%Y%m%d%H%M%S")
	annotate.lastchange_date = lastdate

	annotate.patches[annotate.lastchange_hash] = annotate.lastchange_date

	# these will be overridden by the real dates later
	annotate.firstdate = lastdate
	annotate.lastdate = 0

	file = dom.getElementsByTagName("file")[0]
	for l in file.childNodes:
		# we're only interested in normal and added lines
		if l.nodeName not in ["normal_line", "added_line"]:
			continue
		line = Annotate.Line()

		if l.nodeName == "normal_line":
			patch = l.getElementsByTagName("patch")[0]
			phash = patch.getAttribute("hash")
			pauthor = patch.getAttribute("author")
			pdate = patch.getAttribute("date")
			pdate = time.strptime(pdate, "%Y%m%d%H%M%S")
		else:
			# added lines inherit the creation from the annotate
			# patch
			phash =
annotate.lastchange_hash pauthor = annotate.lastchange_author pdate = annotate.lastchange_date text = "" for node in l.childNodes: if node.nodeType == node.TEXT_NODE: text += node.wholeText # strip all "\n"s at the beginning; because the way darcs # formats the xml output it makes the DOM parser to add "\n"s # in front of it text = text.lstrip("\n") line.text = fixu8(text) line.phash = fixu8(phash) line.pauthor = fixu8(pauthor) line.pdate = pdate annotate.lines.append(line) annotate.patches[line.phash] = line.pdate if pdate > annotate.lastdate: annotate.lastdate = pdate if pdate < annotate.firstdate: annotate.firstdate = pdate return annotate def get_annotate(fname, hash = None): cmd = 'annotate --xml-output' if hash: cmd += ' --match="hash %s"' % hash cmd += ' %s' % fname out = run_darcs(cmd) return parse_annotate(out) # # specific html functions # def print_diff(dsrc): for l in dsrc: l = l.decode(config.repoencoding, 'replace').encode('utf-8') # remove the trailing newline if len(l) > 1: l = l[:-1] if l.startswith('diff'): # file lines, they have their own class print '
%s
' % escape(l) continue color = "" if l[0] == '+': color = 'style="color:#008800;"' elif l[0] == '-': color = 'style="color:#cc0000;"' elif l[0] == '@': color = 'style="color:#990099;"' elif l.startswith('Files'): # binary differences color = 'style="color:#666;"' print '
' % color + escape(l) + '
' def print_darcs_diff(dsrc): for l in dsrc: l = l.decode(config.repoencoding, 'replace').encode('utf-8') if not l.startswith(" "): # comments and normal stuff print '
' + escape(l) + "
" continue l = l.strip() if l[0] == '+': cl = 'class="pre" style="color:#008800;"' elif l[0] == '-': cl = 'class="pre" style="color:#cc0000;"' else: cl = 'class="diff_info"' print '
' % cl + escape(l) + '
' def print_shortlog(last = 50, topi = 0, fname = None): ps = get_last_patches(last, topi, fname) if fname: title = '' % \ (config.myreponame, fname) title += 'History for path %s' % fname title += '' else: title = 'shortlog' \ % config.myreponame print '
%s
' % title print '' if topi != 0: # put a link to the previous page ntopi = topi - last if ntopi < 0: ntopi = 0 print '' alt = False for p in ps: if p.name.startswith("TAG "): print '' elif alt: print '' else: print '' alt = not alt print """ """ % { 'age': how_old(p.local_date), 'author': shorten_str(p.shortauthor, 26), 'myrname': config.myreponame, 'hash': p.hash, 'name': shorten_str(p.name), 'fullname': escape(p.name), } print "" if len(ps) >= last: # only show if we've not shown them all already print '' print "
' if fname: print '...' \ % (config.myreponame, ntopi, fname) else: print '...' \ % (config.myreponame, ntopi) print '
%(age)s %(author)s %(name)s
' if fname: print '...' \ % (config.myreponame, topi + last, fname) else: print '...' \ % (config.myreponame, topi + last) print '
" def print_log(last = 50, topi = 0): ps = get_last_patches(last, topi) if topi != 0: # put a link to the previous page ntopi = topi - last if ntopi < 0: ntopi = 0 print '

<- Prev

' % \ (config.myreponame, ntopi) for p in ps: if p.comment: comment = escape(p.comment) fmt_comment = comment.replace('\n', '
') + '\n' fmt_comment += '

' else: fmt_comment = '' print """

%(age)s%(desc)s
%(author)s [%(date)s]
%(desc)s

%(comment)s
""" % { 'myreponame': config.myreponame, 'age': how_old(p.local_date), 'date': p.local_date_str, 'author': p.shortauthor, 'hash': p.hash, 'desc': escape(p.name), 'comment': fmt_comment } if len(ps) >= last: # only show if we've not shown them all already print '

Next ->

' % \ (config.myreponame, topi + last) def print_blob(fname): print '
%s
' % fname print '
' if isbinary(fname): print """ This is a binary file and its contents will not be displayed.
""" return f = open(realpath(fname), 'r') count = 1 for l in f: l = fixu8(escape(l)) if l and l[-1] == '\n': l = l[:-1] l = replace_tabs(l) print """\
\ %(c)4d %(l)s\
""" % { 'c': count, 'l': l } count += 1 print '' def print_annotate(ann, style): print '
' if isbinary(ann.fname): print """ This is a binary file and its contents will not be displayed.
""" return if style == 'shade': # here's the idea: we will assign to each patch a shade of # color from its date (newer gets darker) max = 0xff min = max - 80 # to do that, we need to get a list of the patch hashes # ordered by their dates l = [ (date, hash) for (hash, date) in ann.patches.items() ] l.sort() l = [ hash for (date, hash) in l ] # now we have to map each element to a number in the range # min-max, with max being close to l[0] and min l[len(l) - 1] lenn = max - min lenl = len(l) shadetable = {} for i in range(0, lenl): hash = l[i] n = float(i * lenn) / lenl n = max - int(round(n)) shadetable[hash] = n elif style == "zebra": lineclass = 'dark' count = 1 prevhash = None for l in ann.lines: text = escape(l.text) text = text.rstrip() text = replace_tabs(text) plongdate = time.strftime("%Y-%m-%d %H:%M:%S", l.pdate) title = "%s by %s" % (plongdate, escape(l.pauthor) ) link = "%(myrname)s;a=commit;h=%(hash)s" % { 'myrname': config.myreponame, 'hash': l.phash } if style == "shade": linestyle = 'style="background-color:#ffff%.2x"' % \ shadetable[l.phash] lineclass = '' elif style == "zebra": linestyle = '' if l.phash != prevhash: if lineclass == 'dark': lineclass = 'light' else: lineclass = 'dark' else: linestyle = '' lineclass = '' if l.phash != prevhash: pdate = time.strftime("%Y-%m-%d", l.pdate) left = l.pauthor.find('<') right = l.pauthor.find('@') if left != -1 and right != -1: shortau = l.pauthor[left + 1:right] elif l.pauthor.find(" ") != -1: shortau = l.pauthor[:l.pauthor.find(" ")] elif right != -1: shortau = l.pauthor[:right] else: shortau = l.pauthor desc = "%12.12s" % shortau date = "%-10.10s" % pdate prevhash = l.phash line = 1 else: if line == 1 and style in ["shade", "zebra"]: t = "%s " % time.strftime("%H:%M:%S", l.pdate) desc = "%12.12s" % "'" date = "%-10.10s" % t else: desc = "%12.12s" % "'" date = "%-10.10s" % "" line += 1 print """\ """ % { 'class': lineclass, 'style': linestyle, 'date': date, 'desc': escape(desc), 'c': count, 'text': text, 'title': title, 'link': link } count += 1 print '' # # available actions # def do_summary(): print_header() print_navbar() owner = repo_get_owner() # we should optimize this, it's a pity to go in such a mess for just # one hash ps = get_last_patches(1) print '
 
' print '' print ' ' % config.repodesc if owner: print ' ' % escape(owner) if len(ps) > 0: print ' ' % \ ps[0].local_date_str print ' ' %\ { 'url': config.repourl } print '
description%s
owner%s
last change%s
url%(url)s
' print_shortlog(15) print_footer() def do_commitdiff(phash): print_header() print_navbar(h = phash) p = get_patch(phash) print """
%(name)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, } dsrc = p.getdiff() print_diff(dsrc) print_footer() def do_plain_commitdiff(phash): print_plain_header() dsrc = get_diff(phash) for l in dsrc: sys.stdout.write(fixu8(l)) def do_darcs_commitdiff(phash): print_header() print_navbar(h = phash) p = get_patch(phash) print """
%(name)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, } dsrc = get_darcs_diff(phash) print_darcs_diff(dsrc) print_footer() def do_raw_commitdiff(phash): print_plain_header() dsrc = get_raw_diff(phash) if not dsrc: print "Error opening file!" return for l in dsrc: sys.stdout.write(l) def do_headdiff(phash): print_header() print_navbar(h = phash) p = get_patch(phash) print """
%(name)s --> to head
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, } dsrc = get_patch_headdiff(phash) print_diff(dsrc) print_footer() def do_plain_headdiff(phash): print_plain_header() dsrc = get_patch_headdiff(phash) for l in dsrc: sys.stdout.write(fixu8(l)) def do_darcs_headdiff(phash): print_header() print_navbar(h = phash) p = get_patch(phash) print """
%(name)s --> to head
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, } dsrc = get_darcs_headdiff(phash) print_darcs_diff(dsrc) print_footer() def do_raw_headdiff(phash): print_plain_header() dsrc = get_darcs_headdiff(phash) for l in dsrc: sys.stdout.write(l) def do_filediff(phash, fname): print_header() print_navbar(h = phash, f = fname) p = get_patch(phash) dsrc = get_file_diff(phash, fname) print """
%(name)s
%(fname)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, 'fname': fname, } print_diff(dsrc) print_footer() def do_plain_filediff(phash, fname): print_plain_header() dsrc = get_file_diff(phash, fname) for l in dsrc: sys.stdout.write(fixu8(l)) def do_darcs_filediff(phash, fname): print_header() print_navbar(h = phash, f = fname) p = get_patch(phash) print """
%(name)s
%(fname)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, 'fname': fname, } dsrc = get_darcs_diff(phash, fname) print_darcs_diff(dsrc) print_footer() def do_file_headdiff(phash, fname): print_header() print_navbar(h = phash, f = fname) p = get_patch(phash) dsrc = get_file_headdiff(phash, fname) print """
%(name)s --> to head
%(fname)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, 'fname': fname, } print_diff(dsrc) print_footer() def do_plain_fileheaddiff(phash, fname): print_plain_header() dsrc = get_file_headdiff(phash, fname) for l in dsrc: sys.stdout.write(fixu8(l)) def do_darcs_fileheaddiff(phash, fname): print_header() print_navbar(h = phash, f = fname) p = get_patch(phash) print """
%(name)s --> to head
%(fname)s
""" % { 'myreponame': config.myreponame, 'hash': p.hash, 'name': p.name, 'fname': fname, } dsrc = get_darcs_headdiff(phash, fname) print_darcs_diff(dsrc) print_footer() print_plain_header() print "Not yet implemented" def do_commit(phash): print_header() print_navbar(h = phash) p = get_patch(phash) print """
%(name)s
author%(author)s
local date%(local_date)s
date%(date)s
hash%(hash)s
""" % { 'myreponame': config.myreponame, 'author': p.author, 'local_date': p.local_date_str, 'date': p.date_str, 'hash': p.hash, 'name': p.name, } if p.comment: c = p.comment.replace('\n', '
\n') print '
' print p.name, '

' print c print '
' changed = p.adds + p.removes + p.modifies.keys() + p.moves.keys() + \ p.diradds + p.dirremoves + p.replaces.keys() if changed or p.moves: n = len(changed) print '
%d file(s) changed:
' % n print '' changed.sort() alt = False for f in changed: if alt: print '' else: print '' alt = not alt show_diff = 1 if p.moves.has_key(f): # don't show diffs for moves, they're broken as of # darcs 1.0.3 show_diff = 0 if show_diff: print """ """ % { 'myreponame': config.myreponame, 'hash': p.hash, 'file': f } else: print "" % f show_diff = 1 if f in p.adds: print '' elif f in p.diradds: print '' elif f in p.removes: print '' elif f in p.dirremoves: print '' elif p.replaces.has_key(f): print '' elif p.moves.has_key(f): print '' show_diff = 0 else: print '' if show_diff: print """ """ % { 'myreponame': config.myreponame, 'hash': p.hash, 'file': f } print '' print '
%(file)s %s', print '[added]', print '', print '[added dir]', print '', print '[removed]', print '', print '[removed dir]', print '', print '[replaced %d tokens]' % p.replaces[f], print '', print '[moved to "%s"]' % p.moves[f] print '', if p.modifies[f].has_key('b'): # binary modification print '(binary)' else: print '+%(+)d -%(-)d' % p.modifies[f], print '
' print_footer() def do_tree(dname): print_header() print_navbar() # the head print """
Current tree
""" % config.myreponame # and the linked, with links parts = dname.split('/') print '/ ' sofar = '/' for p in parts: if not p: continue sofar += '/' + p print '%s /' % \ (config.myreponame, sofar, p) print """
""" path = realpath(dname) + '/' alt = False files = os.listdir(path) files.sort() # list directories first dlist = [] flist = [] for f in files: if f == "_darcs": continue realfile = path + f if os.path.isdir(realfile): dlist.append(f) else: flist.append(f) files = dlist + flist for f in files: if alt: print '' else: print '' alt = not alt realfile = path + f print '' if f in dlist: print """ """ % { 'myrname': config.myreponame, 'f': f, 'newf': filter_file(dname + '/' + f), } else: print """ """ % { 'myrname': config.myreponame, 'f': f, 'fullf': filter_file(dname + '/' + f), } print '' print '
', fperms(realfile), print '%(f)s %(f)s
' print_footer() def do_headblob(fname): print_header() print_navbar(f = fname) filepath = os.path.dirname(fname) print """
""" # and the linked, with links parts = filepath.split('/') print '/ ' sofar = '/' for p in parts: if not p: continue sofar += '/' + p print '%s /' % \ (config.myreponame, sofar, p) print '
' print_blob(fname) print_footer() def do_plainblob(fname): print_plain_header() f = open(realpath(fname), 'r') for l in f: sys.stdout.write(fixu8(l)) def do_annotate(fname, phash, style): print_header() ann = get_annotate(fname, phash) print_navbar(f = fname, h = ann.lastchange_hash) print """
%(name)s
Annotate for file %(fname)s
""" % { 'myreponame': config.myreponame, 'hash': ann.lastchange_hash, 'name': escape(ann.lastchange_name), 'fname': escape(fname), } print_annotate(ann, style) print_footer() def do_annotate_plain(fname, phash): print_plain_header() ann = get_annotate(fname, phash) for l in ann.lines: sys.stdout.write(l.text) def do_shortlog(topi): print_header() print_navbar() print_shortlog(topi = topi) print_footer() def do_filehistory(topi, f): print_header() print_navbar(f = fname) print_shortlog(topi = topi, fname = fname) print_footer() def do_log(topi): print_header() print_navbar() print_log(topi = topi) print_footer() def do_atom(): print "Content-type: application/atom+xml; charset=utf-8\n" print '' inv = config.repodir + '/_darcs/inventory' repo_lastmod = os.stat(inv).st_mtime str_lastmod = time.strftime(iso_datetime, time.localtime(repo_lastmod)) print """ %(reponame)s darcs repository %(url)s darcs repository (several authors) darcsweb.cgi %(lastmod)s %(desc)s """ % { 'reponame': config.reponame, 'url': config.myurl + '/' + config.myreponame, 'desc': config.repodesc, 'lastmod': str_lastmod, } ps = get_last_patches(20) for p in ps: title = time.strftime('%d %b %H:%M', time.localtime(p.date)) title += ' - ' + p.name pdate = time.strftime(iso_datetime, time.localtime(p.date)) link = '%s/%s;a=commit;h=%s' % (config.myurl, config.myreponame, p.hash) addr, author = email.Utils.parseaddr(p.author) if not addr: addr = "unknown_email@example.com" if not author: author = addr print """ %(title)s %(author)s %(email)s %(pdate)s %(link)s %(desc)s

""" % { 'title': escape(title), 'author': author, 'email': addr, 'url': config.myurl + '/' + config.myreponame, 'pdate': pdate, 'myrname': config.myreponame, 'hash': p.hash, 'pname': escape(p.name), 'link': link, 'desc': escape(p.name), } # TODO: allow to get plain text, not HTML? print escape(p.name) + '
' if p.comment: print '
' print escape(p.comment).replace('\n', '
\n') print '
' print '
' changed = p.adds + p.removes + p.modifies.keys() + \ p.moves.keys() + p.diradds + p.dirremoves + \ p.replaces.keys() for i in changed: # TODO: link to the file print '%s
' % i print '

' print '
' print '
' def do_rss(): print "Content-type: text/xml; charset=utf-8\n" print '' print """ %(reponame)s %(url)s %(desc)s en """ % { 'reponame': config.reponame, 'url': config.myurl + '/' + config.myreponame, 'desc': config.repodesc, } ps = get_last_patches(20) for p in ps: title = time.strftime('%d %b %H:%M', time.localtime(p.date)) title += ' - ' + p.name pdate = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime(p.date)) link = '%s/%s;a=commit;h=%s' % (config.myurl, config.myreponame, p.hash) # the author field is tricky because the standard requires it # has an email address; so we need to check that and lie # otherwise; there's more info at # http://feedvalidator.org/docs/error/InvalidContact.html if "@" in p.author: author = p.author else: author = "%s <unknown@email>" % p.author print """ %(title)s %(author)s %(pdate)s %(link)s %(desc)s """ % { 'title': escape(title), 'author': author, 'pdate': pdate, 'link': link, 'desc': escape(p.name), } print ' ' if p.comment: print '
' print escape(p.comment).replace('\n', '
\n') print '
' print '
' changed = p.adds + p.removes + p.modifies.keys() + \ p.moves.keys() + p.diradds + p.dirremoves + \ p.replaces.keys() for i in changed: print '%s
' % i print ']]>' print '
' print '
' def do_die(): print_header() print "

Error! Malformed query

" print_footer() def do_listrepos(): import config as all_configs # the header here is special since we don't have a repo print "Content-type: text/html; charset=utf-8\n" print '' print """ darcs - Repositories
%(summary)s
""" % { 'myname': config.myname, 'css': config.cssfile, 'fav': config.darcsfav, 'logo': config.darcslogo, 'summary': config.summary } # some python magic alt = False expand_multi_config(all_configs) for conf in dir(all_configs): if conf.startswith('__'): continue c = all_configs.__getattribute__(conf) if 'reponame' not in dir(c): continue name = escape(c.reponame) desc = escape(c.repodesc) if alt: print '' else: print '' alt = not alt print """ """ % { 'myname': all_configs.base.myname, 'dname': name, 'name': urllib.quote(name), 'desc': shorten_str(desc, 60) } print "
Project Description
%(dname)s %(desc)s
" print_footer(put_rss = 0) def expand_multi_config(config): """Expand configuration entries that serve as "template" to others; this make it easier to have a single directory with all the repos, because they don't need specific entries in the configuration anymore. """ for conf in dir(config): if conf.startswith('__'): continue c = config.__getattribute__(conf) if 'multidir' not in dir(c): continue if 'exclude' not in dir(c): c.exclude = [] entries = os.listdir(c.multidir) entries.sort() for name in entries: if name.startswith('.'): continue fulldir = c.multidir + '/' + name if not os.path.isdir(fulldir + '/_darcs'): continue if name in c.exclude: continue rdir = fulldir desc = c.repodesc % { 'name': name } url = c.repourl % { 'name': name } class tmp_config: reponame = name repodir = rdir repodesc = desc repourl = url repoencoding = c.repoencoding if 'footer' in dir(c): footer = c.footer config.__setattr__(name, tmp_config) def fill_config(name = None): import config as all_configs expand_multi_config(all_configs) if name: # we only care about setting some configurations if a repo was # specified; otherwise we only set the common configuration # directives for conf in dir(all_configs): if conf.startswith('__'): continue c = all_configs.__getattribute__(conf) if 'reponame' not in dir(c): continue if c.reponame == name: break else: # not found raise "RepoNotFound", name # fill the configuration base = all_configs.base config.myname = base.myname config.myurl = base.myurl config.darcslogo = base.darcslogo config.darcsfav = base.darcsfav config.cssfile = base.cssfile if name: config.myreponame = base.myname + '?r=' + urllib.quote(name) config.reponame = c.reponame config.repodesc = c.repodesc config.repodir = c.repodir config.repourl = c.repourl config.repoencoding = c.repoencoding # optional parameters if "darcspath" in dir(base): config.darcspath = base.darcspath + '/' else: config.darcspath = "" if "summary" in dir(base): config.summary = base.summary else: config.summary = """ This is the repository index for a darcsweb site.
These are all the available repositories.
""" if "cachedir" in dir(base): config.cachedir = base.cachedir else: config.cachedir = None if name and "footer" in dir(c): config.footer = c.footer elif "footer" in dir(base): config.footer = base.footer else: config.footer = "Crece desde el pueblo el futuro / " \ + "crece desde el pie" # # main # form = cgi.FieldStorage() # if they don't specify a repo, print the list and exit if not form.has_key('r'): fill_config() do_listrepos() sys.exit(0) # get the repo configuration and fill the config class current_repo = urllib.unquote(form['r'].value) fill_config(current_repo) # get the action, or default to summary if not form.has_key("a"): action = "summary" else: action = filter_act(form["a"].value) # check if we have the page in the cache if config.cachedir: url_request = os.environ['QUERY_STRING'] cache = Cache(config.cachedir, url_request) if cache.open(): # we have a hit, dump and run cache.dump() cache.close() sys.exit(0) # if there is a miss, the cache will step over stdout, intercepting # all "print"s and writing them to the cache file automatically # see what should we do according to the received action if action == "summary": do_summary() elif action == "commit": phash = filter_hash(form["h"].value) do_commit(phash) elif action == "commitdiff": phash = filter_hash(form["h"].value) do_commitdiff(phash) elif action == "plain_commitdiff": phash = filter_hash(form["h"].value) do_plain_commitdiff(phash) elif action == "darcs_commitdiff": phash = filter_hash(form["h"].value) do_darcs_commitdiff(phash) elif action == "raw_commitdiff": phash = filter_hash(form["h"].value) do_raw_commitdiff(phash) elif action == 'headdiff': phash = filter_hash(form["h"].value) do_headdiff(phash) elif action == "plain_headdiff": phash = filter_hash(form["h"].value) do_plain_headdiff(phash) elif action == "darcs_headdiff": phash = filter_hash(form["h"].value) do_darcs_headdiff(phash) elif action == "filediff": phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_filediff(phash, fname) elif action == "plain_filediff": phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_plain_filediff(phash, fname) elif action == "darcs_filediff": phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_darcs_filediff(phash, fname) elif action == 'headfilediff': phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_file_headdiff(phash, fname) elif action == "plain_headfilediff": phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_plain_fileheaddiff(phash, fname) elif action == "darcs_headfilediff": phash = filter_hash(form["h"].value) fname = filter_file(form["f"].value) do_darcs_fileheaddiff(phash, fname) elif action == "annotate_normal": fname = filter_file(form["f"].value) if form.has_key("h"): phash = filter_hash(form["h"].value) else: phash = None do_annotate(fname, phash, "normal") elif action == "annotate_plain": fname = filter_file(form["f"].value) if form.has_key("h"): phash = filter_hash(form["h"].value) else: phash = None do_annotate_plain(fname, phash) elif action == "annotate_zebra": fname = filter_file(form["f"].value) if form.has_key("h"): phash = filter_hash(form["h"].value) else: phash = None do_annotate(fname, phash, "zebra") elif action == "annotate_shade": fname = filter_file(form["f"].value) if form.has_key("h"): phash = filter_hash(form["h"].value) else: phash = None do_annotate(fname, phash, "shade") elif action == "shortlog": if form.has_key("topi"): topi = 
int(filter_num(form["topi"].value)) else: topi = 0 do_shortlog(topi) elif action == "filehistory": if form.has_key("topi"): topi = int(filter_num(form["topi"].value)) else: topi = 0 fname = filter_file(form["f"].value) do_filehistory(topi, fname) elif action == "log": if form.has_key("topi"): topi = int(filter_num(form["topi"].value)) else: topi = 0 do_log(topi) elif action == 'headblob': fname = filter_file(form["f"].value) do_headblob(fname) elif action == 'plainblob': fname = filter_file(form["f"].value) do_plainblob(fname) elif action == 'tree': if form.has_key('f'): fname = filter_file(form["f"].value) else: fname = '/' do_tree(fname) elif action == 'rss': do_rss() elif action == 'atom': do_atom() else: action = "invalid query" do_die() if config.cachedir: cache.cancel() if config.cachedir: cache.close()