# HG changeset patch
# User sirebral
# Date 1256959300 18000
# Node ID 496dbf12a6cb4e7aa1443e9bcbe8dbe6adc2bd17
# Parent d86e762a994f540a245c0df1420865625104c02d
Traipse Alpha 'OpenRPG' {091030-00}
Traipse is a distribution of OpenRPG that is designed to be easy to set up and
go. Traipse also makes it easy for developers to work on code without fear of
sacrificing their work. 'Ornery-Orc' continues the trend of 'Grumpy' and adds
fixes to the code. 'Ornery-Orc's main goal is to offer more advanced features
and enhance the productivity of the user.
Update Summary (Cleaning up for Beta):
Adds Bookmarks (Alpha) with cool Smiley Star and Plus Symbol images!
Changes made to the map for increased portability. SnowDog has changes planned in
Core, though.
Added an initial push to the BCG. Not much to see; it just shows off how it is
rewriting the Main code.
Fix to remote admin commands
Minor fix to text-based server; works in /System/ folder
Some Core changes to gametree to correctly display Pretty Print, thanks David!
Fix to Splitter Nodes not being created.
Added images to Plugin Control panel for Autostart feature
Fix to massive amounts of images loading; from Core
Fix to gsclient so with_statement imports work
Added 'boot' command to remote admin
Prep work in Pass tool for remote admin rankings and different passwords, i.e.
Server, Admin, Moderator, etc.
Remote Admin Commands more organized, more prep work.
Added Confirmation window for sent nodes.
Minor changes to allow for portability to an openSUSE Linux OS (hopefully
without breaking anything)
{091028}
Made changes to gametree to start working with Element Tree, mostly from Core
Minor changes to Map to start working with Element Tree, from Core
Preliminary changes to map efficiency, from FlexiRPG
Miniatures Layer pop up box allows users to turn off Mini labels, from FlexiRPG
Changes to main.py to start working with Element Tree
{091029}
Changes made to server to start working with Element Tree.
Changes made to Meta Server Lib. Prepping test work for a multi meta network
page.
Minor bug fixed with sending minis to the gametree
Zoom Mouse plugin added.
{091030}
Getting ready for Beta. The server still needs debugging, so the Alpha remains buggy.
Plugin UI code cleaned. Autostart works with a graphic; a pop-up asks to enable
or disable the plugin.
Update Manager now has a partially working Status Bar. The Status Bar captures
terminal text, so Mercurial output is visible. The manifest.xml file (to be
renamed) is now much cleaner.
Debug Console has a Clear button and a Report Bug button. Prep work for a
Term2Win class in the Debug Console.
Known: Current Alpha fails in Windows.
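
The Status Bar and Debug Console both work by redirecting sys.stdout, so
anything Mercurial prints lands in a wx text widget. A minimal sketch of the
idea (the class and names here are illustrative, not the shipped Traipse code):

    import sys
    import wx

    class StdoutToTextCtrl(object):
        # File-like object: everything written to stdout lands in a wx.TextCtrl.
        def __init__(self, ctrl):
            self.ctrl = ctrl
        def write(self, text):
            self.ctrl.AppendText(text)
            wx.Yield()  # let the UI repaint while long output streams in
        def flush(self):
            pass  # nothing is buffered

    # usage: console = wx.TextCtrl(frame, -1, style=wx.TE_MULTILINE)
    #        sys.stdout = StdoutToTextCtrl(console)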
diff -r d86e762a994f -r 496dbf12a6cb orpg/orpg_version.py
--- a/orpg/orpg_version.py Thu Oct 29 20:37:11 2009 -0500
+++ b/orpg/orpg_version.py Fri Oct 30 22:21:40 2009 -0500
@@ -4,7 +4,7 @@
#BUILD NUMBER FORMAT: "YYMMDD-##" where ## is the incremental daily build index (if needed)
DISTRO = "Traipse Alpha"
DIS_VER = "Ornery Orc"
-BUILD = "091029-00"
+BUILD = "091030-00"
# This version is for network capability.
PROTOCOL_VERSION = "1.2"
diff -r d86e762a994f -r 496dbf12a6cb orpg/tools/orpg_log.py
--- a/orpg/tools/orpg_log.py Thu Oct 29 20:37:11 2009 -0500
+++ b/orpg/tools/orpg_log.py Fri Oct 30 22:21:40 2009 -0500
@@ -37,12 +37,14 @@
#########################
## Error Types
#########################
+ORPG_PRINT = 0
ORPG_CRITICAL = 1
ORPG_GENERAL = 2
ORPG_INFO = 4
ORPG_NOTE = 8
ORPG_DEBUG = 16
+
def Crash(type, value, crash):
crash_report = open(dir_struct["home"] + 'crash-report.txt', "w")
traceback.print_exception(type, value, crash, file=crash_report)
@@ -55,25 +57,46 @@
logger.exception("Crash Report Created!!")
logger.info("Printed out crash-report.txt in your System folder", True)
+class Term2Win(object):
+ # A stdout redirector. Allows the messages from Mercurial to be seen in the Install Window.
+ # Prep work: the real output targets are still commented out, so write()
+ # currently discards the text and only yields to keep the UI responsive.
+ def write(self, text):
+ #logger.stdout(text)
+ wx.Yield()
+ #sys.__stdout__.write(text)
+
class DebugConsole(wx.Frame):
def __init__(self, parent):
super(DebugConsole, self).__init__(parent, -1, "Debug Console")
- icon = None
icon = wx.Icon(dir_struct["icon"]+'note.ico', wx.BITMAP_TYPE_ICO)
- self.SetIcon( icon )
+ self.SetIcon(icon)
self.console = wx.TextCtrl(self, -1, style=wx.TE_MULTILINE | wx.TE_READONLY)
- sizer = wx.BoxSizer(wx.VERTICAL)
- sizer.Add(self.console, 1, wx.EXPAND)
+ self.bt_clear = wx.Button(self, wx.ID_CLEAR)
+ self.report = wx.Button(self, wx.ID_ANY, 'Bug Report')
+ sizer = wx.GridBagSizer(hgap=1, vgap=1)
+ sizer.Add(self.console, (0,0), span=(1,2), flag=wx.EXPAND)
+ sizer.Add(self.bt_clear, (1,0), flag=wx.ALIGN_LEFT)
+ sizer.Add(self.report, (1,1), flag=wx.ALIGN_LEFT)
+ sizer.AddGrowableCol(0)
+ sizer.AddGrowableRow(0)
self.SetSizer(sizer)
self.SetAutoLayout(True)
- self.SetSize((300, 175))
+ self.SetSize((450, 175))
self.Bind(wx.EVT_CLOSE, self.Min)
+ self.Bind(wx.EVT_BUTTON, self.clear, self.bt_clear)
+ self.Bind(wx.EVT_BUTTON, self.bug_report, self.report)
self.Min(None)
+ sys.stdout = Term2Win()
component.add('debugger', self.console)
def Min(self, evt):
self.Hide()
+ def clear(self, evt):
+ self.console.SetValue('')
+
+ def bug_report(self, evt):
+ pass
+
class orpgLog(object):
_log_level = 7
_log_name = None
diff -r d86e762a994f -r 496dbf12a6cb upmana/default_manifest.xml
--- a/upmana/default_manifest.xml Thu Oct 29 20:37:11 2009 -0500
+++ b/upmana/default_manifest.xml Fri Oct 30 22:21:40 2009 -0500
@@ -1,13 +1,8 @@
http://hg.assembla.com/traipse
- http://hg.assembla.com/traipse
-
-
-
- repo-Traipse
-
-
-
+
+ http://hg.assembla.com/traipse
+
diff -r d86e762a994f -r 496dbf12a6cb upmana/manifest.py
--- a/upmana/manifest.py Thu Oct 29 20:37:11 2009 -0500
+++ b/upmana/manifest.py Fri Oct 30 22:21:40 2009 -0500
@@ -1,185 +1,214 @@
-import xmltramp
+from __future__ import with_statement
+
from orpg.dirpath import dir_struct
-import upmana.validate
-from os import sep
+from upmana.validate import validate
+from orpg.tools.orpg_log import logger
+from os import sep, getcwd
from types import *
-class ManifestChanges:
- def __init__(self, filename="updatemana.xml"):
- self.filename = dir_struct["home"] + 'upmana' + sep + filename
- upmana.validate.Validate(dir_struct["home"] + 'upmana' + sep).config_file(filename,"default_manifest.xml")
- self.xml_dom = self.LoadDoc()
+from xml.etree.ElementTree import ElementTree, Element, parse, fromstring
+from xml.etree.ElementPath import find
+
+class ManifestChanges(object):
+ etree = ElementTree()
+ filename = dir_struct['home'] + 'upmana' + sep + 'updatemana.xml'
- def GetString(self, plugname, strname, defaultval, verbose=0):
+ # Singleton: cache the first instance on the class and return it for
+ # every subsequent construction.
+ def __new__(cls, *args, **kwargs):
+ it = cls.__dict__.get("__it__")
+ if it is not None:
+ return it
+ cls.__it__ = it = object.__new__(cls)
+ it._init()
+ return it
+
+ def _init(self):
+ validate.config_file('updatemana.xml', "default_manifest.xml")
+ self.LoadDoc()
+
+ def PluginChildren(self, plugname):
+ plugin = self.etree.find(plugname)
+ children = plugin.getchildren()
+ nodes = []
+ for child in children:
+ nodes.append(child.tag)
+ return nodes
+
+ def GetString(self, plugname, strname, defaultval="", verbose=False):
strname = self.safe(strname)
- for plugin in self.xml_dom:
- if plugname == plugin._name:
- for child in plugin._dir:
- if child._name == strname:
- #str() on this to make sure it's ASCII, not unicode, since orpg can't handle unicode.
- if verbose: print "successfully found the value"
- if len(child): return str( self.normal(child[0]) )
- else: return ""
- else:
- if verbose:
- print "manifest: no value has been stored for " + strname + " in " + plugname + " so the default has been returned"
+ plugin = self.etree.find(plugname)
+ if plugin is None or plugin.find(strname) is None:
+ msg = ["plugindb: no value has been stored for", strname, "in",
+ plugname, "so the default has been returned"]
return defaultval
+ return self.normal(plugin.find(strname).text)
+
+ def DelString(self, plugname, strname):
+ strname = self.safe(strname)
+ plugin = self.etree.find(plugname)
+ plugin.remove(plugin.find(strname))
+ self.SaveDoc()
def SetString(self, plugname, strname, val):
- #Set Node,
- #Set Setting,
val = self.safe(val)
strname = self.safe(strname)
- for plugin in self.xml_dom:
- ##this isn't absolutely necessary, but it saves the trouble of sending a parsed object instead of a simple string.
- if plugname == plugin._name:
- plugin[strname] = val
- plugin[strname]._attrs["type"] = "string"
- self.SaveDoc()
- return "found plugin"
- else:
- self.xml_dom[plugname] = xmltramp.parse("<" + strname + " type=\"string\">" + val + "</" + strname + ">")
- self.SaveDoc()
- return "added plugin"
-
+ plugin = self.etree.find(plugname)
+ if plugin is None:
+ plugin = Element(plugname)
+ self.etree.getroot().append(plugin)
+ str_el = plugin.find(strname)
+ if str_el is None:
+ str_el = Element(strname)
+ str_el.set('type', 'str')
+ plugin.append(str_el)
+ str_el.text = val
+ self.SaveDoc()
def FetchList(self, parent):
retlist = []
- if not len(parent): return []
- for litem in parent[0]._dir:
- if len(litem):
- if litem._attrs["type"] == "int": retlist += [int(litem[0])]
- elif litem._attrs["type"] == "long": retlist += [long(litem[0])]
- elif litem._attrs["type"] == "float": retlist += [float(litem[0])]
- elif litem._attrs["type"] == "list": retlist += [self.FetchList(litem)]
- elif litem._attrs["type"] == "dict": retlist += [self.FetchDict(litem)]
- else: retlist += [str( self.normal(litem[0]) )]
- else: retlist += [""]
+ for litem in parent.findall('lobject'):
+ if litem.get('type') == 'int': retlist.append(int(litem.text))
+ elif litem.get('type') == 'bool': retlist.append(litem.text == 'True')
+ elif litem.get('type') == 'float': retlist.append(float(litem.text))
+ elif litem.get('type') == 'list': retlist.append(self.FetchList(litem))
+ elif litem.get('type') == 'dict': retlist.append(self.FetchDict(litem))
+ else: retlist.append(str(self.normal(litem.text)))
+ return retlist
+
+ def GetList(self, plugname, listname, defaultval=list(), verbose=False):
+ listname = self.safe(listname)
+ plugin = self.etree.find(plugname)
+ if plugin is None or plugin.find(listname) is None:
+ msg = ["plugindb: no value has been stored for", listname, "in",
+ plugname, "so the default has been returned"]
+ return defaultval
+ retlist = self.FetchList(plugin.find(listname))
return retlist
- def GetList(self, plugname, listname, defaultval, verbose=0):
- listname = self.safe(listname)
- for plugin in self.xml_dom:
- if plugname == plugin._name:
- for child in plugin._dir:
- if child._name == listname and child._attrs["type"] == "list":
- retlist = self.FetchList(child)
- if verbose: print "successfully found the value"
- return retlist
- else:
- if verbose:
- print "plugindb: no value has been stored for " + listname + " in " + plugname + " so the default has been returned"
- return defaultval
-
def BuildList(self, val):
- listerine = ""
+ list_el = Element('list')
for item in val:
- if isinstance(item, basestring):#it's a string
- listerine += "<lobject type=\"str\">" + self.safe(item) + "</lobject>"
- elif isinstance(item, IntType):#it's an int
- listerine += "<lobject type=\"int\">" + str(item) + "</lobject>"
- elif isinstance(item, FloatType):#it's a float
- listerine += "<lobject type=\"float\">" + str(item) + "</lobject>"
- elif isinstance(item, LongType):#it's a long
- listerine += "<lobject type=\"long\">" + str(item) + "</lobject>"
- elif isinstance(item, ListType):#it's a list
- listerine += "<lobject type=\"list\">" + self.BuildList(item) + "</lobject>"
- elif isinstance(item, DictType):#it's a dictionary
- listerine += "<lobject type=\"dict\">" + self.BuildDict(item) + "</lobject>"
- else: return "type unknown"
- listerine += "</list>"
- return listerine
+ i = Element('lobject')
+ if isinstance(item, bool):
+ i.set('type', 'bool')
+ i.text = str(item)
+ elif isinstance(item, int):#it's an int
+ i.set('type', 'int')
+ i.text = str(item)
+ elif isinstance(item, float):#it's a float
+ i.set('type', 'float')
+ i.text = str(item)
+ elif isinstance(item, (list, tuple)):#it's a list
+ i.set('type', 'list')
+ i.append(self.BuildList(item))
+ elif isinstance(item, dict):#it's a dictionary
+ i.set('type', 'dict')
+ i.append(self.BuildDict(item))
+ else:
+ i.set('type', 'str')
+ i.text = self.safe(item)
+ list_el.append(i)
+ return list_el
def SetList(self, plugname, listname, val):
listname = self.safe(listname)
- list = xmltramp.parse(self.BuildList(val))
- for plugin in self.xml_dom:
- if plugname == plugin._name:
- plugin[listname] = list
- plugin[listname]._attrs["type"] = "list"
- self.SaveDoc()
- return "found plugin"
+ plugin = self.etree.find(plugname)
+ if plugin is None:
+ plugin = Element(plugname)
+ self.etree.getroot().append(plugin)
+ list_el = plugin.find(listname)
+ if list_el is None:
+ list_el = Element(listname)
+ list_el.set('type', 'list')
+ plugin.append(list_el)
else:
- self.xml_dom[plugname] = xmltramp.parse("<" + listname + "></" + listname + ">")
- self.xml_dom[plugname][listname] = list
- self.xml_dom[plugname][listname]._attrs["type"] = "list"
- self.SaveDoc()
- return "added plugin"
+ list_el.remove(list_el.find('list'))
+ list_el.append(self.BuildList(val))
+ self.SaveDoc()
def BuildDict(self, val):
- dictator = ""
- for item in val.keys():
- if isinstance(val[item], basestring):
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"str\">" + self.safe(val[item]) + "</dobject>"
- elif isinstance(val[item], IntType):#it's an int
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"int\">" + str(val[item]) + "</dobject>"
- elif isinstance(val[item], FloatType):#it's a float
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"float\">" + str(val[item]) + "</dobject>"
- elif isinstance(val[item], LongType):#it's a long
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"long\">" + str(val[item]) + "</dobject>"
- elif isinstance(val[item], DictType):#it's a dictionary
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"dict\">" + self.BuildDict(val[item]) + "</dobject>"
- elif isinstance(val[item], ListType):#it's a list
- dictator += "<dobject name=\"" + self.safe(item) + "\" type=\"list\">" + self.BuildList(val[item]) + "</dobject>"
- else: return str(val[item]) + ": type unknown"
- dictator += "</dict>"
- return dictator
+ dict_el = Element('dict')
+ for key, item in val.items():
+ i = Element('dobject')
+ if isinstance(item, bool):
+ i.set('type', 'bool')
+ i.set('name', self.safe(key))
+ i.text = str(item)
+ elif isinstance(item, int):#it's an int
+ i.set('type', 'int')
+ i.set('name', self.safe(key))
+ i.text = str(item)
+ elif isinstance(item, float):#it's a float
+ i.set('type', 'float')
+ i.set('name', self.safe(key))
+ i.text = str(item)
+ elif isinstance(item, (list, tuple)):#it's a list
+ i.set('type', 'list')
+ i.set('name', self.safe(key))
+ i.append(self.BuildList(item))
+ elif isinstance(item, dict):#it's a dictionary
+ i.set('type', 'dict')
+ i.set('name', self.safe(key))
+ i.append(self.BuildDict(item))
+ else:
+ i.set('type', 'str')
+ i.set('name', self.safe(key))
+ i.text = self.safe(item)
+ dict_el.append(i)
+ return dict_el
- def SetDict(self, plugname, dictname, val, file="plugindb.xml"):
+ def SetDict(self, plugname, dictname, val):
dictname = self.safe(dictname)
- dict = xmltramp.parse(self.BuildDict(val))
- for plugin in self.xml_dom:
- if plugname == plugin._name:
- plugin[dictname] = dict
- plugin[dictname]._attrs["type"] = "dict"
- self.SaveDoc()
- return "found plugin"
+ plugin = self.etree.find(plugname)
+ if plugin is None:
+ plugin = Element(plugname)
+ self.etree.getroot().append(plugin)
+ dict_el = plugin.find(dictname)
+ if dict_el is None:
+ dict_el = Element(dictname)
+ dict_el.set('type', 'dict')
+ plugin.append(dict_el)
else:
- self.xml_dom[plugname] = xmltramp.parse("<" + dictname + "></" + dictname + ">")
- self.xml_dom[plugname][dictname] = dict
- self.xml_dom[plugname][dictname]._attrs["type"] = "dict"
- self.SaveDoc()
- return "added plugin"
+ dict_el.remove(dict_el.find('dict'))
+ dict_el.append(self.BuildDict(val))
+ self.SaveDoc()
def FetchDict(self, parent):
retdict = {}
- if not len(parent): return {}
- for ditem in parent[0]._dir:
- if len(ditem):
- ditem._attrs["name"] = self.normal(ditem._attrs["name"])
- if ditem._attrs["type"] == "int": retdict[ditem._attrs["name"]] = int(ditem[0])
- elif ditem._attrs["type"] == "long": retdict[ditem._attrs["name"]] = long(ditem[0])
- elif ditem._attrs["type"] == "float": retdict[ditem._attrs["name"]] = float(ditem[0])
- elif ditem._attrs["type"] == "list": retdict[ditem._attrs["name"]] = self.FetchList(ditem)
- elif ditem._attrs["type"] == "dict": retdict[ditem._attrs["name"]] = self.FetchDict(ditem)
- else: retdict[ditem._attrs["name"]] = str( self.normal(ditem[0]) )
- else: retdict[ditem._attrs["name"]] = ""
+ for ditem in parent.findall('dobject'):
+ key = self.normal(ditem.get('name'))
+ if ditem.get('type') == 'int': value = int(ditem.text)
+ elif ditem.get('type') == 'bool': value = ditem.text == 'True'
+ elif ditem.get('type') == 'float': value = float(ditem.text)
+ elif ditem.get('type') == 'list': value = self.FetchList(ditem)
+ elif ditem.get('type') == 'dict': value = self.FetchDict(ditem)
+ else: value = str(self.normal(ditem.text))
+ retdict[key] = value
return retdict
- def GetDict(self, plugname, dictname, defaultval, verbose=0):
+ def GetDict(self, plugname, dictname, defaultval=dict(), verbose=False):
dictname = self.safe(dictname)
- for plugin in self.xml_dom:
- if plugname == plugin._name:
- for child in plugin._dir:
- if child._name == dictname and child._attrs["type"] == "dict": return self.FetchDict(child)
- else:
- if verbose:
- print "plugindb: no value has been stored for " + dictname + " in " + plugname + " so the default has been returned"
+ plugin = self.etree.find(plugname)
+ if plugin is None or plugin.find(dictname) is None:
+ msg = ["plugindb: no value has been stored for", dictname, "in",
+ plugname, "so the default has been returned"]
return defaultval
+ retdict = self.FetchDict(plugin.find(dictname))
+ return retdict
def safe(self, string):
- return string.replace("<", "$$lt$$").replace(">", "$$gt$$").replace("&","$$amp$$").replace('"',"$$quote$$")
+ return string.replace("<", "$$lt$$").replace(">", "$$gt$$")\
+ .replace("&","$$amp$$").replace('"',"$$quote$$")
def normal(self, string):
- return string.replace("$$lt$$", "<").replace("$$gt$$", ">").replace("$$amp$$","&").replace("$$quote$$",'"')
+ return string.replace("$$lt$$", "<").replace("$$gt$$", ">")\
+ .replace("$$amp$$","&").replace("$$quote$$",'"')
def SaveDoc(self):
- f = open(self.filename, "w")
- f.write(self.xml_dom.__repr__(1, 1))
- f.close()
+ with open(self.filename, "w") as f:
+ self.etree.write(f)
def LoadDoc(self):
- xml_file = open(self.filename)
- manifest = xml_file.read()
- xml_file.close()
- return xmltramp.parse(manifest)
+ with open(self.filename) as f:
+ self.etree.parse(f)
+
+manifest = ManifestChanges()
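
A rough usage sketch of the new ElementTree-backed store (the section and key
names below are made up for illustration; the methods are the ones defined
above):

    from upmana.manifest import manifest

    # ManifestChanges is a singleton: every importer sees the same instance,
    # backed by updatemana.xml under dir_struct['home']/upmana.
    manifest.SetString('updatemana', 'current', 'traipse_dev')
    current = manifest.GetString('updatemana', 'current', defaultval='')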
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/__init__.py
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/__version__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/__version__.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,2 @@
+# this file is autogenerated by setup.py
+version = "1.3.1"
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/ancestor.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/ancestor.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,85 @@
+# ancestor.py - generic DAG ancestor algorithm for mercurial
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import heapq
+
+def ancestor(a, b, pfunc):
+ """
+ return the least common ancestor of nodes a and b or None if there
+ is no such ancestor.
+
+ pfunc must return a list of parent vertices
+ """
+
+ if a == b:
+ return a
+
+ # find depth from root of all ancestors
+ parentcache = {}
+ visit = [a, b]
+ depth = {}
+ while visit:
+ vertex = visit[-1]
+ pl = pfunc(vertex)
+ parentcache[vertex] = pl
+ if not pl:
+ depth[vertex] = 0
+ visit.pop()
+ else:
+ for p in pl:
+ if p == a or p == b: # did we find a or b as a parent?
+ return p # we're done
+ if p not in depth:
+ visit.append(p)
+ if visit[-1] == vertex:
+ depth[vertex] = min([depth[p] for p in pl]) - 1
+ visit.pop()
+
+ # traverse ancestors in order of decreasing distance from root
+ def ancestors(vertex):
+ h = [(depth[vertex], vertex)]
+ seen = set()
+ while h:
+ d, n = heapq.heappop(h)
+ if n not in seen:
+ seen.add(n)
+ yield (d, n)
+ for p in parentcache[n]:
+ heapq.heappush(h, (depth[p], p))
+
+ def generations(vertex):
+ sg, s = None, set()
+ for g, v in ancestors(vertex):
+ if g != sg:
+ if sg:
+ yield sg, s
+ sg, s = g, set((v,))
+ else:
+ s.add(v)
+ yield sg, s
+
+ x = generations(a)
+ y = generations(b)
+ gx = x.next()
+ gy = y.next()
+
+ # increment each ancestor list until it is closer to root than
+ # the other, or they match
+ try:
+ while 1:
+ if gx[0] == gy[0]:
+ for v in gx[1]:
+ if v in gy[1]:
+ return v
+ gy = y.next()
+ gx = x.next()
+ elif gx[0] > gy[0]:
+ gy = y.next()
+ else:
+ gx = x.next()
+ except StopIteration:
+ return None
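
A quick illustration of the pfunc contract, using a toy DAG rather than real
changelog data:

    #   0
    #  / \
    # 1   2
    #  \ /
    #   3          (3 is a merge; 0 is the least common ancestor of 1 and 2)
    parents = {0: [], 1: [0], 2: [0], 3: [1, 2]}
    assert ancestor(1, 2, lambda v: parents[v]) == 0
    assert ancestor(1, 3, lambda v: parents[v]) == 1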
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/archival.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/archival.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,226 @@
+# archival.py - revision archival for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+from node import hex
+import util
+import cStringIO, os, stat, tarfile, time, zipfile
+import zlib, gzip
+
+def tidyprefix(dest, prefix, suffixes):
+ '''choose prefix to use for names in archive. make sure prefix is
+ safe for consumers.'''
+
+ if prefix:
+ prefix = util.normpath(prefix)
+ else:
+ if not isinstance(dest, str):
+ raise ValueError('dest must be string if no prefix')
+ prefix = os.path.basename(dest)
+ lower = prefix.lower()
+ for sfx in suffixes:
+ if lower.endswith(sfx):
+ prefix = prefix[:-len(sfx)]
+ break
+ lpfx = os.path.normpath(util.localpath(prefix))
+ prefix = util.pconvert(lpfx)
+ if not prefix.endswith('/'):
+ prefix += '/'
+ if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
+ raise util.Abort(_('archive prefix contains illegal components'))
+ return prefix
+
+class tarit(object):
+ '''write archive to tar file or stream. can write uncompressed,
+ or compress with gzip or bzip2.'''
+
+ class GzipFileWithTime(gzip.GzipFile):
+
+ def __init__(self, *args, **kw):
+ timestamp = None
+ if 'timestamp' in kw:
+ timestamp = kw.pop('timestamp')
+ if timestamp is None:
+ self.timestamp = time.time()
+ else:
+ self.timestamp = timestamp
+ gzip.GzipFile.__init__(self, *args, **kw)
+
+ def _write_gzip_header(self):
+ self.fileobj.write('\037\213') # magic header
+ self.fileobj.write('\010') # compression method
+ # Python 2.6 deprecates self.filename
+ fname = getattr(self, 'name', None) or self.filename
+ flags = 0
+ if fname:
+ flags = gzip.FNAME
+ self.fileobj.write(chr(flags))
+ gzip.write32u(self.fileobj, long(self.timestamp))
+ self.fileobj.write('\002')
+ self.fileobj.write('\377')
+ if fname:
+ self.fileobj.write(fname + '\000')
+
+ def __init__(self, dest, prefix, mtime, kind=''):
+ self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
+ '.tgz', '.tbz2'])
+ self.mtime = mtime
+
+ def taropen(name, mode, fileobj=None):
+ if kind == 'gz':
+ mode = mode[0]
+ if not fileobj:
+ fileobj = open(name, mode + 'b')
+ gzfileobj = self.GzipFileWithTime(name, mode + 'b',
+ zlib.Z_BEST_COMPRESSION,
+ fileobj, timestamp=mtime)
+ return tarfile.TarFile.taropen(name, mode, gzfileobj)
+ else:
+ return tarfile.open(name, mode + kind, fileobj)
+
+ if isinstance(dest, str):
+ self.z = taropen(dest, mode='w:')
+ else:
+ # Python 2.5-2.5.1 have a regression that requires a name arg
+ self.z = taropen(name='', mode='w|', fileobj=dest)
+
+ def addfile(self, name, mode, islink, data):
+ i = tarfile.TarInfo(self.prefix + name)
+ i.mtime = self.mtime
+ i.size = len(data)
+ if islink:
+ i.type = tarfile.SYMTYPE
+ i.mode = 0777
+ i.linkname = data
+ data = None
+ i.size = 0
+ else:
+ i.mode = mode
+ data = cStringIO.StringIO(data)
+ self.z.addfile(i, data)
+
+ def done(self):
+ self.z.close()
+
+class tellable(object):
+ '''provide tell method for zipfile.ZipFile when writing to http
+ response file object.'''
+
+ def __init__(self, fp):
+ self.fp = fp
+ self.offset = 0
+
+ def __getattr__(self, key):
+ return getattr(self.fp, key)
+
+ def write(self, s):
+ self.fp.write(s)
+ self.offset += len(s)
+
+ def tell(self):
+ return self.offset
+
+class zipit(object):
+ '''write archive to zip file or stream. can write uncompressed,
+ or compressed with deflate.'''
+
+ def __init__(self, dest, prefix, mtime, compress=True):
+ self.prefix = tidyprefix(dest, prefix, ('.zip',))
+ if not isinstance(dest, str):
+ try:
+ dest.tell()
+ except (AttributeError, IOError):
+ dest = tellable(dest)
+ self.z = zipfile.ZipFile(dest, 'w',
+ compress and zipfile.ZIP_DEFLATED or
+ zipfile.ZIP_STORED)
+ self.date_time = time.gmtime(mtime)[:6]
+
+ def addfile(self, name, mode, islink, data):
+ i = zipfile.ZipInfo(self.prefix + name, self.date_time)
+ i.compress_type = self.z.compression
+ # unzip will not honor unix file modes unless file creator is
+ # set to unix (id 3).
+ i.create_system = 3
+ ftype = stat.S_IFREG
+ if islink:
+ mode = 0777
+ ftype = stat.S_IFLNK
+ i.external_attr = (mode | ftype) << 16L
+ self.z.writestr(i, data)
+
+ def done(self):
+ self.z.close()
+
+class fileit(object):
+ '''write archive as files in directory.'''
+
+ def __init__(self, name, prefix, mtime):
+ if prefix:
+ raise util.Abort(_('cannot give prefix when archiving to files'))
+ self.basedir = name
+ self.opener = util.opener(self.basedir)
+
+ def addfile(self, name, mode, islink, data):
+ if islink:
+ self.opener.symlink(data, name)
+ return
+ f = self.opener(name, "w", atomictemp=True)
+ f.write(data)
+ f.rename()
+ destfile = os.path.join(self.basedir, name)
+ os.chmod(destfile, mode)
+
+ def done(self):
+ pass
+
+archivers = {
+ 'files': fileit,
+ 'tar': tarit,
+ 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
+ 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
+ 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
+ 'zip': zipit,
+ }
+
+def archive(repo, dest, node, kind, decode=True, matchfn=None,
+ prefix=None, mtime=None):
+ '''create archive of repo as it was at node.
+
+ dest can be name of directory, name of archive file, or file
+ object to write archive to.
+
+ kind is type of archive to create.
+
+ decode tells whether to put files through decode filters from
+ hgrc.
+
+ matchfn is function to filter names of files to write to archive.
+
+ prefix is name of path to put before every archive member.'''
+
+ def write(name, mode, islink, getdata):
+ if matchfn and not matchfn(name): return
+ data = getdata()
+ if decode:
+ data = repo.wwritedata(name, data)
+ archiver.addfile(name, mode, islink, data)
+
+ if kind not in archivers:
+ raise util.Abort(_("unknown archive type '%s'") % kind)
+
+ ctx = repo[node]
+ archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
+
+ if repo.ui.configbool("ui", "archivemeta", True):
+ write('.hg_archival.txt', 0644, False,
+ lambda: 'repo: %s\nnode: %s\n' % (
+ hex(repo.changelog.node(0)), hex(node)))
+ for f in ctx:
+ ff = ctx.flags(f)
+ write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
+ archiver.done()
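
The archivers table doubles as a small factory. A hedged sketch of driving one
archiver directly (the file name and data are made up; note that tidyprefix
derives the member prefix from the destination name when none is given):

    import time
    z = archivers['zip']('demo.zip', '', time.time())  # dest, prefix, mtime
    z.addfile('hello.txt', 0644, False, 'hello\n')     # name, mode, islink, data
    z.done()                                           # members land under 'demo/'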
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/base85.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/base85.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,74 @@
+# base85.py: pure python base85 codec
+#
+# Copyright (C) 2009 Brendan Cully <brendan@kublai.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import struct
+
+_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
+ "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
+_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
+_b85dec = {}
+
+def _mkb85dec():
+ for i, c in enumerate(_b85chars):
+ _b85dec[c] = i
+
+def b85encode(text, pad=False):
+ """encode text in base85 format"""
+ l = len(text)
+ r = l % 4
+ if r:
+ text += '\0' * (4 - r)
+ longs = len(text) >> 2
+ words = struct.unpack('>%dL' % (longs), text)
+
+ out = ''.join(_b85chars[(word / 52200625) % 85] +
+ _b85chars2[(word / 7225) % 7225] +
+ _b85chars2[word % 7225]
+ for word in words)
+
+ if pad:
+ return out
+
+ # Trim padding
+ olen = l % 4
+ if olen:
+ olen += 1
+ olen += l / 4 * 5
+ return out[:olen]
+
+def b85decode(text):
+ """decode base85-encoded text"""
+ if not _b85dec:
+ _mkb85dec()
+
+ l = len(text)
+ out = []
+ for i in range(0, len(text), 5):
+ chunk = text[i:i+5]
+ acc = 0
+ for j, c in enumerate(chunk):
+ try:
+ acc = acc * 85 + _b85dec[c]
+ except KeyError:
+ raise TypeError('Bad base85 character at byte %d' % (i + j))
+ if acc > 4294967295:
+ raise OverflowError('Base85 overflow in hunk starting at byte %d' % i)
+ out.append(acc)
+
+ # Pad final chunk if necessary
+ cl = l % 5
+ if cl:
+ acc *= 85 ** (5 - cl)
+ if cl > 1:
+ acc += 0xffffff >> (cl - 2) * 8
+ out[-1] = acc
+
+ out = struct.pack('>%dL' % (len(out)), *out)
+ if cl:
+ out = out[:-(5 - cl)]
+
+ return out
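
Round-tripping through the codec is a quick sanity check; pad=True keeps the
4-byte padding so the output length is always a multiple of five:

    data = 'any binary\x00payload'
    assert b85decode(b85encode(data)) == data
    assert len(b85encode(data, pad=True)) % 5 == 0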
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/bdiff.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/bdiff.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,76 @@
+# bdiff.py - Python implementation of bdiff.c
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import struct, difflib
+
+def splitnewlines(text):
+ '''like str.splitlines, but only split on newlines.'''
+ lines = [l + '\n' for l in text.split('\n')]
+ if lines:
+ if lines[-1] == '\n':
+ lines.pop()
+ else:
+ lines[-1] = lines[-1][:-1]
+ return lines
+
+def _normalizeblocks(a, b, blocks):
+ prev = None
+ for curr in blocks:
+ if prev is None:
+ prev = curr
+ continue
+ shift = 0
+
+ a1, b1, l1 = prev
+ a1end = a1 + l1
+ b1end = b1 + l1
+
+ a2, b2, l2 = curr
+ a2end = a2 + l2
+ b2end = b2 + l2
+ if a1end == a2:
+ while a1end+shift < a2end and a[a1end+shift] == b[b1end+shift]:
+ shift += 1
+ elif b1end == b2:
+ while b1end+shift < b2end and a[a1end+shift] == b[b1end+shift]:
+ shift += 1
+ yield a1, b1, l1+shift
+ prev = a2+shift, b2+shift, l2-shift
+ yield prev
+
+def bdiff(a, b):
+ a = str(a).splitlines(True)
+ b = str(b).splitlines(True)
+
+ if not a:
+ s = "".join(b)
+ return s and (struct.pack(">lll", 0, 0, len(s)) + s)
+
+ bin = []
+ p = [0]
+ for i in a: p.append(p[-1] + len(i))
+
+ d = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
+ d = _normalizeblocks(a, b, d)
+ la = 0
+ lb = 0
+ for am, bm, size in d:
+ s = "".join(b[lb:bm])
+ if am > la or s:
+ bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
+ la = am + size
+ lb = bm + size
+
+ return "".join(bin)
+
+def blocks(a, b):
+ an = splitnewlines(a)
+ bn = splitnewlines(b)
+ d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
+ d = _normalizeblocks(an, bn, d)
+ return [(i, i + n, j, j + n) for (i, j, n) in d]
+
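
blocks() reports matching line ranges between two texts, which is what callers
use to map hunks; a tiny demonstration with arbitrary strings:

    a = "one\ntwo\nthree\n"
    b = "one\n2\nthree\n"
    print blocks(a, b)   # [(0, 1, 0, 1), (2, 3, 2, 3), (3, 3, 3, 3)]
    delta = bdiff(a, b)  # binary opcodes: replace a[4:8] with "2\n"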
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/bundlerepo.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/bundlerepo.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,303 @@
+# bundlerepo.py - repository class for viewing uncompressed bundles
+#
+# Copyright 2006, 2007 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+"""Repository class for viewing uncompressed bundles.
+
+This provides a read-only repository interface to bundles as if they
+were part of the actual repository.
+"""
+
+from node import nullid
+from i18n import _
+import os, struct, bz2, zlib, tempfile, shutil
+import changegroup, util, mdiff
+import localrepo, changelog, manifest, filelog, revlog, error
+
+class bundlerevlog(revlog.revlog):
+ def __init__(self, opener, indexfile, bundlefile,
+ linkmapper=None):
+ # How it works:
+ # to retrieve a revision, we need to know the offset of
+ # the revision in the bundlefile (an opened file).
+ #
+ # We store this offset in the index (start), to differentiate a
+ # rev in the bundle and from a rev in the revlog, we check
+ # len(index[r]). If the tuple is bigger than 7, it is a bundle
+ # (it is bigger since we store the node to which the delta is)
+ #
+ revlog.revlog.__init__(self, opener, indexfile)
+ self.bundlefile = bundlefile
+ self.basemap = {}
+ def chunkpositer():
+ for chunk in changegroup.chunkiter(bundlefile):
+ pos = bundlefile.tell()
+ yield chunk, pos - len(chunk)
+ n = len(self)
+ prev = None
+ for chunk, start in chunkpositer():
+ size = len(chunk)
+ if size < 80:
+ raise util.Abort(_("invalid changegroup"))
+ start += 80
+ size -= 80
+ node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
+ if node in self.nodemap:
+ prev = node
+ continue
+ for p in (p1, p2):
+ if not p in self.nodemap:
+ raise error.LookupError(p1, self.indexfile,
+ _("unknown parent"))
+ if linkmapper is None:
+ link = n
+ else:
+ link = linkmapper(cs)
+
+ if not prev:
+ prev = p1
+ # start, size, full unc. size, base (unused), link, p1, p2, node
+ e = (revlog.offset_type(start, 0), size, -1, -1, link,
+ self.rev(p1), self.rev(p2), node)
+ self.basemap[n] = prev
+ self.index.insert(-1, e)
+ self.nodemap[node] = n
+ prev = node
+ n += 1
+
+ def bundle(self, rev):
+ """is rev from the bundle"""
+ if rev < 0:
+ return False
+ return rev in self.basemap
+ def bundlebase(self, rev): return self.basemap[rev]
+ def chunk(self, rev, df=None, cachelen=4096):
+ # Warning: in case of bundle, the diff is against bundlebase,
+ # not against rev - 1
+ # XXX: could use some caching
+ if not self.bundle(rev):
+ return revlog.revlog.chunk(self, rev, df)
+ self.bundlefile.seek(self.start(rev))
+ return self.bundlefile.read(self.length(rev))
+
+ def revdiff(self, rev1, rev2):
+ """return or calculate a delta between two revisions"""
+ if self.bundle(rev1) and self.bundle(rev2):
+ # hot path for bundle
+ revb = self.rev(self.bundlebase(rev2))
+ if revb == rev1:
+ return self.chunk(rev2)
+ elif not self.bundle(rev1) and not self.bundle(rev2):
+ return revlog.revlog.revdiff(self, rev1, rev2)
+
+ return mdiff.textdiff(self.revision(self.node(rev1)),
+ self.revision(self.node(rev2)))
+
+ def revision(self, node):
+ """return an uncompressed revision of a given"""
+ if node == nullid: return ""
+
+ text = None
+ chain = []
+ iter_node = node
+ rev = self.rev(iter_node)
+ # reconstruct the revision if it is from a changegroup
+ while self.bundle(rev):
+ if self._cache and self._cache[0] == iter_node:
+ text = self._cache[2]
+ break
+ chain.append(rev)
+ iter_node = self.bundlebase(rev)
+ rev = self.rev(iter_node)
+ if text is None:
+ text = revlog.revlog.revision(self, iter_node)
+
+ while chain:
+ delta = self.chunk(chain.pop())
+ text = mdiff.patches(text, [delta])
+
+ p1, p2 = self.parents(node)
+ if node != revlog.hash(text, p1, p2):
+ raise error.RevlogError(_("integrity check failed on %s:%d")
+ % (self.datafile, self.rev(node)))
+
+ self._cache = (node, self.rev(node), text)
+ return text
+
+ def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
+ raise NotImplementedError
+ def addgroup(self, revs, linkmapper, transaction):
+ raise NotImplementedError
+ def strip(self, rev, minlink):
+ raise NotImplementedError
+ def checksize(self):
+ raise NotImplementedError
+
+class bundlechangelog(bundlerevlog, changelog.changelog):
+ def __init__(self, opener, bundlefile):
+ changelog.changelog.__init__(self, opener)
+ bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
+
+class bundlemanifest(bundlerevlog, manifest.manifest):
+ def __init__(self, opener, bundlefile, linkmapper):
+ manifest.manifest.__init__(self, opener)
+ bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
+ linkmapper)
+
+class bundlefilelog(bundlerevlog, filelog.filelog):
+ def __init__(self, opener, path, bundlefile, linkmapper):
+ filelog.filelog.__init__(self, opener, path)
+ bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
+ linkmapper)
+
+class bundlerepository(localrepo.localrepository):
+ def __init__(self, ui, path, bundlename):
+ self._tempparent = None
+ try:
+ localrepo.localrepository.__init__(self, ui, path)
+ except error.RepoError:
+ self._tempparent = tempfile.mkdtemp()
+ localrepo.instance(ui,self._tempparent,1)
+ localrepo.localrepository.__init__(self, ui, self._tempparent)
+
+ if path:
+ self._url = 'bundle:' + path + '+' + bundlename
+ else:
+ self._url = 'bundle:' + bundlename
+
+ self.tempfile = None
+ self.bundlefile = open(bundlename, "rb")
+ header = self.bundlefile.read(6)
+ if not header.startswith("HG"):
+ raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
+ elif not header.startswith("HG10"):
+ raise util.Abort(_("%s: unknown bundle version") % bundlename)
+ elif (header == "HG10BZ") or (header == "HG10GZ"):
+ fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
+ suffix=".hg10un", dir=self.path)
+ self.tempfile = temp
+ fptemp = os.fdopen(fdtemp, 'wb')
+ def generator(f):
+ if header == "HG10BZ":
+ zd = bz2.BZ2Decompressor()
+ zd.decompress("BZ")
+ elif header == "HG10GZ":
+ zd = zlib.decompressobj()
+ for chunk in f:
+ yield zd.decompress(chunk)
+ gen = generator(util.filechunkiter(self.bundlefile, 4096))
+
+ try:
+ fptemp.write("HG10UN")
+ for chunk in gen:
+ fptemp.write(chunk)
+ finally:
+ fptemp.close()
+ self.bundlefile.close()
+
+ self.bundlefile = open(self.tempfile, "rb")
+ # seek right after the header
+ self.bundlefile.seek(6)
+ elif header == "HG10UN":
+ # nothing to do
+ pass
+ else:
+ raise util.Abort(_("%s: unknown bundle compression type")
+ % bundlename)
+ # dict with the mapping 'filename' -> position in the bundle
+ self.bundlefilespos = {}
+
+ @util.propertycache
+ def changelog(self):
+ c = bundlechangelog(self.sopener, self.bundlefile)
+ self.manstart = self.bundlefile.tell()
+ return c
+
+ @util.propertycache
+ def manifest(self):
+ self.bundlefile.seek(self.manstart)
+ m = bundlemanifest(self.sopener, self.bundlefile, self.changelog.rev)
+ self.filestart = self.bundlefile.tell()
+ return m
+
+ @util.propertycache
+ def manstart(self):
+ self.changelog
+ return self.manstart
+
+ @util.propertycache
+ def filestart(self):
+ self.manifest
+ return self.filestart
+
+ def url(self):
+ return self._url
+
+ def file(self, f):
+ if not self.bundlefilespos:
+ self.bundlefile.seek(self.filestart)
+ while 1:
+ chunk = changegroup.getchunk(self.bundlefile)
+ if not chunk:
+ break
+ self.bundlefilespos[chunk] = self.bundlefile.tell()
+ for c in changegroup.chunkiter(self.bundlefile):
+ pass
+
+ if f[0] == '/':
+ f = f[1:]
+ if f in self.bundlefilespos:
+ self.bundlefile.seek(self.bundlefilespos[f])
+ return bundlefilelog(self.sopener, f, self.bundlefile,
+ self.changelog.rev)
+ else:
+ return filelog.filelog(self.sopener, f)
+
+ def close(self):
+ """Close assigned bundle file immediately."""
+ self.bundlefile.close()
+
+ def __del__(self):
+ bundlefile = getattr(self, 'bundlefile', None)
+ if bundlefile and not bundlefile.closed:
+ bundlefile.close()
+ tempfile = getattr(self, 'tempfile', None)
+ if tempfile is not None:
+ os.unlink(tempfile)
+ if self._tempparent:
+ shutil.rmtree(self._tempparent, True)
+
+ def cancopy(self):
+ return False
+
+ def getcwd(self):
+ return os.getcwd() # always outside the repo
+
+def instance(ui, path, create):
+ if create:
+ raise util.Abort(_('cannot create new bundle repository'))
+ parentpath = ui.config("bundle", "mainreporoot", "")
+ if parentpath:
+ # Try to make the full path relative so we get a nice, short URL.
+ # In particular, we don't want temp dir names in test outputs.
+ cwd = os.getcwd()
+ if parentpath == cwd:
+ parentpath = ''
+ else:
+ cwd = os.path.join(cwd,'')
+ if parentpath.startswith(cwd):
+ parentpath = parentpath[len(cwd):]
+ path = util.drop_scheme('file', path)
+ if path.startswith('bundle:'):
+ path = util.drop_scheme('bundle', path)
+ s = path.split("+", 1)
+ if len(s) == 1:
+ repopath, bundlename = parentpath, s[0]
+ else:
+ repopath, bundlename = s
+ else:
+ repopath, bundlename = parentpath, path
+ return bundlerepository(ui, repopath, bundlename)
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/byterange.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/byterange.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,469 @@
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA
+
+# This file is part of urlgrabber, a high-level cross-protocol url-grabber
+# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
+
+# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
+
+import os
+import stat
+import urllib
+import urllib2
+import email.Utils
+
+try:
+ from cStringIO import StringIO
+except ImportError, msg:
+ from StringIO import StringIO
+
+class RangeError(IOError):
+ """Error raised when an unsatisfiable range is requested."""
+ pass
+
+class HTTPRangeHandler(urllib2.BaseHandler):
+ """Handler that enables HTTP Range headers.
+
+ This was extremely simple. The Range header is a HTTP feature to
+ begin with so all this class does is tell urllib2 that the
+ "206 Partial Content" reponse from the HTTP server is what we
+ expected.
+
+ Example:
+ import urllib2
+ import byterange
+
+ range_handler = range.HTTPRangeHandler()
+ opener = urllib2.build_opener(range_handler)
+
+ # install it
+ urllib2.install_opener(opener)
+
+ # create Request and set Range header
+ req = urllib2.Request('http://www.python.org/')
+ req.header['Range'] = 'bytes=30-50'
+ f = urllib2.urlopen(req)
+ """
+
+ def http_error_206(self, req, fp, code, msg, hdrs):
+ # 206 Partial Content Response
+ r = urllib.addinfourl(fp, hdrs, req.get_full_url())
+ r.code = code
+ r.msg = msg
+ return r
+
+ def http_error_416(self, req, fp, code, msg, hdrs):
+ # HTTP's Range Not Satisfiable error
+ raise RangeError('Requested Range Not Satisfiable')
+
+class RangeableFileObject:
+ """File object wrapper to enable raw range handling.
+ This was implemented primarily for handling range
+ specifications for file:// urls. This object effectively makes
+ a file object look like it consists only of a range of bytes in
+ the stream.
+
+ Examples:
+ # expose 10 bytes, starting at byte position 20, from
+ # /etc/aliases.
+ >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
+ # seek seeks within the range (to position 23 in this case)
+ >>> fo.seek(3)
+ # tell tells where you are _within the range_ (position 3 in
+ # this case)
+ >>> fo.tell()
+ # read EOFs if an attempt is made to read past the last
+ # byte in the range. the following will return only 7 bytes.
+ >>> fo.read(30)
+ """
+
+ def __init__(self, fo, rangetup):
+ """Create a RangeableFileObject.
+ fo -- a file like object. only the read() method need be
+ supported but supporting an optimized seek() is
+ preferable.
+ rangetup -- a (firstbyte,lastbyte) tuple specifying the range
+ to work over.
+ The file object provided is assumed to be at byte offset 0.
+ """
+ self.fo = fo
+ (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
+ self.realpos = 0
+ self._do_seek(self.firstbyte)
+
+ def __getattr__(self, name):
+ """This effectively allows us to wrap at the instance level.
+ Any attribute not found in _this_ object will be searched for
+ in self.fo. This includes methods."""
+ if hasattr(self.fo, name):
+ return getattr(self.fo, name)
+ raise AttributeError(name)
+
+ def tell(self):
+ """Return the position within the range.
+ This is different from fo.seek in that position 0 is the
+ first byte position of the range tuple. For example, if
+ this object was created with a range tuple of (500,899),
+ tell() will return 0 when at byte position 500 of the file.
+ """
+ return (self.realpos - self.firstbyte)
+
+ def seek(self, offset, whence=0):
+ """Seek within the byte range.
+ Positioning is identical to that described under tell().
+ """
+ assert whence in (0, 1, 2)
+ if whence == 0: # absolute seek
+ realoffset = self.firstbyte + offset
+ elif whence == 1: # relative seek
+ realoffset = self.realpos + offset
+ elif whence == 2: # absolute from end of file
+ # XXX: are we raising the right Error here?
+ raise IOError('seek from end of file not supported.')
+
+ # do not allow seek past lastbyte in range
+ if self.lastbyte and (realoffset >= self.lastbyte):
+ realoffset = self.lastbyte
+
+ self._do_seek(realoffset - self.realpos)
+
+ def read(self, size=-1):
+ """Read within the range.
+ This method will limit the size read based on the range.
+ """
+ size = self._calc_read_size(size)
+ rslt = self.fo.read(size)
+ self.realpos += len(rslt)
+ return rslt
+
+ def readline(self, size=-1):
+ """Read lines within the range.
+ This method will limit the size read based on the range.
+ """
+ size = self._calc_read_size(size)
+ rslt = self.fo.readline(size)
+ self.realpos += len(rslt)
+ return rslt
+
+ def _calc_read_size(self, size):
+ """Handles calculating the amount of data to read based on
+ the range.
+ """
+ if self.lastbyte:
+ if size > -1:
+ if ((self.realpos + size) >= self.lastbyte):
+ size = (self.lastbyte - self.realpos)
+ else:
+ size = (self.lastbyte - self.realpos)
+ return size
+
+ def _do_seek(self, offset):
+ """Seek based on whether wrapped object supports seek().
+ offset is relative to the current position (self.realpos).
+ """
+ assert offset >= 0
+ if not hasattr(self.fo, 'seek'):
+ self._poor_mans_seek(offset)
+ else:
+ self.fo.seek(self.realpos + offset)
+ self.realpos += offset
+
+ def _poor_mans_seek(self, offset):
+ """Seek by calling the wrapped file objects read() method.
+ This is used for file like objects that do not have native
+ seek support. The wrapped objects read() method is called
+ to manually seek to the desired position.
+ offset -- read this number of bytes from the wrapped
+ file object.
+ raise RangeError if we encounter EOF before reaching the
+ specified offset.
+ """
+ pos = 0
+ bufsize = 1024
+ while pos < offset:
+ if (pos + bufsize) > offset:
+ bufsize = offset - pos
+ buf = self.fo.read(bufsize)
+ if len(buf) != bufsize:
+ raise RangeError('Requested Range Not Satisfiable')
+ pos += bufsize
+
+class FileRangeHandler(urllib2.FileHandler):
+ """FileHandler subclass that adds Range support.
+ This class handles Range headers exactly like an HTTP
+ server would.
+ """
+ def open_local_file(self, req):
+ import mimetypes
+ import mimetools
+ host = req.get_host()
+ file = req.get_selector()
+ localfile = urllib.url2pathname(file)
+ stats = os.stat(localfile)
+ size = stats[stat.ST_SIZE]
+ modified = email.Utils.formatdate(stats[stat.ST_MTIME])
+ mtype = mimetypes.guess_type(file)[0]
+ if host:
+ host, port = urllib.splitport(host)
+ if port or socket.gethostbyname(host) not in self.get_names():
+ raise urllib2.URLError('file not on local host')
+ fo = open(localfile,'rb')
+ brange = req.headers.get('Range', None)
+ brange = range_header_to_tuple(brange)
+ assert brange != ()
+ if brange:
+ (fb, lb) = brange
+ if lb == '':
+ lb = size
+ if fb < 0 or fb > size or lb > size:
+ raise RangeError('Requested Range Not Satisfiable')
+ size = (lb - fb)
+ fo = RangeableFileObject(fo, (fb, lb))
+ headers = mimetools.Message(StringIO(
+ 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
+ (mtype or 'text/plain', size, modified)))
+ return urllib.addinfourl(fo, headers, 'file:'+file)
+
+
+# FTP Range Support
+# Unfortunately, a large amount of base FTP code had to be copied
+# from urllib and urllib2 in order to insert the FTP REST command.
+# Code modifications for range support have been commented as
+# follows:
+# -- range support modifications start/end here
+
+from urllib import splitport, splituser, splitpasswd, splitattr, \
+ unquote, addclosehook, addinfourl
+import ftplib
+import socket
+import sys
+import mimetypes
+import mimetools
+
+class FTPRangeHandler(urllib2.FTPHandler):
+ def ftp_open(self, req):
+ host = req.get_host()
+ if not host:
+ raise IOError('ftp error', 'no host given')
+ host, port = splitport(host)
+ if port is None:
+ port = ftplib.FTP_PORT
+
+ # username/password handling
+ user, host = splituser(host)
+ if user:
+ user, passwd = splitpasswd(user)
+ else:
+ passwd = None
+ host = unquote(host)
+ user = unquote(user or '')
+ passwd = unquote(passwd or '')
+
+ try:
+ host = socket.gethostbyname(host)
+ except socket.error, msg:
+ raise urllib2.URLError(msg)
+ path, attrs = splitattr(req.get_selector())
+ dirs = path.split('/')
+ dirs = map(unquote, dirs)
+ dirs, file = dirs[:-1], dirs[-1]
+ if dirs and not dirs[0]:
+ dirs = dirs[1:]
+ try:
+ fw = self.connect_ftp(user, passwd, host, port, dirs)
+ type = file and 'I' or 'D'
+ for attr in attrs:
+ attr, value = splitattr(attr)
+ if attr.lower() == 'type' and \
+ value in ('a', 'A', 'i', 'I', 'd', 'D'):
+ type = value.upper()
+
+ # -- range support modifications start here
+ rest = None
+ range_tup = range_header_to_tuple(req.headers.get('Range', None))
+ assert range_tup != ()
+ if range_tup:
+ (fb, lb) = range_tup
+ if fb > 0:
+ rest = fb
+ # -- range support modifications end here
+
+ fp, retrlen = fw.retrfile(file, type, rest)
+
+ # -- range support modifications start here
+ if range_tup:
+ (fb, lb) = range_tup
+ if lb == '':
+ if retrlen is None or retrlen == 0:
+ raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
+ lb = retrlen
+ retrlen = lb - fb
+ if retrlen < 0:
+ # beginning of range is larger than file
+ raise RangeError('Requested Range Not Satisfiable')
+ else:
+ retrlen = lb - fb
+ fp = RangeableFileObject(fp, (0, retrlen))
+ # -- range support modifications end here
+
+ headers = ""
+ mtype = mimetypes.guess_type(req.get_full_url())[0]
+ if mtype:
+ headers += "Content-Type: %s\n" % mtype
+ if retrlen is not None and retrlen >= 0:
+ headers += "Content-Length: %d\n" % retrlen
+ sf = StringIO(headers)
+ headers = mimetools.Message(sf)
+ return addinfourl(fp, headers, req.get_full_url())
+ except ftplib.all_errors, msg:
+ raise IOError('ftp error', msg), sys.exc_info()[2]
+
+ def connect_ftp(self, user, passwd, host, port, dirs):
+ fw = ftpwrapper(user, passwd, host, port, dirs)
+ return fw
+
+class ftpwrapper(urllib.ftpwrapper):
+ # range support note:
+ # this ftpwrapper code is copied directly from
+ # urllib. The only enhancement is to add the rest
+ # argument and pass it on to ftp.ntransfercmd
+ def retrfile(self, file, type, rest=None):
+ self.endtransfer()
+ if type in ('d', 'D'):
+ cmd = 'TYPE A'
+ isdir = 1
+ else:
+ cmd = 'TYPE ' + type
+ isdir = 0
+ try:
+ self.ftp.voidcmd(cmd)
+ except ftplib.all_errors:
+ self.init()
+ self.ftp.voidcmd(cmd)
+ conn = None
+ if file and not isdir:
+ # Use nlst to see if the file exists at all
+ try:
+ self.ftp.nlst(file)
+ except ftplib.error_perm, reason:
+ raise IOError('ftp error', reason), sys.exc_info()[2]
+ # Restore the transfer mode!
+ self.ftp.voidcmd(cmd)
+ # Try to retrieve as a file
+ try:
+ cmd = 'RETR ' + file
+ conn = self.ftp.ntransfercmd(cmd, rest)
+ except ftplib.error_perm, reason:
+ if str(reason).startswith('501'):
+ # workaround for REST not supported error
+ fp, retrlen = self.retrfile(file, type)
+ fp = RangeableFileObject(fp, (rest,''))
+ return (fp, retrlen)
+ elif not str(reason).startswith('550'):
+ raise IOError('ftp error', reason), sys.exc_info()[2]
+ if not conn:
+ # Set transfer mode to ASCII!
+ self.ftp.voidcmd('TYPE A')
+ # Try a directory listing
+ if file:
+ cmd = 'LIST ' + file
+ else:
+ cmd = 'LIST'
+ conn = self.ftp.ntransfercmd(cmd)
+ self.busy = 1
+ # Pass back both a suitably decorated object and a retrieval length
+ return (addclosehook(conn[0].makefile('rb'),
+ self.endtransfer), conn[1])
+
+
+####################################################################
+# Range Tuple Functions
+# XXX: These range tuple functions might go better in a class.
+
+_rangere = None
+def range_header_to_tuple(range_header):
+ """Get a (firstbyte,lastbyte) tuple from a Range header value.
+
+ Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
+ function pulls the firstbyte and lastbyte values and returns
+ a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
+ the header value, it is returned as an empty string in the
+ tuple.
+
+ Return None if range_header is None
+ Return () if range_header does not conform to the range spec
+ pattern.
+
+ """
+ global _rangere
+ if range_header is None:
+ return None
+ if _rangere is None:
+ import re
+ _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
+ match = _rangere.match(range_header)
+ if match:
+ tup = range_tuple_normalize(match.group(1, 2))
+ if tup and tup[1]:
+ tup = (tup[0], tup[1]+1)
+ return tup
+ return ()
+
+def range_tuple_to_header(range_tup):
+ """Convert a range tuple to a Range header value.
+ Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
+ if no range is needed.
+ """
+ if range_tup is None:
+ return None
+ range_tup = range_tuple_normalize(range_tup)
+ if range_tup:
+ if range_tup[1]:
+ range_tup = (range_tup[0], range_tup[1] - 1)
+ return 'bytes=%s-%s' % range_tup
+
+def range_tuple_normalize(range_tup):
+ """Normalize a (first_byte,last_byte) range tuple.
+ Return a tuple whose first element is guaranteed to be an int
+ and whose second element will be '' (meaning: the last byte) or
+ an int. Finally, return None if the normalized tuple == (0,'')
+ as that is equivalent to retrieving the entire file.
+ """
+ if range_tup is None:
+ return None
+ # handle first byte
+ fb = range_tup[0]
+ if fb in (None, ''):
+ fb = 0
+ else:
+ fb = int(fb)
+ # handle last byte
+ try:
+ lb = range_tup[1]
+ except IndexError:
+ lb = ''
+ else:
+ if lb is None:
+ lb = ''
+ elif lb != '':
+ lb = int(lb)
+ # check if range is over the entire file
+ if (fb, lb) == (0, ''):
+ return None
+ # check that the range is valid
+ if lb < fb:
+ raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
+ return (fb, lb)
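
The tuple helpers convert between header strings and normalized range tuples;
note the exclusive upper bound on the tuple side (a small self-check):

    assert range_header_to_tuple('bytes=30-50') == (30, 51)
    assert range_header_to_tuple('bytes=30-') == (30, '')
    assert range_tuple_to_header((30, 51)) == 'bytes=30-50'
    assert range_tuple_normalize((0, '')) is None  # whole file: no header needed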
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/changegroup.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/changegroup.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,140 @@
+# changegroup.py - Mercurial changegroup manipulation functions
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import util
+import struct, os, bz2, zlib, tempfile
+
+def getchunk(source):
+ """get a chunk from a changegroup"""
+ d = source.read(4)
+ if not d:
+ return ""
+ l = struct.unpack(">l", d)[0]
+ if l <= 4:
+ return ""
+ d = source.read(l - 4)
+ if len(d) < l - 4:
+ raise util.Abort(_("premature EOF reading chunk"
+ " (got %d bytes, expected %d)")
+ % (len(d), l - 4))
+ return d
+
+def chunkiter(source):
+ """iterate through the chunks in source"""
+ while 1:
+ c = getchunk(source)
+ if not c:
+ break
+ yield c
+
+def chunkheader(length):
+ """build a changegroup chunk header"""
+ return struct.pack(">l", length + 4)
+
+def closechunk():
+ return struct.pack(">l", 0)
+
+class nocompress(object):
+ def compress(self, x):
+ return x
+ def flush(self):
+ return ""
+
+bundletypes = {
+ "": ("", nocompress),
+ "HG10UN": ("HG10UN", nocompress),
+ "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
+ "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
+}
+
+# hgweb uses this list to communicate its preferred type
+bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
+
+def writebundle(cg, filename, bundletype):
+ """Write a bundle file and return its filename.
+
+ Existing files will not be overwritten.
+ If no filename is specified, a temporary file is created.
+ bz2 compression can be turned off.
+ The bundle file will be deleted in case of errors.
+ """
+
+ fh = None
+ cleanup = None
+ try:
+ if filename:
+ fh = open(filename, "wb")
+ else:
+ fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
+ fh = os.fdopen(fd, "wb")
+ cleanup = filename
+
+ header, compressor = bundletypes[bundletype]
+ fh.write(header)
+ z = compressor()
+
+ # parse the changegroup data, otherwise we will block
+ # in case of sshrepo because we don't know the end of the stream
+
+ # an empty chunkiter is the end of the changegroup
+ # a changegroup has at least 2 chunkiters (changelog and manifest).
+ # after that, an empty chunkiter is the end of the changegroup
+ empty = False
+ count = 0
+ while not empty or count <= 2:
+ empty = True
+ count += 1
+ for chunk in chunkiter(cg):
+ empty = False
+ fh.write(z.compress(chunkheader(len(chunk))))
+ pos = 0
+ while pos < len(chunk):
+ next = pos + 2**20
+ fh.write(z.compress(chunk[pos:next]))
+ pos = next
+ fh.write(z.compress(closechunk()))
+ fh.write(z.flush())
+ cleanup = None
+ return filename
+ finally:
+ if fh is not None:
+ fh.close()
+ if cleanup is not None:
+ os.unlink(cleanup)
+
+def unbundle(header, fh):
+ if header == 'HG10UN':
+ return fh
+ elif not header.startswith('HG'):
+ # old client with uncompressed bundle
+ def generator(f):
+ yield header
+ for chunk in f:
+ yield chunk
+ elif header == 'HG10GZ':
+ def generator(f):
+ zd = zlib.decompressobj()
+ for chunk in f:
+ yield zd.decompress(chunk)
+ elif header == 'HG10BZ':
+ def generator(f):
+ zd = bz2.BZ2Decompressor()
+ zd.decompress("BZ")
+ for chunk in util.filechunkiter(f, 4096):
+ yield zd.decompress(chunk)
+ return util.chunkbuffer(generator(fh))
+
+def readbundle(fh, fname):
+ header = fh.read(6)
+ if not header.startswith('HG'):
+ raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
+ if not header.startswith('HG10'):
+ raise util.Abort(_('%s: unknown bundle version') % fname)
+ elif header not in bundletypes:
+ raise util.Abort(_('%s: unknown bundle compression type') % fname)
+ return unbundle(header, fh)
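+
+# Round-trip sketch (hypothetical filename, for illustration): note that
+# the 'HG10BZ' type writes only 'HG10' to disk; the bz2 stream itself
+# begins with 'BZ', so readbundle's 6-byte read reassembles 'HG10BZ',
+# and unbundle re-primes its decompressor with the 'BZ' it consumed:
+#
+#   fname = writebundle(cg, 'backup.hg', 'HG10BZ')
+#   cg2 = readbundle(open(fname, 'rb'), fname)   # a util.chunkbuffer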
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/changelog.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/changelog.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,230 @@
+# changelog.py - changelog class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import bin, hex, nullid
+from i18n import _
+import util, error, revlog, encoding
+
+def _string_escape(text):
+ """
+ >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
+ >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
+ >>> s
+ 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
+ >>> res = _string_escape(s)
+ >>> s == res.decode('string_escape')
+ True
+ """
+ # subset of the string_escape codec
+ text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
+ return text.replace('\0', '\\0')
+
+def decodeextra(text):
+ extra = {}
+ for l in text.split('\0'):
+ if l:
+ k, v = l.decode('string_escape').split(':', 1)
+ extra[k] = v
+ return extra
+
+def encodeextra(d):
+ # keys must be sorted to produce a deterministic changelog entry
+ items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
+ return "\0".join(items)
+
+class appender(object):
+ '''the changelog index must be updated last on disk, so we use this class
+ to delay writes to it'''
+ def __init__(self, fp, buf):
+ self.data = buf
+ self.fp = fp
+ self.offset = fp.tell()
+ self.size = util.fstat(fp).st_size
+
+ def end(self):
+ return self.size + len("".join(self.data))
+ def tell(self):
+ return self.offset
+ def flush(self):
+ pass
+ def close(self):
+ self.fp.close()
+
+ def seek(self, offset, whence=0):
+ '''virtual file offset spans real file and data'''
+ if whence == 0:
+ self.offset = offset
+ elif whence == 1:
+ self.offset += offset
+ elif whence == 2:
+ self.offset = self.end() + offset
+ if self.offset < self.size:
+ self.fp.seek(self.offset)
+
+ def read(self, count=-1):
+ '''only trick here is reads that span real file and data'''
+ ret = ""
+ if self.offset < self.size:
+ s = self.fp.read(count)
+ ret = s
+ self.offset += len(s)
+ if count > 0:
+ count -= len(s)
+ if count != 0:
+ doff = self.offset - self.size
+ self.data.insert(0, "".join(self.data))
+ del self.data[1:]
+ s = self.data[0][doff:doff+count]
+ self.offset += len(s)
+ ret += s
+ return ret
+
+ def write(self, s):
+ self.data.append(str(s))
+ self.offset += len(s)
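+
+# appender sketch: offsets below the real file's size read from disk and
+# anything past it reads from the pending buffer, so index readers see
+# one contiguous virtual file. With a 100-byte index and 40 buffered
+# bytes (sizes illustrative):
+#
+#   a.seek(90); a.read(20)   # 10 bytes from the file + 10 from the buffer
+#   a.end()                  # 140, the virtual length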
+
+class changelog(revlog.revlog):
+ def __init__(self, opener):
+ self._realopener = opener
+ self._delayed = False
+ revlog.revlog.__init__(self, self._delayopener, "00changelog.i")
+
+ def delayupdate(self):
+ "delay visibility of index updates to other readers"
+ self._delayed = True
+ self._delaycount = len(self)
+ self._delaybuf = []
+ self._delayname = None
+
+ def finalize(self, tr):
+ "finalize index updates"
+ self._delayed = False
+ # move redirected index data back into place
+ if self._delayname:
+ util.rename(self._delayname + ".a", self._delayname)
+ elif self._delaybuf:
+ fp = self.opener(self.indexfile, 'a')
+ fp.write("".join(self._delaybuf))
+ fp.close()
+ self._delaybuf = []
+ # split when we're done
+ self.checkinlinesize(tr)
+
+ def _delayopener(self, name, mode='r'):
+ fp = self._realopener(name, mode)
+ # only divert the index
+ if not self._delayed or not name == self.indexfile:
+ return fp
+ # if we're doing an initial clone, divert to another file
+ if self._delaycount == 0:
+ self._delayname = fp.name
+ if not len(self):
+ # make sure to truncate the file
+ mode = mode.replace('a', 'w')
+ return self._realopener(name + ".a", mode)
+ # otherwise, divert to memory
+ return appender(fp, self._delaybuf)
+
+ def readpending(self, file):
+ r = revlog.revlog(self.opener, file)
+ self.index = r.index
+ self.nodemap = r.nodemap
+ self._chunkcache = r._chunkcache
+
+ def writepending(self):
+ "create a file containing the unfinalized state for pretxnchangegroup"
+ if self._delaybuf:
+ # make a temporary copy of the index
+ fp1 = self._realopener(self.indexfile)
+ fp2 = self._realopener(self.indexfile + ".a", "w")
+ fp2.write(fp1.read())
+ # add pending data
+ fp2.write("".join(self._delaybuf))
+ fp2.close()
+ # switch modes so finalize can simply rename
+ self._delaybuf = []
+ self._delayname = fp1.name
+
+ if self._delayname:
+ return True
+
+ return False
+
+ def checkinlinesize(self, tr, fp=None):
+ if self.opener == self._delayopener:
+ return
+ return revlog.revlog.checkinlinesize(self, tr, fp)
+
+ def read(self, node):
+ """
+ format used:
+ nodeid\n : manifest node in ascii
+ user\n : user, no \n or \r allowed
+ time tz extra\n : date (time is int or float, timezone is int)
+ : extra is metadata, encoded and separated by '\0'
+ : older versions ignore it
+ files\n\n : files modified by the cset, no \n or \r allowed
+ (.*) : comment (free text, ideally utf-8)
+
+ changelog v0 doesn't use extra
+ """
+ text = self.revision(node)
+ if not text:
+ return (nullid, "", (0, 0), [], "", {'branch': 'default'})
+ last = text.index("\n\n")
+ desc = encoding.tolocal(text[last + 2:])
+ l = text[:last].split('\n')
+ manifest = bin(l[0])
+ user = encoding.tolocal(l[1])
+
+ extra_data = l[2].split(' ', 2)
+ if len(extra_data) != 3:
+ time = float(extra_data.pop(0))
+ try:
+ # various tools did silly things with the time zone field.
+ timezone = int(extra_data[0])
+ except:
+ timezone = 0
+ extra = {}
+ else:
+ time, timezone, extra = extra_data
+ time, timezone = float(time), int(timezone)
+ extra = decodeextra(extra)
+ if not extra.get('branch'):
+ extra['branch'] = 'default'
+ files = l[3:]
+ return (manifest, user, (time, timezone), files, desc, extra)
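+
+# A serialized entry, as written by add() below and unpacked here, looks
+# like this (values are illustrative):
+#
+#   0a1b2c...                         manifest node in hex
+#   Example User <user@example.com>   committer
+#   1256959300 18000 branch:stable    time, timezone, encoded extra
+#   orpg/orpg_version.py              one changed file per line
+#                                     blank separator line
+#   commit message text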
+
+ def add(self, manifest, files, desc, transaction, p1, p2,
+ user, date=None, extra={}):
+ user = user.strip()
+ # An empty username or a username with a "\n" will make the
+ # revision text contain two "\n\n" sequences -> corrupt
+ # repository since read cannot unpack the revision.
+ if not user:
+ raise error.RevlogError(_("empty username"))
+ if "\n" in user:
+ raise error.RevlogError(_("username %s contains a newline")
+ % repr(user))
+
+ # strip trailing whitespace and leading and trailing empty lines
+ desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
+
+ user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
+
+ if date:
+ parseddate = "%d %d" % util.parsedate(date)
+ else:
+ parseddate = "%d %d" % util.makedate()
+ if extra and extra.get("branch") in ("default", ""):
+ del extra["branch"]
+ if extra:
+ extra = encodeextra(extra)
+ parseddate = "%s %s" % (parseddate, extra)
+ l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
+ text = "\n".join(l)
+ return self.addrevision(text, transaction, len(self), p1, p2)
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/cmdutil.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/cmdutil.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,1254 @@
+# cmdutil.py - help for command processing in mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import hex, nullid, nullrev, short
+from i18n import _
+import os, sys, errno, re, glob
+import mdiff, bdiff, util, templater, patch, error, encoding
+import match as _match
+
+revrangesep = ':'
+
+def findpossible(cmd, table, strict=False):
+ """
+ Return cmd -> (aliases, command table entry)
+ for each matching command.
+ Return debug commands (or their aliases) only if no normal command matches.
+ """
+ choice = {}
+ debugchoice = {}
+ for e in table.keys():
+ aliases = e.lstrip("^").split("|")
+ found = None
+ if cmd in aliases:
+ found = cmd
+ elif not strict:
+ for a in aliases:
+ if a.startswith(cmd):
+ found = a
+ break
+ if found is not None:
+ if aliases[0].startswith("debug") or found.startswith("debug"):
+ debugchoice[found] = (aliases, table[e])
+ else:
+ choice[found] = (aliases, table[e])
+
+ if not choice and debugchoice:
+ choice = debugchoice
+
+ return choice
+
+def findcmd(cmd, table, strict=True):
+ """Return (aliases, command table entry) for command string."""
+ choice = findpossible(cmd, table, strict)
+
+ if cmd in choice:
+ return choice[cmd]
+
+ if len(choice) > 1:
+ clist = choice.keys()
+ clist.sort()
+ raise error.AmbiguousCommand(cmd, clist)
+
+ if choice:
+ return choice.values()[0]
+
+ raise error.UnknownCommand(cmd)
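+
+# Matching sketch: given table keys '^annotate|blame' and '^archive',
+# findcmd('an', table, strict=False) resolves to annotate by unique
+# prefix, findcmd('blame', table) matches the alias exactly, and
+# findcmd('a', table, strict=False) raises AmbiguousCommand because both
+# entries start with 'a'.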
+
+def bail_if_changed(repo):
+ if repo.dirstate.parents()[1] != nullid:
+ raise util.Abort(_('outstanding uncommitted merge'))
+ modified, added, removed, deleted = repo.status()[:4]
+ if modified or added or removed or deleted:
+ raise util.Abort(_("outstanding uncommitted changes"))
+
+def logmessage(opts):
+ """ get the log message according to -m and -l option """
+ message = opts.get('message')
+ logfile = opts.get('logfile')
+
+ if message and logfile:
+ raise util.Abort(_('options --message and --logfile are mutually '
+ 'exclusive'))
+ if not message and logfile:
+ try:
+ if logfile == '-':
+ message = sys.stdin.read()
+ else:
+ message = open(logfile).read()
+ except IOError, inst:
+ raise util.Abort(_("can't read commit message '%s': %s") %
+ (logfile, inst.strerror))
+ return message
+
+def loglimit(opts):
+ """get the log limit according to option -l/--limit"""
+ limit = opts.get('limit')
+ if limit:
+ try:
+ limit = int(limit)
+ except ValueError:
+ raise util.Abort(_('limit must be a positive integer'))
+ if limit <= 0: raise util.Abort(_('limit must be positive'))
+ else:
+ limit = sys.maxint
+ return limit
+
+def remoteui(src, opts):
+ 'build a remote ui from ui or repo and opts'
+ if hasattr(src, 'baseui'): # looks like a repository
+ dst = src.baseui.copy() # drop repo-specific config
+ src = src.ui # copy target options from repo
+ else: # assume it's a global ui object
+ dst = src.copy() # keep all global options
+
+ # copy ssh-specific options
+ for o in 'ssh', 'remotecmd':
+ v = opts.get(o) or src.config('ui', o)
+ if v:
+ dst.setconfig("ui", o, v)
+ # copy bundle-specific options
+ r = src.config('bundle', 'mainreporoot')
+ if r:
+ dst.setconfig('bundle', 'mainreporoot', r)
+
+ return dst
+
+def revpair(repo, revs):
+ '''return pair of nodes, given list of revisions. second item can
+ be None, meaning use working dir.'''
+
+ def revfix(repo, val, defval):
+ if not val and val != 0 and defval is not None:
+ val = defval
+ return repo.lookup(val)
+
+ if not revs:
+ return repo.dirstate.parents()[0], None
+ end = None
+ if len(revs) == 1:
+ if revrangesep in revs[0]:
+ start, end = revs[0].split(revrangesep, 1)
+ start = revfix(repo, start, 0)
+ end = revfix(repo, end, len(repo) - 1)
+ else:
+ start = revfix(repo, revs[0], None)
+ elif len(revs) == 2:
+ if revrangesep in revs[0] or revrangesep in revs[1]:
+ raise util.Abort(_('too many revisions specified'))
+ start = revfix(repo, revs[0], None)
+ end = revfix(repo, revs[1], None)
+ else:
+ raise util.Abort(_('too many revisions specified'))
+ return start, end
+
+def revrange(repo, revs):
+ """Yield revision as strings from a list of revision specifications."""
+
+ def revfix(repo, val, defval):
+ if not val and val != 0 and defval is not None:
+ return defval
+ return repo.changelog.rev(repo.lookup(val))
+
+ seen, l = set(), []
+ for spec in revs:
+ if revrangesep in spec:
+ start, end = spec.split(revrangesep, 1)
+ start = revfix(repo, start, 0)
+ end = revfix(repo, end, len(repo) - 1)
+ step = start > end and -1 or 1
+ for rev in xrange(start, end+step, step):
+ if rev in seen:
+ continue
+ seen.add(rev)
+ l.append(rev)
+ else:
+ rev = revfix(repo, spec, None)
+ if rev in seen:
+ continue
+ seen.add(rev)
+ l.append(rev)
+
+ return l
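+
+# Expansion sketch, for a repository with revisions 0..10:
+#
+#   revrange(repo, ['2:5'])     -> [2, 3, 4, 5]
+#   revrange(repo, ['5:2'])     -> [5, 4, 3, 2]   step is -1 when start > end
+#   revrange(repo, [':3', '2']) -> [0, 1, 2, 3]   the seen set drops the dup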
+
+def make_filename(repo, pat, node,
+ total=None, seqno=None, revwidth=None, pathname=None):
+ node_expander = {
+ 'H': lambda: hex(node),
+ 'R': lambda: str(repo.changelog.rev(node)),
+ 'h': lambda: short(node),
+ }
+ expander = {
+ '%': lambda: '%',
+ 'b': lambda: os.path.basename(repo.root),
+ }
+
+ try:
+ if node:
+ expander.update(node_expander)
+ if node:
+ expander['r'] = (lambda:
+ str(repo.changelog.rev(node)).zfill(revwidth or 0))
+ if total is not None:
+ expander['N'] = lambda: str(total)
+ if seqno is not None:
+ expander['n'] = lambda: str(seqno)
+ if total is not None and seqno is not None:
+ expander['n'] = lambda: str(seqno).zfill(len(str(total)))
+ if pathname is not None:
+ expander['s'] = lambda: os.path.basename(pathname)
+ expander['d'] = lambda: os.path.dirname(pathname) or '.'
+ expander['p'] = lambda: pathname
+
+ newname = []
+ patlen = len(pat)
+ i = 0
+ while i < patlen:
+ c = pat[i]
+ if c == '%':
+ i += 1
+ c = pat[i]
+ c = expander[c]()
+ newname.append(c)
+ i += 1
+ return ''.join(newname)
+ except KeyError, inst:
+ raise util.Abort(_("invalid format spec '%%%s' in output filename") %
+ inst.args[0])
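+
+# Expansion sketch: for, say, revision 42 of a repository rooted at
+# /home/user/traipse,
+#
+#   make_filename(repo, '%b-r%R.patch', node) -> 'traipse-r42.patch'
+#
+# '%h' gives the short hash, '%H' the full hex, '%n'/'%N' the zero-padded
+# sequence number and total when several revisions are written, and '%%'
+# a literal '%'.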
+
+def make_file(repo, pat, node=None,
+ total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
+
+ writable = 'w' in mode or 'a' in mode
+
+ if not pat or pat == '-':
+ return writable and sys.stdout or sys.stdin
+ if hasattr(pat, 'write') and writable:
+ return pat
+ if hasattr(pat, 'read') and 'r' in mode:
+ return pat
+ return open(make_filename(repo, pat, node, total, seqno, revwidth,
+ pathname),
+ mode)
+
+def expandpats(pats):
+ if not util.expandglobs:
+ return list(pats)
+ ret = []
+ for p in pats:
+ kind, name = _match._patsplit(p, None)
+ if kind is None:
+ try:
+ globbed = glob.glob(name)
+ except re.error:
+ globbed = [name]
+ if globbed:
+ ret.extend(globbed)
+ continue
+ ret.append(p)
+ return ret
+
+def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
+ if not globbed and default == 'relpath':
+ pats = expandpats(pats or [])
+ m = _match.match(repo.root, repo.getcwd(), pats,
+ opts.get('include'), opts.get('exclude'), default)
+ def badfn(f, msg):
+ repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
+ m.bad = badfn
+ return m
+
+def matchall(repo):
+ return _match.always(repo.root, repo.getcwd())
+
+def matchfiles(repo, files):
+ return _match.exact(repo.root, repo.getcwd(), files)
+
+def findrenames(repo, added, removed, threshold):
+ '''find renamed files -- yields (before, after, score) tuples'''
+ ctx = repo['.']
+ for a in added:
+ aa = repo.wread(a)
+ bestname, bestscore = None, threshold
+ for r in removed:
+ if r not in ctx:
+ continue
+ rr = ctx.filectx(r).data()
+
+ # bdiff.blocks() returns blocks of matching lines
+ # count the number of bytes in each
+ equal = 0
+ alines = mdiff.splitnewlines(aa)
+ matches = bdiff.blocks(aa, rr)
+ for x1,x2,y1,y2 in matches:
+ for line in alines[x1:x2]:
+ equal += len(line)
+
+ lengths = len(aa) + len(rr)
+ if lengths:
+ myscore = equal*2.0 / lengths
+ if myscore >= bestscore:
+ bestname, bestscore = r, myscore
+ if bestname:
+ yield bestname, a, bestscore
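+
+# Scoring sketch: equal counts the bytes of the added file that fall in
+# matching line blocks, so score = 2.0 * equal / (len(added) + len(removed));
+# identical files score 1.0, and a 400-byte file sharing 300 bytes with a
+# 400-byte removed file scores 600/800 = 0.75.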
+
+def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
+ if dry_run is None:
+ dry_run = opts.get('dry_run')
+ if similarity is None:
+ similarity = float(opts.get('similarity') or 0)
+ # we'd use status here, except handling of symlinks and ignore is tricky
+ added, unknown, deleted, removed = [], [], [], []
+ audit_path = util.path_auditor(repo.root)
+ m = match(repo, pats, opts)
+ for abs in repo.walk(m):
+ target = repo.wjoin(abs)
+ good = True
+ try:
+ audit_path(abs)
+ except:
+ good = False
+ rel = m.rel(abs)
+ exact = m.exact(abs)
+ if good and abs not in repo.dirstate:
+ unknown.append(abs)
+ if repo.ui.verbose or not exact:
+ repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
+ elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
+ or (os.path.isdir(target) and not os.path.islink(target))):
+ deleted.append(abs)
+ if repo.ui.verbose or not exact:
+ repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
+ # for finding renames
+ elif repo.dirstate[abs] == 'r':
+ removed.append(abs)
+ elif repo.dirstate[abs] == 'a':
+ added.append(abs)
+ if not dry_run:
+ repo.remove(deleted)
+ repo.add(unknown)
+ if similarity > 0:
+ for old, new, score in findrenames(repo, added + unknown,
+ removed + deleted, similarity):
+ if repo.ui.verbose or not m.exact(old) or not m.exact(new):
+ repo.ui.status(_('recording removal of %s as rename to %s '
+ '(%d%% similar)\n') %
+ (m.rel(old), m.rel(new), score * 100))
+ if not dry_run:
+ repo.copy(old, new)
+
+def copy(ui, repo, pats, opts, rename=False):
+ # called with the repo lock held
+ #
+ # hgsep => pathname that uses "/" to separate directories
+ # ossep => pathname that uses os.sep to separate directories
+ cwd = repo.getcwd()
+ targets = {}
+ after = opts.get("after")
+ dryrun = opts.get("dry_run")
+
+ def walkpat(pat):
+ srcs = []
+ m = match(repo, [pat], opts, globbed=True)
+ for abs in repo.walk(m):
+ state = repo.dirstate[abs]
+ rel = m.rel(abs)
+ exact = m.exact(abs)
+ if state in '?r':
+ if exact and state == '?':
+ ui.warn(_('%s: not copying - file is not managed\n') % rel)
+ if exact and state == 'r':
+ ui.warn(_('%s: not copying - file has been marked for'
+ ' remove\n') % rel)
+ continue
+ # abs: hgsep
+ # rel: ossep
+ srcs.append((abs, rel, exact))
+ return srcs
+
+ # abssrc: hgsep
+ # relsrc: ossep
+ # otarget: ossep
+ def copyfile(abssrc, relsrc, otarget, exact):
+ abstarget = util.canonpath(repo.root, cwd, otarget)
+ reltarget = repo.pathto(abstarget, cwd)
+ target = repo.wjoin(abstarget)
+ src = repo.wjoin(abssrc)
+ state = repo.dirstate[abstarget]
+
+ # check for collisions
+ prevsrc = targets.get(abstarget)
+ if prevsrc is not None:
+ ui.warn(_('%s: not overwriting - %s collides with %s\n') %
+ (reltarget, repo.pathto(abssrc, cwd),
+ repo.pathto(prevsrc, cwd)))
+ return
+
+ # check for overwrites
+ exists = os.path.exists(target)
+ if not after and exists or after and state in 'mn':
+ if not opts['force']:
+ ui.warn(_('%s: not overwriting - file exists\n') %
+ reltarget)
+ return
+
+ if after:
+ if not exists:
+ return
+ elif not dryrun:
+ try:
+ if exists:
+ os.unlink(target)
+ targetdir = os.path.dirname(target) or '.'
+ if not os.path.isdir(targetdir):
+ os.makedirs(targetdir)
+ util.copyfile(src, target)
+ except IOError, inst:
+ if inst.errno == errno.ENOENT:
+ ui.warn(_('%s: deleted in working copy\n') % relsrc)
+ else:
+ ui.warn(_('%s: cannot copy - %s\n') %
+ (relsrc, inst.strerror))
+ return True # report a failure
+
+ if ui.verbose or not exact:
+ if rename:
+ ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
+ else:
+ ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
+
+ targets[abstarget] = abssrc
+
+ # fix up dirstate
+ origsrc = repo.dirstate.copied(abssrc) or abssrc
+ if abstarget == origsrc: # copying back a copy?
+ if state not in 'mn' and not dryrun:
+ repo.dirstate.normallookup(abstarget)
+ else:
+ if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
+ if not ui.quiet:
+ ui.warn(_("%s has not been committed yet, so no copy "
+ "data will be stored for %s.\n")
+ % (repo.pathto(origsrc, cwd), reltarget))
+ if repo.dirstate[abstarget] in '?r' and not dryrun:
+ repo.add([abstarget])
+ elif not dryrun:
+ repo.copy(origsrc, abstarget)
+
+ if rename and not dryrun:
+ repo.remove([abssrc], not after)
+
+ # pat: ossep
+ # dest: ossep
+ # srcs: list of (hgsep, hgsep, ossep, bool)
+ # return: function that takes hgsep and returns ossep
+ def targetpathfn(pat, dest, srcs):
+ if os.path.isdir(pat):
+ abspfx = util.canonpath(repo.root, cwd, pat)
+ abspfx = util.localpath(abspfx)
+ if destdirexists:
+ striplen = len(os.path.split(abspfx)[0])
+ else:
+ striplen = len(abspfx)
+ if striplen:
+ striplen += len(os.sep)
+ res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
+ elif destdirexists:
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ res = lambda p: dest
+ return res
+
+ # pat: ossep
+ # dest: ossep
+ # srcs: list of (hgsep, hgsep, ossep, bool)
+ # return: function that takes hgsep and returns ossep
+ def targetpathafterfn(pat, dest, srcs):
+ if _match.patkind(pat):
+ # a mercurial pattern
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ abspfx = util.canonpath(repo.root, cwd, pat)
+ if len(abspfx) < len(srcs[0][0]):
+ # A directory. Either the target path contains the last
+ # component of the source path or it does not.
+ def evalpath(striplen):
+ score = 0
+ for s in srcs:
+ t = os.path.join(dest, util.localpath(s[0])[striplen:])
+ if os.path.exists(t):
+ score += 1
+ return score
+
+ abspfx = util.localpath(abspfx)
+ striplen = len(abspfx)
+ if striplen:
+ striplen += len(os.sep)
+ if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
+ score = evalpath(striplen)
+ striplen1 = len(os.path.split(abspfx)[0])
+ if striplen1:
+ striplen1 += len(os.sep)
+ if evalpath(striplen1) > score:
+ striplen = striplen1
+ res = lambda p: os.path.join(dest,
+ util.localpath(p)[striplen:])
+ else:
+ # a file
+ if destdirexists:
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ res = lambda p: dest
+ return res
+
+
+ pats = expandpats(pats)
+ if not pats:
+ raise util.Abort(_('no source or destination specified'))
+ if len(pats) == 1:
+ raise util.Abort(_('no destination specified'))
+ dest = pats.pop()
+ destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
+ if not destdirexists:
+ if len(pats) > 1 or _match.patkind(pats[0]):
+ raise util.Abort(_('with multiple sources, destination must be an '
+ 'existing directory'))
+ if util.endswithsep(dest):
+ raise util.Abort(_('destination %s is not a directory') % dest)
+
+ tfn = targetpathfn
+ if after:
+ tfn = targetpathafterfn
+ copylist = []
+ for pat in pats:
+ srcs = walkpat(pat)
+ if not srcs:
+ continue
+ copylist.append((tfn(pat, dest, srcs), srcs))
+ if not copylist:
+ raise util.Abort(_('no files to copy'))
+
+ errors = 0
+ for targetpath, srcs in copylist:
+ for abssrc, relsrc, exact in srcs:
+ if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
+ errors += 1
+
+ if errors:
+ ui.warn(_('(consider using --after)\n'))
+
+ return errors
+
+def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None):
+ '''Run a command as a service.'''
+
+ if opts['daemon'] and not opts['daemon_pipefds']:
+ rfd, wfd = os.pipe()
+ args = sys.argv[:]
+ args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
+ # Don't pass --cwd to the child process, because we've already
+ # changed directory.
+ for i in xrange(1,len(args)):
+ if args[i].startswith('--cwd='):
+ del args[i]
+ break
+ elif args[i].startswith('--cwd'):
+ del args[i:i+2]
+ break
+ pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
+ args[0], args)
+ os.close(wfd)
+ os.read(rfd, 1)
+ if parentfn:
+ return parentfn(pid)
+ else:
+ os._exit(0)
+
+ if initfn:
+ initfn()
+
+ if opts['pid_file']:
+ fp = open(opts['pid_file'], 'w')
+ fp.write(str(os.getpid()) + '\n')
+ fp.close()
+
+ if opts['daemon_pipefds']:
+ rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
+ os.close(rfd)
+ try:
+ os.setsid()
+ except AttributeError:
+ pass
+ os.write(wfd, 'y')
+ os.close(wfd)
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ nullfd = os.open(util.nulldev, os.O_RDWR)
+ logfilefd = nullfd
+ if logfile:
+ logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
+ os.dup2(nullfd, 0)
+ os.dup2(logfilefd, 1)
+ os.dup2(logfilefd, 2)
+ if nullfd not in (0, 1, 2):
+ os.close(nullfd)
+ if logfile and logfilefd not in (0, 1, 2):
+ os.close(logfilefd)
+
+ if runfn:
+ return runfn()
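+
+# Handshake sketch: the parent re-execs itself with --daemon-pipefds=r,w
+# and blocks on os.read(rfd, 1); the child detaches with setsid(), writes
+# 'y' once initfn has run and the pid file exists, then points stdin at
+# the null device and stdout/stderr at the log file. The parent therefore
+# only returns (or calls parentfn) after the service is ready.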
+
+class changeset_printer(object):
+ '''show changeset information when templating not requested.'''
+
+ def __init__(self, ui, repo, patch, diffopts, buffered):
+ self.ui = ui
+ self.repo = repo
+ self.buffered = buffered
+ self.patch = patch
+ self.diffopts = diffopts
+ self.header = {}
+ self.hunk = {}
+ self.lastheader = None
+
+ def flush(self, rev):
+ if rev in self.header:
+ h = self.header[rev]
+ if h != self.lastheader:
+ self.lastheader = h
+ self.ui.write(h)
+ del self.header[rev]
+ if rev in self.hunk:
+ self.ui.write(self.hunk[rev])
+ del self.hunk[rev]
+ return 1
+ return 0
+
+ def show(self, ctx, copies=(), **props):
+ if self.buffered:
+ self.ui.pushbuffer()
+ self._show(ctx, copies, props)
+ self.hunk[ctx.rev()] = self.ui.popbuffer()
+ else:
+ self._show(ctx, copies, props)
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ changenode = ctx.node()
+ rev = ctx.rev()
+
+ if self.ui.quiet:
+ self.ui.write("%d:%s\n" % (rev, short(changenode)))
+ return
+
+ log = self.repo.changelog
+ changes = log.read(changenode)
+ date = util.datestr(changes[2])
+ extra = changes[5]
+ branch = extra.get("branch")
+
+ hexfunc = self.ui.debugflag and hex or short
+
+ parents = [(p, hexfunc(log.node(p)))
+ for p in self._meaningful_parentrevs(log, rev)]
+
+ self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
+
+ # don't show the default branch name
+ if branch != 'default':
+ branch = encoding.tolocal(branch)
+ self.ui.write(_("branch: %s\n") % branch)
+ for tag in self.repo.nodetags(changenode):
+ self.ui.write(_("tag: %s\n") % tag)
+ for parent in parents:
+ self.ui.write(_("parent: %d:%s\n") % parent)
+
+ if self.ui.debugflag:
+ self.ui.write(_("manifest: %d:%s\n") %
+ (self.repo.manifest.rev(changes[0]), hex(changes[0])))
+ self.ui.write(_("user: %s\n") % changes[1])
+ self.ui.write(_("date: %s\n") % date)
+
+ if self.ui.debugflag:
+ files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
+ for key, value in zip([_("files:"), _("files+:"), _("files-:")],
+ files):
+ if value:
+ self.ui.write("%-12s %s\n" % (key, " ".join(value)))
+ elif changes[3] and self.ui.verbose:
+ self.ui.write(_("files: %s\n") % " ".join(changes[3]))
+ if copies and self.ui.verbose:
+ copies = ['%s (%s)' % c for c in copies]
+ self.ui.write(_("copies: %s\n") % ' '.join(copies))
+
+ if extra and self.ui.debugflag:
+ for key, value in sorted(extra.items()):
+ self.ui.write(_("extra: %s=%s\n")
+ % (key, value.encode('string_escape')))
+
+ description = changes[4].strip()
+ if description:
+ if self.ui.verbose:
+ self.ui.write(_("description:\n"))
+ self.ui.write(description)
+ self.ui.write("\n\n")
+ else:
+ self.ui.write(_("summary: %s\n") %
+ description.splitlines()[0])
+ self.ui.write("\n")
+
+ self.showpatch(changenode)
+
+ def showpatch(self, node):
+ if self.patch:
+ prev = self.repo.changelog.parents(node)[0]
+ chunks = patch.diff(self.repo, prev, node, match=self.patch,
+ opts=patch.diffopts(self.ui, self.diffopts))
+ for chunk in chunks:
+ self.ui.write(chunk)
+ self.ui.write("\n")
+
+ def _meaningful_parentrevs(self, log, rev):
+ """Return list of meaningful (or all if debug) parentrevs for rev.
+
+ For merges (two non-nullrev revisions) both parents are meaningful.
+ Otherwise the first parent revision is considered meaningful if it
+ is not the preceding revision.
+ """
+ parents = log.parentrevs(rev)
+ if not self.ui.debugflag and parents[1] == nullrev:
+ if parents[0] >= rev - 1:
+ parents = []
+ else:
+ parents = [parents[0]]
+ return parents
+
+
+class changeset_templater(changeset_printer):
+ '''format changeset information.'''
+
+ def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
+ changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
+ formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
+ self.t = templater.templater(mapfile, {'formatnode': formatnode},
+ cache={
+ 'parent': '{rev}:{node|formatnode} ',
+ 'manifest': '{rev}:{node|formatnode}',
+ 'filecopy': '{name} ({source})'})
+
+ def use_template(self, t):
+ '''set template string to use'''
+ self.t.cache['changeset'] = t
+
+ def _meaningful_parentrevs(self, ctx):
+ """Return list of meaningful (or all if debug) parentrevs for rev.
+ """
+ parents = ctx.parents()
+ if len(parents) > 1:
+ return parents
+ if self.ui.debugflag:
+ return [parents[0], self.repo['null']]
+ if parents[0].rev() >= ctx.rev() - 1:
+ return []
+ return parents
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+
+ def showlist(name, values, plural=None, **args):
+ '''expand set of values.
+ name is name of key in template map.
+ values is list of strings or dicts.
+ plural is plural of name, if not simply name + 's'.
+
+ expansion works like this, given name 'foo'.
+
+ if values is empty, expand 'no_foos'.
+
+ if 'foo' not in template map, return values as a string,
+ joined by space.
+
+ expand 'start_foos'.
+
+ for each value, expand 'foo'. if 'last_foo' in template
+ map, expand it instead of 'foo' for last key.
+
+ expand 'end_foos'.
+ '''
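+ # e.g. with templates start_tags='tags: ', tag='{tag} ', end_tags='\n',
+ # showlist('tag', ['tip', 'stable']) yields 'tags: ', 'tip ', 'stable ',
+ # and '\n' in turn (an illustrative sketch of the naming scheme above).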
+ if plural: names = plural
+ else: names = name + 's'
+ if not values:
+ noname = 'no_' + names
+ if noname in self.t:
+ yield self.t(noname, **args)
+ return
+ if name not in self.t:
+ if isinstance(values[0], str):
+ yield ' '.join(values)
+ else:
+ for v in values:
+ yield dict(v, **args)
+ return
+ startname = 'start_' + names
+ if startname in self.t:
+ yield self.t(startname, **args)
+ vargs = args.copy()
+ def one(v, tag=name):
+ try:
+ vargs.update(v)
+ except (AttributeError, ValueError):
+ try:
+ for a, b in v:
+ vargs[a] = b
+ except ValueError:
+ vargs[name] = v
+ return self.t(tag, **vargs)
+ lastname = 'last_' + name
+ if lastname in self.t:
+ last = values.pop()
+ else:
+ last = None
+ for v in values:
+ yield one(v)
+ if last is not None:
+ yield one(last, tag=lastname)
+ endname = 'end_' + names
+ if endname in self.t:
+ yield self.t(endname, **args)
+
+ def showbranches(**args):
+ branch = ctx.branch()
+ if branch != 'default':
+ branch = encoding.tolocal(branch)
+ return showlist('branch', [branch], plural='branches', **args)
+
+ def showparents(**args):
+ parents = [[('rev', p.rev()), ('node', p.hex())]
+ for p in self._meaningful_parentrevs(ctx)]
+ return showlist('parent', parents, **args)
+
+ def showtags(**args):
+ return showlist('tag', ctx.tags(), **args)
+
+ def showextras(**args):
+ for key, value in sorted(ctx.extra().items()):
+ args = args.copy()
+ args.update(dict(key=key, value=value))
+ yield self.t('extra', **args)
+
+ def showcopies(**args):
+ c = [{'name': x[0], 'source': x[1]} for x in copies]
+ return showlist('file_copy', c, plural='file_copies', **args)
+
+ files = []
+ def getfiles():
+ if not files:
+ files[:] = self.repo.status(ctx.parents()[0].node(),
+ ctx.node())[:3]
+ return files
+ def showfiles(**args):
+ return showlist('file', ctx.files(), **args)
+ def showmods(**args):
+ return showlist('file_mod', getfiles()[0], **args)
+ def showadds(**args):
+ return showlist('file_add', getfiles()[1], **args)
+ def showdels(**args):
+ return showlist('file_del', getfiles()[2], **args)
+ def showmanifest(**args):
+ args = args.copy()
+ args.update(dict(rev=self.repo.manifest.rev(ctx.changeset()[0]),
+ node=hex(ctx.changeset()[0])))
+ return self.t('manifest', **args)
+
+ def showdiffstat(**args):
+ diff = patch.diff(self.repo, ctx.parents()[0].node(), ctx.node())
+ files, adds, removes = 0, 0, 0
+ for i in patch.diffstatdata(util.iterlines(diff)):
+ files += 1
+ adds += i[1]
+ removes += i[2]
+ return '%s: +%s/-%s' % (files, adds, removes)
+
+ defprops = {
+ 'author': ctx.user(),
+ 'branches': showbranches,
+ 'date': ctx.date(),
+ 'desc': ctx.description().strip(),
+ 'file_adds': showadds,
+ 'file_dels': showdels,
+ 'file_mods': showmods,
+ 'files': showfiles,
+ 'file_copies': showcopies,
+ 'manifest': showmanifest,
+ 'node': ctx.hex(),
+ 'parents': showparents,
+ 'rev': ctx.rev(),
+ 'tags': showtags,
+ 'extras': showextras,
+ 'diffstat': showdiffstat,
+ }
+ props = props.copy()
+ props.update(defprops)
+
+ # find correct templates for current mode
+
+ tmplmodes = [
+ (True, None),
+ (self.ui.verbose, 'verbose'),
+ (self.ui.quiet, 'quiet'),
+ (self.ui.debugflag, 'debug'),
+ ]
+
+ types = {'header': '', 'changeset': 'changeset'}
+ for mode, postfix in tmplmodes:
+ for type in types:
+ cur = postfix and ('%s_%s' % (type, postfix)) or type
+ if mode and cur in self.t:
+ types[type] = cur
+
+ try:
+
+ # write header
+ if types['header']:
+ h = templater.stringify(self.t(types['header'], **props))
+ if self.buffered:
+ self.header[ctx.rev()] = h
+ else:
+ self.ui.write(h)
+
+ # write changeset metadata, then patch if requested
+ key = types['changeset']
+ self.ui.write(templater.stringify(self.t(key, **props)))
+ self.showpatch(ctx.node())
+
+ except KeyError, inst:
+ msg = _("%s: no key named '%s'")
+ raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
+ except SyntaxError, inst:
+ raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
+
+def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
+ """show one changeset using template or regular display.
+
+ Display format will be the first non-empty hit of:
+ 1. option 'template'
+ 2. option 'style'
+ 3. [ui] setting 'logtemplate'
+ 4. [ui] setting 'style'
+ If all of these values are either unset or the empty string,
+ regular display via changeset_printer() is done.
+ """
+ # options
+ patch = False
+ if opts.get('patch'):
+ patch = matchfn or matchall(repo)
+
+ tmpl = opts.get('template')
+ style = None
+ if tmpl:
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ else:
+ style = opts.get('style')
+
+ # ui settings
+ if not (tmpl or style):
+ tmpl = ui.config('ui', 'logtemplate')
+ if tmpl:
+ tmpl = templater.parsestring(tmpl)
+ else:
+ style = ui.config('ui', 'style')
+
+ if not (tmpl or style):
+ return changeset_printer(ui, repo, patch, opts, buffered)
+
+ mapfile = None
+ if style and not tmpl:
+ mapfile = style
+ if not os.path.split(mapfile)[0]:
+ mapname = (templater.templatepath('map-cmdline.' + mapfile)
+ or templater.templatepath(mapfile))
+ if mapname: mapfile = mapname
+
+ try:
+ t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
+ except SyntaxError, inst:
+ raise util.Abort(inst.args[0])
+ if tmpl: t.use_template(tmpl)
+ return t
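+
+# Resolution sketch: with "[ui] logtemplate = {rev}:{node|short}\n" in the
+# configuration and neither --template nor --style given, the templater
+# branch above is taken; with none of the four sources set, the plain
+# changeset_printer is returned instead.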
+
+def finddate(ui, repo, date):
+ """Find the tipmost changeset that matches the given date spec"""
+ df = util.matchdate(date)
+ get = util.cachefunc(lambda r: repo[r].changeset())
+ changeiter, matchfn = walkchangerevs(ui, repo, [], get, {'rev':None})
+ results = {}
+ for st, rev, fns in changeiter:
+ if st == 'add':
+ d = get(rev)[2]
+ if df(d[0]):
+ results[rev] = d
+ elif st == 'iter':
+ if rev in results:
+ ui.status(_("Found revision %s from %s\n") %
+ (rev, util.datestr(results[rev])))
+ return str(rev)
+
+ raise util.Abort(_("revision matching date not found"))
+
+def walkchangerevs(ui, repo, pats, change, opts):
+ '''Iterate over files and the revs in which they changed.
+
+ Callers most commonly need to iterate backwards over the history
+ in which they are interested. Doing so has awful (quadratic-looking)
+ performance, so we use iterators in a "windowed" way.
+
+ We walk a window of revisions in the desired order. Within the
+ window, we first walk forwards to gather data, then in the desired
+ order (usually backwards) to display it.
+
+ This function returns an (iterator, matchfn) tuple. The iterator
+ yields 3-tuples. They will be of one of the following forms:
+
+ "window", incrementing, lastrev: stepping through a window,
+ positive if walking forwards through revs, last rev in the
+ sequence iterated over - use to reset state for the current window
+
+ "add", rev, fns: out-of-order traversal of the given filenames
+ fns, which changed during revision rev - use to gather data for
+ possible display
+
+ "iter", rev, None: in-order traversal of the revs earlier iterated
+ over with "add" - use to display data'''
+
+ def increasing_windows(start, end, windowsize=8, sizelimit=512):
+ if start < end:
+ while start < end:
+ yield start, min(windowsize, end-start)
+ start += windowsize
+ if windowsize < sizelimit:
+ windowsize *= 2
+ else:
+ while start > end:
+ yield start, min(windowsize, start-end-1)
+ start -= windowsize
+ if windowsize < sizelimit:
+ windowsize *= 2
+
+ m = match(repo, pats, opts)
+ follow = opts.get('follow') or opts.get('follow_first')
+
+ if not len(repo):
+ return [], m
+
+ if follow:
+ defrange = '%s:0' % repo['.'].rev()
+ else:
+ defrange = '-1:0'
+ revs = revrange(repo, opts['rev'] or [defrange])
+ wanted = set()
+ slowpath = m.anypats() or (m.files() and opts.get('removed'))
+ fncache = {}
+
+ if not slowpath and not m.files():
+ # No files, no patterns. Display all revs.
+ wanted = set(revs)
+ copies = []
+ if not slowpath:
+ # Only files, no patterns. Check the history of each file.
+ def filerevgen(filelog, node):
+ cl_count = len(repo)
+ if node is None:
+ last = len(filelog) - 1
+ else:
+ last = filelog.rev(node)
+ for i, window in increasing_windows(last, nullrev):
+ revs = []
+ for j in xrange(i - window, i + 1):
+ n = filelog.node(j)
+ revs.append((filelog.linkrev(j),
+ follow and filelog.renamed(n)))
+ for rev in reversed(revs):
+ # only yield rev for which we have the changelog, it can
+ # happen while doing "hg log" during a pull or commit
+ if rev[0] < cl_count:
+ yield rev
+ def iterfiles():
+ for filename in m.files():
+ yield filename, None
+ for filename_node in copies:
+ yield filename_node
+ minrev, maxrev = min(revs), max(revs)
+ for file_, node in iterfiles():
+ filelog = repo.file(file_)
+ if not len(filelog):
+ if node is None:
+ # A zero count may be a directory or deleted file, so
+ # try to find matching entries on the slow path.
+ if follow:
+ raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
+ slowpath = True
+ break
+ else:
+ ui.warn(_('%s:%s copy source revision cannot be found!\n')
+ % (file_, short(node)))
+ continue
+ for rev, copied in filerevgen(filelog, node):
+ if rev <= maxrev:
+ if rev < minrev:
+ break
+ fncache.setdefault(rev, [])
+ fncache[rev].append(file_)
+ wanted.add(rev)
+ if follow and copied:
+ copies.append(copied)
+ if slowpath:
+ if follow:
+ raise util.Abort(_('can only follow copies/renames for explicit '
+ 'filenames'))
+
+ # The slow path checks files modified in every changeset.
+ def changerevgen():
+ for i, window in increasing_windows(len(repo) - 1, nullrev):
+ for j in xrange(i - window, i + 1):
+ yield j, change(j)[3]
+
+ for rev, changefiles in changerevgen():
+ matches = filter(m, changefiles)
+ if matches:
+ fncache[rev] = matches
+ wanted.add(rev)
+
+ class followfilter(object):
+ def __init__(self, onlyfirst=False):
+ self.startrev = nullrev
+ self.roots = []
+ self.onlyfirst = onlyfirst
+
+ def match(self, rev):
+ def realparents(rev):
+ if self.onlyfirst:
+ return repo.changelog.parentrevs(rev)[0:1]
+ else:
+ return filter(lambda x: x != nullrev,
+ repo.changelog.parentrevs(rev))
+
+ if self.startrev == nullrev:
+ self.startrev = rev
+ return True
+
+ if rev > self.startrev:
+ # forward: all descendants
+ if not self.roots:
+ self.roots.append(self.startrev)
+ for parent in realparents(rev):
+ if parent in self.roots:
+ self.roots.append(rev)
+ return True
+ else:
+ # backwards: all parents
+ if not self.roots:
+ self.roots.extend(realparents(self.startrev))
+ if rev in self.roots:
+ self.roots.remove(rev)
+ self.roots.extend(realparents(rev))
+ return True
+
+ return False
+
+ # it might be worthwhile to do this in the iterator if the rev range
+ # is descending and the prune args are all within that range
+ for rev in opts.get('prune', ()):
+ rev = repo.changelog.rev(repo.lookup(rev))
+ ff = followfilter()
+ stop = min(revs[0], revs[-1])
+ for x in xrange(rev, stop-1, -1):
+ if ff.match(x):
+ wanted.discard(x)
+
+ def iterate():
+ if follow and not m.files():
+ ff = followfilter(onlyfirst=opts.get('follow_first'))
+ def want(rev):
+ return ff.match(rev) and rev in wanted
+ else:
+ def want(rev):
+ return rev in wanted
+
+ for i, window in increasing_windows(0, len(revs)):
+ yield 'window', revs[0] < revs[-1], revs[-1]
+ nrevs = [rev for rev in revs[i:i+window] if want(rev)]
+ for rev in sorted(nrevs):
+ fns = fncache.get(rev)
+ if not fns:
+ def fns_generator():
+ for f in change(rev)[3]:
+ if m(f):
+ yield f
+ fns = fns_generator()
+ yield 'add', rev, fns
+ for rev in nrevs:
+ yield 'iter', rev, None
+ return iterate(), m
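+
+# Consumer sketch (prepare/display are hypothetical; finddate() above is
+# a real caller): the usual pattern is to cache on 'add' and emit on
+# 'iter', since 'add' arrives out of order and 'iter' in display order:
+#
+#   changeiter, m = walkchangerevs(ui, repo, pats, change, opts)
+#   for st, rev, fns in changeiter:
+#       if st == 'add':
+#           prepare(rev, fns)    # gather data out of order
+#       elif st == 'iter':
+#           display(rev)         # emit in the requested order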
+
+def commit(ui, repo, commitfunc, pats, opts):
+ '''commit the specified files or all outstanding changes'''
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+ message = logmessage(opts)
+
+ # extract addremove carefully -- this function can be called from a command
+ # that doesn't support addremove
+ if opts.get('addremove'):
+ addremove(repo, pats, opts)
+
+ return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
+
+def commiteditor(repo, ctx, subs):
+ if ctx.description():
+ return ctx.description()
+ return commitforceeditor(repo, ctx, subs)
+
+def commitforceeditor(repo, ctx, subs):
+ edittext = []
+ modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
+ if ctx.description():
+ edittext.append(ctx.description())
+ edittext.append("")
+ edittext.append("") # Empty line between message and comments.
+ edittext.append(_("HG: Enter commit message."
+ " Lines beginning with 'HG:' are removed."))
+ edittext.append(_("HG: Leave message empty to abort commit."))
+ edittext.append("HG: --")
+ edittext.append(_("HG: user: %s") % ctx.user())
+ if ctx.p2():
+ edittext.append(_("HG: branch merge"))
+ if ctx.branch():
+ edittext.append(_("HG: branch '%s'")
+ % encoding.tolocal(ctx.branch()))
+ edittext.extend([_("HG: subrepo %s") % s for s in subs])
+ edittext.extend([_("HG: added %s") % f for f in added])
+ edittext.extend([_("HG: changed %s") % f for f in modified])
+ edittext.extend([_("HG: removed %s") % f for f in removed])
+ if not added and not modified and not removed:
+ edittext.append(_("HG: no files changed"))
+ edittext.append("")
+ # run editor in the repository root
+ olddir = os.getcwd()
+ os.chdir(repo.root)
+ text = repo.ui.edit("\n".join(edittext), ctx.user())
+ text = re.sub("(?m)^HG:.*\n", "", text)
+ os.chdir(olddir)
+
+ if not text.strip():
+ raise util.Abort(_("empty commit message"))
+
+ return text
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/commands.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/commands.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,3555 @@
+# commands.py - command processing for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import hex, nullid, nullrev, short
+from lock import release
+from i18n import _, gettext
+import os, re, sys, subprocess, difflib, time
+import hg, util, revlog, bundlerepo, extensions, copies, context, error
+import patch, help, mdiff, tempfile, url, encoding
+import archival, changegroup, cmdutil, sshserver, hbisect
+from hgweb import server
+import merge as merge_
+
+# Commands start here, listed alphabetically
+
+def add(ui, repo, *pats, **opts):
+ """add the specified files on the next commit
+
+ Schedule files to be version controlled and added to the
+ repository.
+
+ The files will be added to the repository at the next commit. To
+ undo an add before that, see hg forget.
+
+ If no names are given, add all files to the repository.
+ """
+
+ bad = []
+ exacts = {}
+ names = []
+ m = cmdutil.match(repo, pats, opts)
+ oldbad = m.bad
+ m.bad = lambda x,y: bad.append(x) or oldbad(x,y)
+
+ for f in repo.walk(m):
+ exact = m.exact(f)
+ if exact or f not in repo.dirstate:
+ names.append(f)
+ if ui.verbose or not exact:
+ ui.status(_('adding %s\n') % m.rel(f))
+ if not opts.get('dry_run'):
+ bad += [f for f in repo.add(names) if f in m.files()]
+ return bad and 1 or 0
+
+def addremove(ui, repo, *pats, **opts):
+ """add all new files, delete all missing files
+
+ Add all new files and remove all missing files from the
+ repository.
+
+ New files are ignored if they match any of the patterns in
+ .hgignore. As with add, these changes take effect at the next
+ commit.
+
+ Use the -s/--similarity option to detect renamed files. With a
+ parameter > 0, this compares every removed file with every added
+ file and records those similar enough as renames. This option
+ takes a percentage between 0 (disabled) and 100 (files must be
+ identical) as its parameter. Detecting renamed files this way can
+ be expensive.
+ """
+ try:
+ sim = float(opts.get('similarity') or 0)
+ except ValueError:
+ raise util.Abort(_('similarity must be a number'))
+ if sim < 0 or sim > 100:
+ raise util.Abort(_('similarity must be between 0 and 100'))
+ return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
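+
+# e.g. "hg addremove -s 90" passes similarity=0.9 to cmdutil.addremove,
+# recording a removed/added pair as a rename once the findrenames score
+# reaches 0.9.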
+
+def annotate(ui, repo, *pats, **opts):
+ """show changeset information by line for each file
+
+ List changes in files, showing the revision id responsible for
+ each line
+
+ This command is useful for discovering when a change was made and
+ by whom.
+
+ Without the -a/--text option, annotate will avoid processing files
+ it detects as binary. With -a, annotate will annotate the file
+ anyway, although the results will probably be neither useful
+ nor desirable.
+ """
+ datefunc = ui.quiet and util.shortdate or util.datestr
+ getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
+
+ if not pats:
+ raise util.Abort(_('at least one filename or pattern is required'))
+
+ opmap = [('user', lambda x: ui.shortuser(x[0].user())),
+ ('number', lambda x: str(x[0].rev())),
+ ('changeset', lambda x: short(x[0].node())),
+ ('date', getdate),
+ ('follow', lambda x: x[0].path()),
+ ]
+
+ if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
+ and not opts.get('follow')):
+ opts['number'] = 1
+
+ linenumber = opts.get('line_number') is not None
+ if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
+ raise util.Abort(_('at least one of -n/-c is required for -l'))
+
+ funcmap = [func for op, func in opmap if opts.get(op)]
+ if linenumber:
+ lastfunc = funcmap[-1]
+ funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
+
+ ctx = repo[opts.get('rev')]
+
+ m = cmdutil.match(repo, pats, opts)
+ for abs in ctx.walk(m):
+ fctx = ctx[abs]
+ if not opts.get('text') and util.binary(fctx.data()):
+ ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
+ continue
+
+ lines = fctx.annotate(follow=opts.get('follow'),
+ linenumber=linenumber)
+ pieces = []
+
+ for f in funcmap:
+ l = [f(n) for n, dummy in lines]
+ if l:
+ ml = max(map(len, l))
+ pieces.append(["%*s" % (ml, x) for x in l])
+
+ if pieces:
+ for p, l in zip(zip(*pieces), lines):
+ ui.write("%s: %s" % (" ".join(p), l[1]))
+
+def archive(ui, repo, dest, **opts):
+ '''create an unversioned archive of a repository revision
+
+ By default, the revision used is the parent of the working
+ directory; use -r/--rev to specify a different revision.
+
+ To specify the type of archive to create, use -t/--type. Valid
+ types are:
+
+ "files" (default): a directory full of files
+ "tar": tar archive, uncompressed
+ "tbz2": tar archive, compressed using bzip2
+ "tgz": tar archive, compressed using gzip
+ "uzip": zip archive, uncompressed
+ "zip": zip archive, compressed using deflate
+
+ The exact name of the destination archive or directory is given
+ using a format string; see 'hg help export' for details.
+
+ Each member added to an archive file has a directory prefix
+ prepended. Use -p/--prefix to specify a format string for the
+ prefix. The default is the basename of the archive, with suffixes
+ removed.
+ '''
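+ # e.g. "hg archive -t tgz -p '%b-%h/' ../%b-%h.tgz" writes a gzipped
+ # tarball whose members share a hash-stamped top-level prefix (an
+ # illustrative invocation; cmdutil.make_filename expands the % specs).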
+
+ ctx = repo[opts.get('rev')]
+ if not ctx:
+ raise util.Abort(_('no working directory: please specify a revision'))
+ node = ctx.node()
+ dest = cmdutil.make_filename(repo, dest, node)
+ if os.path.realpath(dest) == repo.root:
+ raise util.Abort(_('repository root cannot be destination'))
+ matchfn = cmdutil.match(repo, [], opts)
+ kind = opts.get('type') or 'files'
+ prefix = opts.get('prefix')
+ if dest == '-':
+ if kind == 'files':
+ raise util.Abort(_('cannot archive plain files to stdout'))
+ dest = sys.stdout
+ if not prefix: prefix = os.path.basename(repo.root) + '-%h'
+ prefix = cmdutil.make_filename(repo, prefix, node)
+ archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
+ matchfn, prefix)
+
+def backout(ui, repo, node=None, rev=None, **opts):
+ '''reverse effect of earlier changeset
+
+ Commit the backed out changes as a new changeset. The new
+ changeset is a child of the backed out changeset.
+
+ If you back out a changeset other than the tip, a new head is
+ created. This head will be the new tip and you should merge this
+ backout changeset with another head.
+
+ The --merge option remembers the parent of the working directory
+ before starting the backout, then merges the new head with that
+ changeset afterwards. This saves you from doing the merge by hand.
+ The result of this merge is not committed, as with a normal merge.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ '''
+ if rev and node:
+ raise util.Abort(_("please specify just one revision"))
+
+ if not rev:
+ rev = node
+
+ if not rev:
+ raise util.Abort(_("please specify a revision to backout"))
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ cmdutil.bail_if_changed(repo)
+ node = repo.lookup(rev)
+
+ op1, op2 = repo.dirstate.parents()
+ a = repo.changelog.ancestor(op1, node)
+ if a != node:
+ raise util.Abort(_('cannot backout change on a different branch'))
+
+ p1, p2 = repo.changelog.parents(node)
+ if p1 == nullid:
+ raise util.Abort(_('cannot backout a change with no parents'))
+ if p2 != nullid:
+ if not opts.get('parent'):
+ raise util.Abort(_('cannot backout a merge changeset without '
+ '--parent'))
+ p = repo.lookup(opts['parent'])
+ if p not in (p1, p2):
+ raise util.Abort(_('%s is not a parent of %s') %
+ (short(p), short(node)))
+ parent = p
+ else:
+ if opts.get('parent'):
+ raise util.Abort(_('cannot use --parent on non-merge changeset'))
+ parent = p1
+
+ # the backout should appear on the same branch
+ branch = repo.dirstate.branch()
+ hg.clean(repo, node, show_stats=False)
+ repo.dirstate.setbranch(branch)
+ revert_opts = opts.copy()
+ revert_opts['date'] = None
+ revert_opts['all'] = True
+ revert_opts['rev'] = hex(parent)
+ revert_opts['no_backup'] = None
+ revert(ui, repo, **revert_opts)
+ commit_opts = opts.copy()
+ commit_opts['addremove'] = False
+ if not commit_opts['message'] and not commit_opts['logfile']:
+ # we don't translate commit messages
+ commit_opts['message'] = "Backed out changeset %s" % (short(node))
+ commit_opts['force_editor'] = True
+ commit(ui, repo, **commit_opts)
+ def nice(node):
+ return '%d:%s' % (repo.changelog.rev(node), short(node))
+ ui.status(_('changeset %s backs out changeset %s\n') %
+ (nice(repo.changelog.tip()), nice(node)))
+ if op1 != node:
+ hg.clean(repo, op1, show_stats=False)
+ if opts.get('merge'):
+ ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
+ hg.merge(repo, hex(repo.changelog.tip()))
+ else:
+ ui.status(_('the backout changeset is a new head - '
+ 'do not forget to merge\n'))
+ ui.status(_('(use "backout --merge" '
+ 'if you want to auto-merge)\n'))
+
+def bisect(ui, repo, rev=None, extra=None, command=None,
+ reset=None, good=None, bad=None, skip=None, noupdate=None):
+ """subdivision search of changesets
+
+ This command helps to find changesets which introduce problems. To
+ use, mark the earliest changeset you know exhibits the problem as
+ bad, then mark the latest changeset which is free from the problem
+ as good. Bisect will update your working directory to a revision
+ for testing (unless the -U/--noupdate option is specified). Once
+ you have performed tests, mark the working directory as good or
+ bad, and bisect will either update to another candidate changeset
+ or announce that it has found the bad revision.
+
+ As a shortcut, you can also use the revision argument to mark a
+ revision as good or bad without checking it out first.
+
+ If you supply a command, it will be used for automatic bisection.
+ Its exit status will be used to mark revisions as good or bad:
+ status 0 means good, 125 means to skip the revision, 127
+ (command not found) will abort the bisection, and any other
+ non-zero exit status means the revision is bad.
+ """
+ def print_result(nodes, good):
+ displayer = cmdutil.show_changeset(ui, repo, {})
+ if len(nodes) == 1:
+ # narrowed it down to a single revision
+ if good:
+ ui.write(_("The first good revision is:\n"))
+ else:
+ ui.write(_("The first bad revision is:\n"))
+ displayer.show(repo[nodes[0]])
+ else:
+ # multiple possible revisions
+ if good:
+ ui.write(_("Due to skipped revisions, the first "
+ "good revision could be any of:\n"))
+ else:
+ ui.write(_("Due to skipped revisions, the first "
+ "bad revision could be any of:\n"))
+ for n in nodes:
+ displayer.show(repo[n])
+
+ def check_state(state, interactive=True):
+ if not state['good'] or not state['bad']:
+ if (good or bad or skip or reset) and interactive:
+ return
+ if not state['good']:
+ raise util.Abort(_('cannot bisect (no known good revisions)'))
+ else:
+ raise util.Abort(_('cannot bisect (no known bad revisions)'))
+ return True
+
+ # backward compatibility
+ if rev in "good bad reset init".split():
+ ui.warn(_("(use of 'hg bisect ' is deprecated)\n"))
+ cmd, rev, extra = rev, extra, None
+ if cmd == "good":
+ good = True
+ elif cmd == "bad":
+ bad = True
+ else:
+ reset = True
+ elif extra or good + bad + skip + reset + bool(command) > 1:
+ raise util.Abort(_('incompatible arguments'))
+
+ if reset:
+ p = repo.join("bisect.state")
+ if os.path.exists(p):
+ os.unlink(p)
+ return
+
+ state = hbisect.load_state(repo)
+
+ if command:
+ commandpath = util.find_exe(command)
+ if commandpath is None:
+ raise util.Abort(_("cannot find executable: %s") % command)
+ changesets = 1
+ try:
+ while changesets:
+ # update state
+ status = subprocess.call([commandpath])
+ if status == 125:
+ transition = "skip"
+ elif status == 0:
+ transition = "good"
+ # status < 0 means process was killed
+ elif status == 127:
+ raise util.Abort(_("failed to execute %s") % command)
+ elif status < 0:
+ raise util.Abort(_("%s killed") % command)
+ else:
+ transition = "bad"
+ ctx = repo[rev or '.']
+ state[transition].append(ctx.node())
+ ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
+ check_state(state, interactive=False)
+ # bisect
+ nodes, changesets, good = hbisect.bisect(repo.changelog, state)
+ # update to next check
+ cmdutil.bail_if_changed(repo)
+ hg.clean(repo, nodes[0], show_stats=False)
+ finally:
+ hbisect.save_state(repo, state)
+ return print_result(nodes, not status)
+
+ # update state
+ node = repo.lookup(rev or '.')
+ if good:
+ state['good'].append(node)
+ elif bad:
+ state['bad'].append(node)
+ elif skip:
+ state['skip'].append(node)
+
+ hbisect.save_state(repo, state)
+
+ if not check_state(state):
+ return
+
+ # actually bisect
+ nodes, changesets, good = hbisect.bisect(repo.changelog, state)
+ if changesets == 0:
+ print_result(nodes, good)
+ else:
+ assert len(nodes) == 1 # only a single node can be tested next
+ node = nodes[0]
+ # compute the approximate number of remaining tests
+ tests, size = 0, 2
+ while size <= changesets:
+ tests, size = tests + 1, size * 2
+ rev = repo.changelog.rev(node)
+ ui.write(_("Testing changeset %s:%s "
+ "(%s changesets remaining, ~%s tests)\n")
+ % (rev, short(node), changesets, tests))
+ if not noupdate:
+ cmdutil.bail_if_changed(repo)
+ return hg.clean(repo, node)
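+
+# Estimate sketch: the tests/size loop above computes floor(log2(changesets)),
+# the number of probes a binary subdivision still needs; with 100 candidate
+# changesets it reports ~6 tests (2**6 = 64 <= 100 < 128).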
+
+def branch(ui, repo, label=None, **opts):
+ """set or show the current branch name
+
+ With no argument, show the current branch name. With one argument,
+ set the working directory branch name (the branch will not exist
+ in the repository until the next commit). Standard practice
+ recommends that primary development take place on the 'default'
+ branch.
+
+ Unless -f/--force is specified, branch will not let you set a
+ branch name that already exists, even if it's inactive.
+
+ Use -C/--clean to reset the working directory branch to that of
+ the parent of the working directory, negating a previous branch
+ change.
+
+ Use the command 'hg update' to switch to an existing branch. Use
+ 'hg commit --close-branch' to mark this branch as closed.
+ """
+
+ if opts.get('clean'):
+ label = repo[None].parents()[0].branch()
+ repo.dirstate.setbranch(label)
+ ui.status(_('reset working directory to branch %s\n') % label)
+ elif label:
+ if not opts.get('force') and label in repo.branchtags():
+ if label not in [p.branch() for p in repo.parents()]:
+ raise util.Abort(_('a branch of the same name already exists'
+ ' (use --force to override)'))
+ repo.dirstate.setbranch(encoding.fromlocal(label))
+ ui.status(_('marked working directory as branch %s\n') % label)
+ else:
+ ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
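+
+# A usage sketch (hypothetical branch name):
+#
+#   $ hg branch feature-x    # mark the next commit as branch feature-x
+#   $ hg branch              # show the current branch name
+#   $ hg branch -C           # undo the change before committing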
+
+def branches(ui, repo, active=False, closed=False):
+ """list repository named branches
+
+ List the repository's named branches, indicating which ones are
+ inactive. If -c/--closed is specified, also list branches which have
+ been marked closed (see hg commit --close-branch).
+
+ If -a/--active is specified, only show active branches. A branch
+ is considered active if it contains repository heads.
+
+ Use the command 'hg update' to switch to an existing branch.
+ """
+
+ hexfunc = ui.debugflag and hex or short
+ activebranches = [encoding.tolocal(repo[n].branch())
+ for n in repo.heads()]
+ def testactive(tag, node):
+ realhead = tag in activebranches
+ open = node in repo.branchheads(tag, closed=False)
+ return realhead and open
+ branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
+ for tag, node in repo.branchtags().items()],
+ reverse=True)
+
+ for isactive, node, tag in branches:
+ if (not active) or isactive:
+ if ui.quiet:
+ ui.write("%s\n" % tag)
+ else:
+ hn = repo.lookup(node)
+ if isactive:
+ notice = ''
+ elif hn not in repo.branchheads(tag, closed=False):
+ if not closed:
+ continue
+ notice = ' (closed)'
+ else:
+ notice = ' (inactive)'
+ rev = str(node).rjust(31 - encoding.colwidth(tag))
+ data = tag, rev, hexfunc(hn), notice
+ ui.write("%s %s:%s%s\n" % data)
+
+def bundle(ui, repo, fname, dest=None, **opts):
+ """create a changegroup file
+
+ Generate a compressed changegroup file collecting changesets not
+ known to be in another repository.
+
+ If no destination repository is specified the destination is
+ assumed to have all the nodes specified by one or more --base
+ parameters. To create a bundle containing all changesets, use
+ -a/--all (or --base null).
+
+    You can change the compression method with the -t/--type option.
+ The available compression methods are: none, bzip2, and
+ gzip (by default, bundles are compressed using bzip2).
+
+ The bundle file can then be transferred using conventional means
+ and applied to another repository with the unbundle or pull
+ command. This is useful when direct push and pull are not
+ available or when exporting an entire repository is undesirable.
+
+ Applying bundles preserves all changeset contents including
+ permissions, copy/rename information, and revision history.
+ """
+ revs = opts.get('rev') or None
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+ if opts.get('all'):
+ base = ['null']
+ else:
+ base = opts.get('base')
+ if base:
+ if dest:
+ raise util.Abort(_("--base is incompatible with specifying "
+ "a destination"))
+ base = [repo.lookup(rev) for rev in base]
+ # create the right base
+ # XXX: nodesbetween / changegroup* should be "fixed" instead
+ o = []
+ has = set((nullid,))
+ for n in base:
+ has.update(repo.changelog.reachable(n))
+ if revs:
+ visit = list(revs)
+ else:
+ visit = repo.changelog.heads()
+ seen = {}
+ while visit:
+ n = visit.pop(0)
+ parents = [p for p in repo.changelog.parents(n) if p not in has]
+ if len(parents) == 0:
+ o.insert(0, n)
+ else:
+ for p in parents:
+ if p not in seen:
+ seen[p] = 1
+ visit.append(p)
+ else:
+ dest, revs, checkout = hg.parseurl(
+ ui.expandpath(dest or 'default-push', dest or 'default'), revs)
+ other = hg.repository(cmdutil.remoteui(repo, opts), dest)
+ o = repo.findoutgoing(other, force=opts.get('force'))
+
+ if revs:
+ cg = repo.changegroupsubset(o, revs, 'bundle')
+ else:
+ cg = repo.changegroup(o, 'bundle')
+
+ bundletype = opts.get('type', 'bzip2').lower()
+ btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
+ bundletype = btypes.get(bundletype)
+ if bundletype not in changegroup.bundletypes:
+ raise util.Abort(_('unknown bundle type specified with --type'))
+
+ changegroup.writebundle(cg, fname, bundletype)
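+
+# A usage sketch (hypothetical file and repository names):
+#
+#   $ hg bundle --all everything.hg            # bundle the whole repo
+#   $ hg bundle -t gzip missing.hg ../other    # changesets ../other lacks
+#   $ hg -R ../other unbundle missing.hg       # apply on the other side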
+
+def cat(ui, repo, file1, *pats, **opts):
+ """output the current or given revision of files
+
+ Print the specified files as they were at the given revision. If
+ no revision is given, the parent of the working directory is used,
+ or tip if no revision is checked out.
+
+ Output may be to a file, in which case the name of the file is
+ given using a format string. The formatting rules are the same as
+ for the export command, with the following additions:
+
+ %s basename of file being printed
+ %d dirname of file being printed, or '.' if in repository root
+ %p root-relative path name of file being printed
+ """
+ ctx = repo[opts.get('rev')]
+ err = 1
+ m = cmdutil.match(repo, (file1,) + pats, opts)
+ for abs in ctx.walk(m):
+ fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs)
+ data = ctx[abs].data()
+ if opts.get('decode'):
+ data = repo.wwritedata(abs, data)
+ fp.write(data)
+ err = 0
+ return err
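+
+# A usage sketch of the format string described above (hypothetical
+# revision and filename):
+#
+#   $ hg cat -r 42 -o '%s.r%R' src/module.py   # writes module.py.r42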
+
+def clone(ui, source, dest=None, **opts):
+ """make a copy of an existing repository
+
+ Create a copy of an existing repository in a new directory.
+
+ If no destination directory name is specified, it defaults to the
+ basename of the source.
+
+ The location of the source is added to the new repository's
+ .hg/hgrc file, as the default to be used for future pulls.
+
+ If you use the -r/--rev option to clone up to a specific revision,
+ no subsequent revisions (including subsequent tags) will be
+ present in the cloned repository. This option implies --pull, even
+ on local repositories.
+
+ By default, clone will check out the head of the 'default' branch.
+ If the -U/--noupdate option is used, the new clone will contain
+ only a repository (.hg) and no working copy (the working copy
+ parent is the null revision).
+
+ See 'hg help urls' for valid source format details.
+
+ It is possible to specify an ssh:// URL as the destination, but no
+ .hg/hgrc and working directory will be created on the remote side.
+ Please see 'hg help urls' for important details about ssh:// URLs.
+
+ For efficiency, hardlinks are used for cloning whenever the source
+ and destination are on the same filesystem (note this applies only
+ to the repository data, not to the checked out files). Some
+ filesystems, such as AFS, implement hardlinking incorrectly, but
+ do not report errors. In these cases, use the --pull option to
+ avoid hardlinking.
+
+ In some cases, you can clone repositories and checked out files
+ using full hardlinks with
+
+ $ cp -al REPO REPOCLONE
+
+ This is the fastest way to clone, but it is not always safe. The
+ operation is not atomic (making sure REPO is not modified during
+ the operation is up to you) and you have to make sure your editor
+ breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
+ this is not compatible with certain extensions that place their
+ metadata under the .hg directory, such as mq.
+
+ """
+ hg.clone(cmdutil.remoteui(ui, opts), source, dest,
+ pull=opts.get('pull'),
+ stream=opts.get('uncompressed'),
+ rev=opts.get('rev'),
+ update=not opts.get('noupdate'))
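+
+# A usage sketch (hypothetical URLs and paths):
+#
+#   $ hg clone http://example.com/repo            # clone into ./repo
+#   $ hg clone -U http://example.com/repo bare    # repository only
+#   $ hg clone -r 1.0 repo repo-1.0               # history up to rev 1.0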
+
+def commit(ui, repo, *pats, **opts):
+ """commit the specified files or all outstanding changes
+
+ Commit changes to the given files into the repository. Unlike a
+ centralized RCS, this operation is a local operation. See hg push
+ for a way to actively distribute your changes.
+
+ If a list of files is omitted, all changes reported by "hg status"
+ will be committed.
+
+ If you are committing the result of a merge, do not provide any
+ filenames or -I/-X filters.
+
+ If no commit message is specified, the configured editor is
+ started to prompt you for a message.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ """
+ extra = {}
+ if opts.get('close_branch'):
+ extra['close'] = 1
+ e = cmdutil.commiteditor
+ if opts.get('force_editor'):
+ e = cmdutil.commitforceeditor
+
+ def commitfunc(ui, repo, message, match, opts):
+ return repo.commit(message, opts.get('user'), opts.get('date'), match,
+ editor=e, extra=extra)
+
+ node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
+ if not node:
+ ui.status(_("nothing changed\n"))
+ return
+ cl = repo.changelog
+ rev = cl.rev(node)
+ parents = cl.parentrevs(rev)
+ if rev - 1 in parents:
+ # one of the parents was the old tip
+ pass
+ elif (parents == (nullrev, nullrev) or
+ len(cl.heads(cl.node(parents[0]))) > 1 and
+ (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
+ ui.status(_('created new head\n'))
+
+ if ui.debugflag:
+ ui.write(_('committed changeset %d:%s\n') % (rev,hex(node)))
+ elif ui.verbose:
+ ui.write(_('committed changeset %d:%s\n') % (rev,short(node)))
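+
+# A usage sketch (hypothetical message, date, and file):
+#
+#   $ hg commit -m "fix overflow" -d "2009-10-30" src/parser.py
+#   $ hg commit --close-branch -m "retire this branch"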
+
+def copy(ui, repo, *pats, **opts):
+ """mark files as copied for the next commit
+
+ Mark dest as having copies of source files. If dest is a
+ directory, copies are put in that directory. If dest is a file,
+ the source must be a single file.
+
+ By default, this command copies the contents of files as they
+ exist in the working directory. If invoked with -A/--after, the
+ operation is recorded, but no copying is performed.
+
+ This command takes effect with the next commit. To undo a copy
+ before that, see hg revert.
+ """
+ wlock = repo.wlock(False)
+ try:
+ return cmdutil.copy(ui, repo, pats, opts)
+ finally:
+ wlock.release()
+
+def debugancestor(ui, repo, *args):
+ """find the ancestor revision of two revisions in a given index"""
+ if len(args) == 3:
+ index, rev1, rev2 = args
+ r = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
+ lookup = r.lookup
+ elif len(args) == 2:
+ if not repo:
+ raise util.Abort(_("There is no Mercurial repository here "
+ "(.hg not found)"))
+ rev1, rev2 = args
+ r = repo.changelog
+ lookup = repo.lookup
+ else:
+ raise util.Abort(_('either two or three arguments required'))
+ a = r.ancestor(lookup(rev1), lookup(rev2))
+ ui.write("%d:%s\n" % (r.rev(a), hex(a)))
+
+def debugcommands(ui, cmd='', *args):
+ for cmd, vals in sorted(table.iteritems()):
+ cmd = cmd.split('|')[0].strip('^')
+ opts = ', '.join([i[1] for i in vals[1]])
+ ui.write('%s: %s\n' % (cmd, opts))
+
+def debugcomplete(ui, cmd='', **opts):
+ """returns the completion list associated with the given command"""
+
+ if opts.get('options'):
+ options = []
+ otables = [globalopts]
+ if cmd:
+ aliases, entry = cmdutil.findcmd(cmd, table, False)
+ otables.append(entry[1])
+ for t in otables:
+ for o in t:
+ if o[0]:
+ options.append('-%s' % o[0])
+ options.append('--%s' % o[1])
+ ui.write("%s\n" % "\n".join(options))
+ return
+
+ cmdlist = cmdutil.findpossible(cmd, table)
+ if ui.verbose:
+ cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
+ ui.write("%s\n" % "\n".join(sorted(cmdlist)))
+
+def debugfsinfo(ui, path = "."):
+ file('.debugfsinfo', 'w').write('')
+ ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
+ ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
+ ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
+ and 'yes' or 'no'))
+ os.unlink('.debugfsinfo')
+
+def debugrebuildstate(ui, repo, rev="tip"):
+ """rebuild the dirstate as it would look like for the given revision"""
+ ctx = repo[rev]
+ wlock = repo.wlock()
+ try:
+ repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+ finally:
+ wlock.release()
+
+def debugcheckstate(ui, repo):
+ """validate the correctness of the current dirstate"""
+ parent1, parent2 = repo.dirstate.parents()
+ m1 = repo[parent1].manifest()
+ m2 = repo[parent2].manifest()
+ errors = 0
+ for f in repo.dirstate:
+ state = repo.dirstate[f]
+ if state in "nr" and f not in m1:
+ ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
+ errors += 1
+ if state in "a" and f in m1:
+ ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
+ errors += 1
+ if state in "m" and f not in m1 and f not in m2:
+ ui.warn(_("%s in state %s, but not in either manifest\n") %
+ (f, state))
+ errors += 1
+ for f in m1:
+ state = repo.dirstate[f]
+ if state not in "nrm":
+ ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
+ errors += 1
+ if errors:
+ error = _(".hg/dirstate inconsistent with current parent's manifest")
+ raise util.Abort(error)
+
+def showconfig(ui, repo, *values, **opts):
+ """show combined config settings from all hgrc files
+
+ With no arguments, print names and values of all config items.
+
+ With one argument of the form section.name, print just the value
+ of that config item.
+
+ With multiple arguments, print names and values of all config
+ items with matching section names.
+
+ With --debug, the source (filename and line number) is printed
+ for each config item.
+ """
+
+ untrusted = bool(opts.get('untrusted'))
+ if values:
+ if len([v for v in values if '.' in v]) > 1:
+ raise util.Abort(_('only one config item permitted'))
+ for section, name, value in ui.walkconfig(untrusted=untrusted):
+ sectname = section + '.' + name
+ if values:
+ for v in values:
+ if v == section:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write('%s=%s\n' % (sectname, value))
+ elif v == sectname:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write(value, '\n')
+ else:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write('%s=%s\n' % (sectname, value))
+
+def debugsetparents(ui, repo, rev1, rev2=None):
+ """manually set the parents of the current working directory
+
+ This is useful for writing repository conversion tools, but should
+ be used with care.
+ """
+
+ if not rev2:
+ rev2 = hex(nullid)
+
+ wlock = repo.wlock()
+ try:
+ repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
+ finally:
+ wlock.release()
+
+def debugstate(ui, repo, nodates=None):
+ """show the contents of the current dirstate"""
+ timestr = ""
+ showdate = not nodates
+ for file_, ent in sorted(repo.dirstate._map.iteritems()):
+ if showdate:
+ if ent[3] == -1:
+ # Pad or slice to locale representation
+ locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
+ timestr = 'unset'
+ timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
+ else:
+ timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
+ if ent[1] & 020000:
+ mode = 'lnk'
+ else:
+ mode = '%3o' % (ent[1] & 0777)
+ ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+ for f in repo.dirstate.copies():
+ ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
+
+def debugsub(ui, repo, rev=None):
+ if rev == '':
+ rev = None
+ for k,v in sorted(repo[rev].substate.items()):
+ ui.write('path %s\n' % k)
+ ui.write(' source %s\n' % v[0])
+ ui.write(' revision %s\n' % v[1])
+
+def debugdata(ui, file_, rev):
+ """dump the contents of a data file revision"""
+ r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_[:-2] + ".i")
+ try:
+ ui.write(r.revision(r.lookup(rev)))
+ except KeyError:
+ raise util.Abort(_('invalid revision identifier %s') % rev)
+
+def debugdate(ui, date, range=None, **opts):
+ """parse and display a date"""
+ if opts["extended"]:
+ d = util.parsedate(date, util.extendeddateformats)
+ else:
+ d = util.parsedate(date)
+ ui.write("internal: %s %s\n" % d)
+ ui.write("standard: %s\n" % util.datestr(d))
+ if range:
+ m = util.matchdate(range)
+ ui.write("match: %s\n" % m(d[0]))
+
+def debugindex(ui, file_):
+ """dump the contents of an index file"""
+ r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
+ ui.write(" rev offset length base linkrev"
+ " nodeid p1 p2\n")
+ for i in r:
+ node = r.node(i)
+ try:
+ pp = r.parents(node)
+ except:
+ pp = [nullid, nullid]
+ ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
+ i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
+ short(node), short(pp[0]), short(pp[1])))
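+
+# A usage sketch: revlog index files normally live under .hg/store, e.g.
+#
+#   $ hg debugindex .hg/store/00changelog.i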
+
+def debugindexdot(ui, file_):
+ """dump an index DAG as a graphviz dot file"""
+ r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
+ ui.write("digraph G {\n")
+ for i in r:
+ node = r.node(i)
+ pp = r.parents(node)
+ ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+ if pp[1] != nullid:
+ ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
+ ui.write("}\n")
+
+def debuginstall(ui):
+ '''test Mercurial installation'''
+
+ def writetemp(contents):
+ (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
+ f = os.fdopen(fd, "wb")
+ f.write(contents)
+ f.close()
+ return name
+
+ problems = 0
+
+ # encoding
+ ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
+ try:
+ encoding.fromlocal("test")
+ except util.Abort, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" (check that your locale is properly set)\n"))
+ problems += 1
+
+ # compiled modules
+ ui.status(_("Checking extensions...\n"))
+ try:
+ import bdiff, mpatch, base85
+ except Exception, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" One or more extensions could not be found"))
+ ui.write(_(" (check that you compiled the extensions)\n"))
+ problems += 1
+
+ # templates
+ ui.status(_("Checking templates...\n"))
+ try:
+ import templater
+ templater.templater(templater.templatepath("map-cmdline.default"))
+ except Exception, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" (templates seem to have been installed incorrectly)\n"))
+ problems += 1
+
+ # patch
+ ui.status(_("Checking patch...\n"))
+ patchproblems = 0
+ a = "1\n2\n3\n4\n"
+ b = "1\n2\n3\ninsert\n4\n"
+ fa = writetemp(a)
+ d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
+ os.path.basename(fa))
+ fd = writetemp(d)
+
+ files = {}
+ try:
+ patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
+ except util.Abort, e:
+ ui.write(_(" patch call failed:\n"))
+ ui.write(" " + str(e) + "\n")
+ patchproblems += 1
+ else:
+ if list(files) != [os.path.basename(fa)]:
+ ui.write(_(" unexpected patch output!\n"))
+ patchproblems += 1
+ a = file(fa).read()
+ if a != b:
+ ui.write(_(" patch test failed!\n"))
+ patchproblems += 1
+
+ if patchproblems:
+ if ui.config('ui', 'patch'):
+ ui.write(_(" (Current patch tool may be incompatible with patch,"
+ " or misconfigured. Please check your .hgrc file)\n"))
+ else:
+ ui.write(_(" Internal patcher failure, please report this error"
+ " to http://mercurial.selenic.com/bts/\n"))
+ problems += patchproblems
+
+ os.unlink(fa)
+ os.unlink(fd)
+
+ # editor
+ ui.status(_("Checking commit editor...\n"))
+ editor = ui.geteditor()
+ cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
+ if not cmdpath:
+ if editor == 'vi':
+ ui.write(_(" No commit editor set and can't find vi in PATH\n"))
+ ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
+ else:
+ ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
+ ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
+ problems += 1
+
+ # check username
+ ui.status(_("Checking username...\n"))
+ user = os.environ.get("HGUSER")
+ if user is None:
+ user = ui.config("ui", "username")
+ if user is None:
+ user = os.environ.get("EMAIL")
+ if not user:
+ ui.warn(" ")
+ ui.username()
+ ui.write(_(" (specify a username in your .hgrc file)\n"))
+
+ if not problems:
+ ui.status(_("No problems detected\n"))
+ else:
+ ui.write(_("%s problems detected,"
+ " please check your install!\n") % problems)
+
+ return problems
+
+def debugrename(ui, repo, file1, *pats, **opts):
+ """dump rename information"""
+
+ ctx = repo[opts.get('rev')]
+ m = cmdutil.match(repo, (file1,) + pats, opts)
+ for abs in ctx.walk(m):
+ fctx = ctx[abs]
+ o = fctx.filelog().renamed(fctx.filenode())
+ rel = m.rel(abs)
+ if o:
+ ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
+ else:
+ ui.write(_("%s not renamed\n") % rel)
+
+def debugwalk(ui, repo, *pats, **opts):
+ """show how files match on given patterns"""
+ m = cmdutil.match(repo, pats, opts)
+ items = list(repo.walk(m))
+ if not items:
+ return
+ fmt = 'f %%-%ds %%-%ds %%s' % (
+ max([len(abs) for abs in items]),
+ max([len(m.rel(abs)) for abs in items]))
+ for abs in items:
+ line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '')
+ ui.write("%s\n" % line.rstrip())
+
+def diff(ui, repo, *pats, **opts):
+ """diff repository (or selected files)
+
+ Show differences between revisions for the specified files.
+
+ Differences between files are shown using the unified diff format.
+
+ NOTE: diff may generate unexpected results for merges, as it will
+ default to comparing against the working directory's first parent
+ changeset if no revisions are specified.
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.
+
+ Without the -a/--text option, diff will avoid generating diffs of
+ files it detects as binary. With -a, diff will generate a diff
+ anyway, probably with undesirable results.
+
+ Use the -g/--git option to generate diffs in the git extended diff
+ format. For more information, read 'hg help diffs'.
+ """
+
+ revs = opts.get('rev')
+ change = opts.get('change')
+
+ if revs and change:
+ msg = _('cannot specify --rev and --change at the same time')
+ raise util.Abort(msg)
+ elif change:
+ node2 = repo.lookup(change)
+ node1 = repo[node2].parents()[0].node()
+ else:
+ node1, node2 = cmdutil.revpair(repo, revs)
+
+ m = cmdutil.match(repo, pats, opts)
+ it = patch.diff(repo, node1, node2, match=m, opts=patch.diffopts(ui, opts))
+ for chunk in it:
+ ui.write(chunk)
+
+def export(ui, repo, *changesets, **opts):
+ """dump the header and diffs for one or more changesets
+
+ Print the changeset header and diffs for one or more revisions.
+
+ The information shown in the changeset header is: author,
+ changeset hash, parent(s) and commit comment.
+
+ NOTE: export may generate unexpected diff output for merge
+ changesets, as it will compare the merge changeset against its
+ first parent only.
+
+ Output may be to a file, in which case the name of the file is
+ given using a format string. The formatting rules are as follows:
+
+ %% literal "%" character
+ %H changeset hash (40 bytes of hexadecimal)
+ %N number of patches being generated
+ %R changeset revision number
+ %b basename of the exporting repository
+ %h short-form changeset hash (12 bytes of hexadecimal)
+ %n zero-padded sequence number, starting at 1
+ %r zero-padded changeset revision number
+
+ Without the -a/--text option, export will avoid generating diffs
+ of files it detects as binary. With -a, export will generate a
+ diff anyway, probably with undesirable results.
+
+ Use the -g/--git option to generate diffs in the git extended diff
+ format. See 'hg help diffs' for more information.
+
+ With the --switch-parent option, the diff will be against the
+ second parent. It can be useful to review a merge.
+ """
+ if not changesets:
+ raise util.Abort(_("export requires at least one changeset"))
+ revs = cmdutil.revrange(repo, changesets)
+ if len(revs) > 1:
+ ui.note(_('exporting patches:\n'))
+ else:
+ ui.note(_('exporting patch:\n'))
+ patch.export(repo, revs, template=opts.get('output'),
+ switch_parent=opts.get('switch_parent'),
+ opts=patch.diffopts(ui, opts))
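+
+# A usage sketch of the format string escapes listed above (hypothetical
+# revision range):
+#
+#   $ hg export -g -o "%b-%n-of-%N.patch" 100:105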
+
+def forget(ui, repo, *pats, **opts):
+ """forget the specified files on the next commit
+
+ Mark the specified files so they will no longer be tracked
+ after the next commit.
+
+ This only removes files from the current branch, not from the
+ entire project history, and it does not delete them from the
+ working directory.
+
+ To undo a forget before the next commit, see hg add.
+ """
+
+ if not pats:
+ raise util.Abort(_('no files specified'))
+
+ m = cmdutil.match(repo, pats, opts)
+ s = repo.status(match=m, clean=True)
+ forget = sorted(s[0] + s[1] + s[3] + s[6])
+
+ for f in m.files():
+ if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
+ ui.warn(_('not removing %s: file is already untracked\n')
+ % m.rel(f))
+
+ for f in forget:
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ repo.remove(forget, unlink=False)
+
+def grep(ui, repo, pattern, *pats, **opts):
+ """search for a pattern in specified files and revisions
+
+ Search revisions of files for a regular expression.
+
+ This command behaves differently than Unix grep. It only accepts
+ Python/Perl regexps. It searches repository history, not the
+ working directory. It always prints the revision number in which a
+ match appears.
+
+ By default, grep only prints output for the first revision of a
+ file in which it finds a match. To get it to print every revision
+ that contains a change in match status ("-" for a match that
+ becomes a non-match, or "+" for a non-match that becomes a match),
+ use the --all flag.
+ """
+ reflags = 0
+ if opts.get('ignore_case'):
+ reflags |= re.I
+ try:
+ regexp = re.compile(pattern, reflags)
+ except Exception, inst:
+ ui.warn(_("grep: invalid match pattern: %s\n") % inst)
+ return None
+ sep, eol = ':', '\n'
+ if opts.get('print0'):
+ sep = eol = '\0'
+
+ getfile = util.lrucachefunc(repo.file)
+
+ def matchlines(body):
+ begin = 0
+ linenum = 0
+ while True:
+ match = regexp.search(body, begin)
+ if not match:
+ break
+ mstart, mend = match.span()
+ linenum += body.count('\n', begin, mstart) + 1
+ lstart = body.rfind('\n', begin, mstart) + 1 or begin
+ begin = body.find('\n', mend) + 1 or len(body)
+ lend = begin - 1
+ yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
+
+ class linestate(object):
+ def __init__(self, line, linenum, colstart, colend):
+ self.line = line
+ self.linenum = linenum
+ self.colstart = colstart
+ self.colend = colend
+
+ def __hash__(self):
+ return hash((self.linenum, self.line))
+
+ def __eq__(self, other):
+ return self.line == other.line
+
+ matches = {}
+ copies = {}
+ def grepbody(fn, rev, body):
+ matches[rev].setdefault(fn, [])
+ m = matches[rev][fn]
+ for lnum, cstart, cend, line in matchlines(body):
+ s = linestate(line, lnum, cstart, cend)
+ m.append(s)
+
+ def difflinestates(a, b):
+ sm = difflib.SequenceMatcher(None, a, b)
+ for tag, alo, ahi, blo, bhi in sm.get_opcodes():
+ if tag == 'insert':
+ for i in xrange(blo, bhi):
+ yield ('+', b[i])
+ elif tag == 'delete':
+ for i in xrange(alo, ahi):
+ yield ('-', a[i])
+ elif tag == 'replace':
+ for i in xrange(alo, ahi):
+ yield ('-', a[i])
+ for i in xrange(blo, bhi):
+ yield ('+', b[i])
+
+ def display(fn, r, pstates, states):
+ datefunc = ui.quiet and util.shortdate or util.datestr
+ found = False
+ filerevmatches = {}
+ if opts.get('all'):
+ iter = difflinestates(pstates, states)
+ else:
+ iter = [('', l) for l in states]
+ for change, l in iter:
+ cols = [fn, str(r)]
+ if opts.get('line_number'):
+ cols.append(str(l.linenum))
+ if opts.get('all'):
+ cols.append(change)
+ if opts.get('user'):
+ cols.append(ui.shortuser(get(r)[1]))
+ if opts.get('date'):
+ cols.append(datefunc(get(r)[2]))
+ if opts.get('files_with_matches'):
+ c = (fn, r)
+ if c in filerevmatches:
+ continue
+ filerevmatches[c] = 1
+ else:
+ cols.append(l.line)
+ ui.write(sep.join(cols), eol)
+ found = True
+ return found
+
+ skip = {}
+ revfiles = {}
+ get = util.cachefunc(lambda r: repo[r].changeset())
+ changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
+ found = False
+ follow = opts.get('follow')
+ for st, rev, fns in changeiter:
+ if st == 'window':
+ matches.clear()
+ revfiles.clear()
+ elif st == 'add':
+ ctx = repo[rev]
+ pctx = ctx.parents()[0]
+ parent = pctx.rev()
+ matches.setdefault(rev, {})
+ matches.setdefault(parent, {})
+ files = revfiles.setdefault(rev, [])
+ for fn in fns:
+ flog = getfile(fn)
+ try:
+ fnode = ctx.filenode(fn)
+ except error.LookupError:
+ continue
+
+ copied = flog.renamed(fnode)
+ copy = follow and copied and copied[0]
+ if copy:
+ copies.setdefault(rev, {})[fn] = copy
+ if fn in skip:
+ if copy:
+ skip[copy] = True
+ continue
+ files.append(fn)
+
+                if fn not in matches[rev]:
+ grepbody(fn, rev, flog.read(fnode))
+
+ pfn = copy or fn
+                if pfn not in matches[parent]:
+ try:
+ fnode = pctx.filenode(pfn)
+ grepbody(pfn, parent, flog.read(fnode))
+ except error.LookupError:
+ pass
+ elif st == 'iter':
+ parent = repo[rev].parents()[0].rev()
+ for fn in sorted(revfiles.get(rev, [])):
+ states = matches[rev][fn]
+ copy = copies.get(rev, {}).get(fn)
+ if fn in skip:
+ if copy:
+ skip[copy] = True
+ continue
+ pstates = matches.get(parent, {}).get(copy or fn, [])
+ if pstates or states:
+ r = display(fn, rev, pstates, states)
+ found = found or r
+ if r and not opts.get('all'):
+ skip[fn] = True
+ if copy:
+ skip[copy] = True
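+
+# A usage sketch (hypothetical pattern):
+#
+#   $ hg grep -n 'TODO'          # first matching revision of each file
+#   $ hg grep --all -u 'TODO'    # every +/- change in match status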
+
+def heads(ui, repo, *branchrevs, **opts):
+ """show current repository heads or show branch heads
+
+ With no arguments, show all repository head changesets.
+
+ Repository "heads" are changesets that don't have child
+ changesets. They are where development generally takes place and
+ are the usual targets for update and merge operations.
+
+    If one or more REVs are given, the "branch heads" will be shown for
+    the named branch associated with each of those revisions. The name
+    of the branch is called the revision's branch tag.
+
+ Branch heads are revisions on a given named branch that do not have
+ any descendants on the same branch. A branch head could be a true head
+ or it could be the last changeset on a branch before a new branch
+ was created. If none of the branch heads are true heads, the branch
+ is considered inactive. If -c/--closed is specified, also show branch
+ heads marked closed (see hg commit --close-branch).
+
+ If STARTREV is specified only those heads (or branch heads) that
+ are descendants of STARTREV will be displayed.
+ """
+ if opts.get('rev'):
+ start = repo.lookup(opts['rev'])
+ else:
+ start = None
+ closed = opts.get('closed')
+ hideinactive, _heads = opts.get('active'), None
+ if not branchrevs:
+ if closed:
+ raise error.Abort(_('you must specify a branch to use --closed'))
+        # Assume we're looking for repo-wide heads if no revs were specified.
+ heads = repo.heads(start)
+ else:
+ if hideinactive:
+ _heads = repo.heads(start)
+ heads = []
+ visitedset = set()
+ for branchrev in branchrevs:
+ branch = repo[branchrev].branch()
+ if branch in visitedset:
+ continue
+ visitedset.add(branch)
+ bheads = repo.branchheads(branch, start, closed=closed)
+ if not bheads:
+ if not opts.get('rev'):
+ ui.warn(_("no open branch heads on branch %s\n") % branch)
+ elif branch != branchrev:
+ ui.warn(_("no changes on branch %s containing %s are "
+ "reachable from %s\n")
+ % (branch, branchrev, opts.get('rev')))
+ else:
+ ui.warn(_("no changes on branch %s are reachable from %s\n")
+ % (branch, opts.get('rev')))
+ if hideinactive:
+ bheads = [bhead for bhead in bheads if bhead in _heads]
+ heads.extend(bheads)
+ if not heads:
+ return 1
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for n in heads:
+ displayer.show(repo[n])
+
+def help_(ui, name=None, with_version=False):
+ """show help for a given topic or a help overview
+
+ With no arguments, print a list of commands with short help messages.
+
+ Given a topic, extension, or command name, print help for that
+ topic."""
+ option_lists = []
+
+ def addglobalopts(aliases):
+ if ui.verbose:
+ option_lists.append((_("global options:"), globalopts))
+ if name == 'shortlist':
+ option_lists.append((_('use "hg help" for the full list '
+ 'of commands'), ()))
+ else:
+ if name == 'shortlist':
+ msg = _('use "hg help" for the full list of commands '
+ 'or "hg -v" for details')
+ elif aliases:
+ msg = _('use "hg -v help%s" to show aliases and '
+ 'global options') % (name and " " + name or "")
+ else:
+ msg = _('use "hg -v help %s" to show global options') % name
+ option_lists.append((msg, ()))
+
+ def helpcmd(name):
+ if with_version:
+ version_(ui)
+ ui.write('\n')
+
+ try:
+ aliases, i = cmdutil.findcmd(name, table, False)
+ except error.AmbiguousCommand, inst:
+ select = lambda c: c.lstrip('^').startswith(inst.args[0])
+ helplist(_('list of commands:\n\n'), select)
+ return
+
+ # synopsis
+ if len(i) > 2:
+ if i[2].startswith('hg'):
+ ui.write("%s\n" % i[2])
+ else:
+ ui.write('hg %s %s\n' % (aliases[0], i[2]))
+ else:
+ ui.write('hg %s\n' % aliases[0])
+
+ # aliases
+ if not ui.quiet and len(aliases) > 1:
+ ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
+
+ # description
+ doc = gettext(i[0].__doc__)
+ if not doc:
+ doc = _("(no help text available)")
+ if ui.quiet:
+ doc = doc.splitlines(0)[0]
+ ui.write("\n%s\n" % doc.rstrip())
+
+ if not ui.quiet:
+ # options
+ if i[1]:
+ option_lists.append((_("options:\n"), i[1]))
+
+ addglobalopts(False)
+
+ def helplist(header, select=None):
+ h = {}
+ cmds = {}
+ for c, e in table.iteritems():
+ f = c.split("|", 1)[0]
+ if select and not select(f):
+ continue
+ if (not select and name != 'shortlist' and
+ e[0].__module__ != __name__):
+ continue
+ if name == "shortlist" and not f.startswith("^"):
+ continue
+ f = f.lstrip("^")
+ if not ui.debugflag and f.startswith("debug"):
+ continue
+ doc = e[0].__doc__
+ if doc and 'DEPRECATED' in doc and not ui.verbose:
+ continue
+ doc = gettext(doc)
+ if not doc:
+ doc = _("(no help text available)")
+ h[f] = doc.splitlines(0)[0].rstrip()
+ cmds[f] = c.lstrip("^")
+
+ if not h:
+ ui.status(_('no commands defined\n'))
+ return
+
+ ui.status(header)
+ fns = sorted(h)
+ m = max(map(len, fns))
+ for f in fns:
+ if ui.verbose:
+ commands = cmds[f].replace("|",", ")
+ ui.write(" %s:\n %s\n"%(commands, h[f]))
+ else:
+ ui.write(' %-*s %s\n' % (m, f, util.wrap(h[f], m + 4)))
+
+ if name != 'shortlist':
+ exts, maxlength = extensions.enabled()
+ ui.write(help.listexts(_('enabled extensions:'), exts, maxlength))
+
+ if not ui.quiet:
+ addglobalopts(True)
+
+ def helptopic(name):
+ for names, header, doc in help.helptable:
+ if name in names:
+ break
+ else:
+ raise error.UnknownCommand(name)
+
+ # description
+ if not doc:
+ doc = _("(no help text available)")
+ if hasattr(doc, '__call__'):
+ doc = doc()
+
+ ui.write("%s\n" % header)
+ ui.write("%s\n" % doc.rstrip())
+
+ def helpext(name):
+ try:
+ mod = extensions.find(name)
+ except KeyError:
+ raise error.UnknownCommand(name)
+
+ doc = gettext(mod.__doc__) or _('no help text available')
+ doc = doc.splitlines(0)
+ ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
+ for d in doc[1:]:
+ ui.write(d, '\n')
+
+ ui.status('\n')
+
+ try:
+ ct = mod.cmdtable
+ except AttributeError:
+ ct = {}
+
+ modcmds = set([c.split('|', 1)[0] for c in ct])
+ helplist(_('list of commands:\n\n'), modcmds.__contains__)
+
+ if name and name != 'shortlist':
+ i = None
+ for f in (helptopic, helpcmd, helpext):
+ try:
+ f(name)
+ i = None
+ break
+ except error.UnknownCommand, inst:
+ i = inst
+ if i:
+ raise i
+
+ else:
+ # program name
+ if ui.verbose or with_version:
+ version_(ui)
+ else:
+ ui.status(_("Mercurial Distributed SCM\n"))
+ ui.status('\n')
+
+ # list of commands
+ if name == "shortlist":
+ header = _('basic commands:\n\n')
+ else:
+ header = _('list of commands:\n\n')
+
+ helplist(header)
+
+ # list all option lists
+ opt_output = []
+ for title, options in option_lists:
+ opt_output.append(("\n%s" % title, None))
+ for shortopt, longopt, default, desc in options:
+ if "DEPRECATED" in desc and not ui.verbose: continue
+ opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
+ longopt and " --%s" % longopt),
+ "%s%s" % (desc,
+ default
+ and _(" (default: %s)") % default
+ or "")))
+
+ if not name:
+ ui.write(_("\nadditional help topics:\n\n"))
+ topics = []
+ for names, header, doc in help.helptable:
+ names = [(-len(name), name) for name in names]
+ names.sort()
+ topics.append((names[0][1], header))
+ topics_len = max([len(s[0]) for s in topics])
+ for t, desc in topics:
+ ui.write(" %-*s %s\n" % (topics_len, t, desc))
+
+ if opt_output:
+ opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
+ for first, second in opt_output:
+ if second:
+ second = util.wrap(second, opts_len + 3)
+ ui.write(" %-*s %s\n" % (opts_len, first, second))
+ else:
+ ui.write("%s\n" % first)
+
+def identify(ui, repo, source=None,
+ rev=None, num=None, id=None, branch=None, tags=None):
+ """identify the working copy or specified revision
+
+ With no revision, print a summary of the current state of the
+ repository.
+
+ Specifying a path to a repository root or Mercurial bundle will
+ cause lookup to operate on that repository/bundle.
+
+ This summary identifies the repository state using one or two
+ parent hash identifiers, followed by a "+" if there are
+ uncommitted changes in the working directory, a list of tags for
+ this revision and a branch name for non-default branches.
+ """
+
+ if not repo and not source:
+ raise util.Abort(_("There is no Mercurial repository here "
+ "(.hg not found)"))
+
+ hexfunc = ui.debugflag and hex or short
+ default = not (num or id or branch or tags)
+ output = []
+
+ revs = []
+ if source:
+ source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
+ repo = hg.repository(ui, source)
+
+ if not repo.local():
+ if not rev and revs:
+ rev = revs[0]
+ if not rev:
+ rev = "tip"
+ if num or branch or tags:
+            raise util.Abort(
+                _("can't query remote revision number, branch, or tags"))
+ output = [hexfunc(repo.lookup(rev))]
+ elif not rev:
+ ctx = repo[None]
+ parents = ctx.parents()
+ changed = False
+ if default or id or num:
+ changed = ctx.files() + ctx.deleted()
+ if default or id:
+ output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
+ (changed) and "+" or "")]
+ if num:
+ output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
+ (changed) and "+" or ""))
+ else:
+ ctx = repo[rev]
+ if default or id:
+ output = [hexfunc(ctx.node())]
+ if num:
+ output.append(str(ctx.rev()))
+
+ if repo.local() and default and not ui.quiet:
+ b = encoding.tolocal(ctx.branch())
+ if b != 'default':
+ output.append("(%s)" % b)
+
+ # multiple tags for a single parent separated by '/'
+ t = "/".join(ctx.tags())
+ if t:
+ output.append(t)
+
+ if branch:
+ output.append(encoding.tolocal(ctx.branch()))
+
+ if tags:
+ output.extend(ctx.tags())
+
+ ui.write("%s\n" % ' '.join(output))
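+
+# A usage sketch (the sample output hash is hypothetical):
+#
+#   $ hg identify              # e.g. "c43d9a6c260d+ tip"
+#   $ hg identify -n -b -t     # revision number, branch name, tags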
+
+def import_(ui, repo, patch1, *patches, **opts):
+ """import an ordered set of patches
+
+ Import a list of patches and commit them individually.
+
+ If there are outstanding changes in the working directory, import
+ will abort unless given the -f/--force flag.
+
+    You can import a patch straight from a mail message. Even patches
+    as attachments work (to use the body part, it must have type
+    text/plain or text/x-patch). From and Subject headers of the email
+    message are used as the default committer and commit message. All
+    text/plain body parts before the first diff are added to the
+    commit message.
+
+ If the imported patch was generated by hg export, user and
+ description from patch override values from message headers and
+ body. Values given on command line with -m/--message and -u/--user
+ override these.
+
+ If --exact is specified, import will set the working directory to
+ the parent of each patch before applying it, and will abort if the
+ resulting changeset has a different ID than the one recorded in
+ the patch. This may happen due to character set problems or other
+ deficiencies in the text patch format.
+
+ With -s/--similarity, hg will attempt to discover renames and
+ copies in the patch in the same way as 'addremove'.
+
+ To read a patch from standard input, use "-" as the patch name. If
+ a URL is specified, the patch will be downloaded from it.
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ """
+ patches = (patch1,) + patches
+
+ date = opts.get('date')
+ if date:
+ opts['date'] = util.parsedate(date)
+
+ try:
+ sim = float(opts.get('similarity') or 0)
+ except ValueError:
+ raise util.Abort(_('similarity must be a number'))
+ if sim < 0 or sim > 100:
+ raise util.Abort(_('similarity must be between 0 and 100'))
+
+ if opts.get('exact') or not opts.get('force'):
+ cmdutil.bail_if_changed(repo)
+
+ d = opts["base"]
+ strip = opts["strip"]
+ wlock = lock = None
+ try:
+ wlock = repo.wlock()
+ lock = repo.lock()
+ for p in patches:
+ pf = os.path.join(d, p)
+
+ if pf == '-':
+ ui.status(_("applying patch from stdin\n"))
+ pf = sys.stdin
+ else:
+ ui.status(_("applying %s\n") % p)
+ pf = url.open(ui, pf)
+ data = patch.extract(ui, pf)
+ tmpname, message, user, date, branch, nodeid, p1, p2 = data
+
+ if tmpname is None:
+ raise util.Abort(_('no diffs found'))
+
+ try:
+ cmdline_message = cmdutil.logmessage(opts)
+ if cmdline_message:
+ # pickup the cmdline msg
+ message = cmdline_message
+ elif message:
+ # pickup the patch msg
+ message = message.strip()
+ else:
+ # launch the editor
+ message = None
+ ui.debug(_('message:\n%s\n') % message)
+
+ wp = repo.parents()
+ if opts.get('exact'):
+ if not nodeid or not p1:
+ raise util.Abort(_('not a Mercurial patch'))
+ p1 = repo.lookup(p1)
+ p2 = repo.lookup(p2 or hex(nullid))
+
+ if p1 != wp[0].node():
+ hg.clean(repo, p1)
+ repo.dirstate.setparents(p1, p2)
+ elif p2:
+ try:
+ p1 = repo.lookup(p1)
+ p2 = repo.lookup(p2)
+ if p1 == wp[0].node():
+ repo.dirstate.setparents(p1, p2)
+ except error.RepoError:
+ pass
+ if opts.get('exact') or opts.get('import_branch'):
+ repo.dirstate.setbranch(branch or 'default')
+
+ files = {}
+ try:
+ patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
+ files=files, eolmode=None)
+ finally:
+ files = patch.updatedir(ui, repo, files, similarity=sim/100.)
+ if not opts.get('no_commit'):
+ m = cmdutil.matchfiles(repo, files or [])
+ n = repo.commit(message, opts.get('user') or user,
+ opts.get('date') or date, match=m,
+ editor=cmdutil.commiteditor)
+ if opts.get('exact'):
+ if hex(n) != nodeid:
+ repo.rollback()
+ raise util.Abort(_('patch is damaged'
+ ' or loses information'))
+ # Force a dirstate write so that the next transaction
+                    # backs up an up-to-date file.
+ repo.dirstate.write()
+ finally:
+ os.unlink(tmpname)
+ finally:
+ release(lock, wlock)
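+
+# A usage sketch (hypothetical patch sources):
+#
+#   $ hg import fix-overflow.patch        # apply and commit a patch file
+#   $ hg import --no-commit -             # apply from stdin, don't commit
+#   $ hg import --exact exported.patch    # verify the resulting changeset ID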
+
+def incoming(ui, repo, source="default", **opts):
+ """show new changesets found in source
+
+ Show new changesets found in the specified path/URL or the default
+    pull location. These are the changesets that would have been pulled
+    if a pull was requested at the time you issued this command.
+
+    For a remote repository, using --bundle avoids downloading the
+    changesets twice if the incoming command is followed by a pull.
+
+ See pull for valid source format details.
+ """
+ limit = cmdutil.loglimit(opts)
+ source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
+ other = hg.repository(cmdutil.remoteui(repo, opts), source)
+ ui.status(_('comparing with %s\n') % url.hidepassword(source))
+ if revs:
+ revs = [other.lookup(rev) for rev in revs]
+ common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
+ force=opts["force"])
+ if not incoming:
+ try:
+ os.unlink(opts["bundle"])
+ except:
+ pass
+ ui.status(_("no changes found\n"))
+ return 1
+
+ cleanup = None
+ try:
+ fname = opts["bundle"]
+ if fname or not other.local():
+ # create a bundle (uncompressed if other repo is not local)
+
+ if revs is None and other.capable('changegroupsubset'):
+ revs = rheads
+
+ if revs is None:
+ cg = other.changegroup(incoming, "incoming")
+ else:
+ cg = other.changegroupsubset(incoming, revs, 'incoming')
+ bundletype = other.local() and "HG10BZ" or "HG10UN"
+ fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
+ # keep written bundle?
+ if opts["bundle"]:
+ cleanup = None
+ if not other.local():
+ # use the created uncompressed bundlerepo
+ other = bundlerepo.bundlerepository(ui, repo.root, fname)
+
+ o = other.changelog.nodesbetween(incoming, revs)[0]
+ if opts.get('newest_first'):
+ o.reverse()
+ displayer = cmdutil.show_changeset(ui, other, opts)
+ count = 0
+ for n in o:
+ if count >= limit:
+ break
+ parents = [p for p in other.changelog.parents(n) if p != nullid]
+ if opts.get('no_merges') and len(parents) == 2:
+ continue
+ count += 1
+ displayer.show(other[n])
+ finally:
+ if hasattr(other, 'close'):
+ other.close()
+ if cleanup:
+ os.unlink(cleanup)
+
+def init(ui, dest=".", **opts):
+ """create a new repository in the given directory
+
+ Initialize a new repository in the given directory. If the given
+ directory does not exist, it will be created.
+
+ If no directory is given, the current directory is used.
+
+ It is possible to specify an ssh:// URL as the destination.
+ See 'hg help urls' for more information.
+ """
+ hg.repository(cmdutil.remoteui(ui, opts), dest, create=1)
+
+def locate(ui, repo, *pats, **opts):
+ """locate files matching specific patterns
+
+ Print files under Mercurial control in the working directory whose
+ names match the given patterns.
+
+ By default, this command searches all directories in the working
+ directory. To search just the current directory and its
+ subdirectories, use "--include .".
+
+ If no patterns are given to match, this command prints the names
+ of all files under Mercurial control in the working directory.
+
+ If you want to feed the output of this command into the "xargs"
+ command, use the -0 option to both this command and "xargs". This
+ will avoid the problem of "xargs" treating single filenames that
+ contain whitespace as multiple filenames.
+ """
+ end = opts.get('print0') and '\0' or '\n'
+ rev = opts.get('rev') or None
+
+ ret = 1
+ m = cmdutil.match(repo, pats, opts, default='relglob')
+ m.bad = lambda x,y: False
+ for abs in repo[rev].walk(m):
+ if not rev and abs not in repo.dirstate:
+ continue
+ if opts.get('fullpath'):
+ ui.write(repo.wjoin(abs), end)
+ else:
+ ui.write(((pats and m.rel(abs)) or abs), end)
+ ret = 0
+
+ return ret
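+
+# A usage sketch of the -0/xargs pairing described above (hypothetical
+# pattern):
+#
+#   $ hg locate -0 '*.py' | xargs -0 wc -l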
+
+def log(ui, repo, *pats, **opts):
+ """show revision history of entire repository or files
+
+ Print the revision history of the specified files or the entire
+ project.
+
+ File history is shown without following rename or copy history of
+ files. Use -f/--follow with a filename to follow history across
+ renames and copies. --follow without a filename will only show
+ ancestors or descendants of the starting revision. --follow-first
+ only follows the first parent of merge revisions.
+
+ If no revision range is specified, the default is tip:0 unless
+ --follow is set, in which case the working directory parent is
+ used as the starting revision.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+
+ By default this command prints revision number and changeset id,
+ tags, non-trivial parents, user, date and time, and a summary for
+ each commit. When the -v/--verbose switch is used, the list of
+ changed files and full commit message are shown.
+
+ NOTE: log -p/--patch may generate unexpected diff output for merge
+ changesets, as it will only compare the merge changeset against
+ its first parent. Also, only files different from BOTH parents
+ will appear in files:.
+ """
+
+ get = util.cachefunc(lambda r: repo[r].changeset())
+ changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
+
+ limit = cmdutil.loglimit(opts)
+ count = 0
+
+ if opts.get('copies') and opts.get('rev'):
+ endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
+ else:
+ endrev = len(repo)
+ rcache = {}
+ ncache = {}
+ def getrenamed(fn, rev):
+ '''looks up all renames for a file (up to endrev) the first
+ time the file is given. It indexes on the changerev and only
+ parses the manifest if linkrev != changerev.
+ Returns rename info for fn at changerev rev.'''
+ if fn not in rcache:
+ rcache[fn] = {}
+ ncache[fn] = {}
+ fl = repo.file(fn)
+ for i in fl:
+ node = fl.node(i)
+ lr = fl.linkrev(i)
+ renamed = fl.renamed(node)
+ rcache[fn][lr] = renamed
+ if renamed:
+ ncache[fn][node] = renamed
+ if lr >= endrev:
+ break
+ if rev in rcache[fn]:
+ return rcache[fn][rev]
+
+ # If linkrev != rev (i.e. rev not found in rcache) fallback to
+ # filectx logic.
+
+ try:
+ return repo[rev][fn].renamed()
+ except error.LookupError:
+ pass
+ return None
+
+ df = False
+ if opts["date"]:
+ df = util.matchdate(opts["date"])
+
+ only_branches = opts.get('only_branch')
+
+ displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
+ for st, rev, fns in changeiter:
+ if st == 'add':
+ parents = [p for p in repo.changelog.parentrevs(rev)
+ if p != nullrev]
+ if opts.get('no_merges') and len(parents) == 2:
+ continue
+ if opts.get('only_merges') and len(parents) != 2:
+ continue
+
+ if only_branches:
+ revbranch = get(rev)[5]['branch']
+ if revbranch not in only_branches:
+ continue
+
+ if df:
+ changes = get(rev)
+ if not df(changes[2][0]):
+ continue
+
+ if opts.get('keyword'):
+ changes = get(rev)
+ miss = 0
+ for k in [kw.lower() for kw in opts['keyword']]:
+ if not (k in changes[1].lower() or
+ k in changes[4].lower() or
+ k in " ".join(changes[3]).lower()):
+ miss = 1
+ break
+ if miss:
+ continue
+
+ if opts['user']:
+ changes = get(rev)
+ if not [k for k in opts['user'] if k in changes[1]]:
+ continue
+
+ copies = []
+ if opts.get('copies') and rev:
+ for fn in get(rev)[3]:
+ rename = getrenamed(fn, rev)
+ if rename:
+ copies.append((fn, rename[0]))
+ displayer.show(context.changectx(repo, rev), copies=copies)
+ elif st == 'iter':
+ if count == limit: break
+ if displayer.flush(rev):
+ count += 1
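+
+# A usage sketch (hypothetical filters):
+#
+#   $ hg log -l 5                    # five most recent changesets
+#   $ hg log -f src/module.py        # follow renames/copies of one file
+#   $ hg log -k overflow -u alice    # keyword and user filters combined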
+
+def manifest(ui, repo, node=None, rev=None):
+ """output the current or given revision of the project manifest
+
+ Print a list of version controlled files for the given revision.
+ If no revision is given, the first parent of the working directory
+ is used, or the null revision if no revision is checked out.
+
+ With -v, print file permissions, symlink and executable bits.
+ With --debug, print file revision hashes.
+ """
+
+ if rev and node:
+ raise util.Abort(_("please specify just one revision"))
+
+ if not node:
+ node = rev
+
+ decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
+ ctx = repo[node]
+ for f in ctx:
+ if ui.debugflag:
+ ui.write("%40s " % hex(ctx.manifest()[f]))
+ if ui.verbose:
+ ui.write(decor[ctx.flags(f)])
+ ui.write("%s\n" % f)
+
+def merge(ui, repo, node=None, **opts):
+ """merge working directory with another revision
+
+ The current working directory is updated with all changes made in
+ the requested revision since the last common predecessor revision.
+
+ Files that changed between either parent are marked as changed for
+ the next commit and a commit must be performed before any further
+ updates to the repository are allowed. The next commit will have
+ two parents.
+
+ If no revision is specified, the working directory's parent is a
+ head revision, and the current branch contains exactly one other
+    head, the other head is merged with by default. Otherwise, an
+    explicit revision with which to merge must be provided.
+ """
+
+ if opts.get('rev') and node:
+ raise util.Abort(_("please specify just one revision"))
+ if not node:
+ node = opts.get('rev')
+
+ if not node:
+ branch = repo.changectx(None).branch()
+ bheads = repo.branchheads(branch)
+ if len(bheads) > 2:
+ raise util.Abort(_("branch '%s' has %d heads - "
+ "please merge with an explicit rev") %
+ (branch, len(bheads)))
+
+ parent = repo.dirstate.parents()[0]
+ if len(bheads) == 1:
+ if len(repo.heads()) > 1:
+ raise util.Abort(_("branch '%s' has one head - "
+ "please merge with an explicit rev") %
+ branch)
+ msg = _('there is nothing to merge')
+ if parent != repo.lookup(repo[None].branch()):
+ msg = _('%s - use "hg update" instead') % msg
+ raise util.Abort(msg)
+
+ if parent not in bheads:
+ raise util.Abort(_('working dir not at a head rev - '
+ 'use "hg update" or merge with an explicit rev'))
+ node = parent == bheads[0] and bheads[-1] or bheads[0]
+
+ if opts.get('preview'):
+ p1 = repo['.']
+ p2 = repo[node]
+ common = p1.ancestor(p2)
+ roots, heads = [common.node()], [p2.node()]
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for node in repo.changelog.nodesbetween(roots=roots, heads=heads)[0]:
+ displayer.show(repo[node])
+ return 0
+
+ return hg.merge(repo, node, force=opts.get('force'))
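+
+# A usage sketch (hypothetical revision):
+#
+#   $ hg merge --preview 4217    # list changesets the merge would bring in
+#   $ hg merge 4217              # merge, then commit the result
+#   $ hg commit -m "merge"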
+
+def outgoing(ui, repo, dest=None, **opts):
+ """show changesets not found in destination
+
+ Show changesets not found in the specified destination repository
+ or the default push location. These are the changesets that would
+ be pushed if a push was requested.
+
+ See pull for valid destination format details.
+ """
+ limit = cmdutil.loglimit(opts)
+ dest, revs, checkout = hg.parseurl(
+ ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+
+ other = hg.repository(cmdutil.remoteui(repo, opts), dest)
+ ui.status(_('comparing with %s\n') % url.hidepassword(dest))
+ o = repo.findoutgoing(other, force=opts.get('force'))
+ if not o:
+ ui.status(_("no changes found\n"))
+ return 1
+ o = repo.changelog.nodesbetween(o, revs)[0]
+ if opts.get('newest_first'):
+ o.reverse()
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ count = 0
+ for n in o:
+ if count >= limit:
+ break
+ parents = [p for p in repo.changelog.parents(n) if p != nullid]
+ if opts.get('no_merges') and len(parents) == 2:
+ continue
+ count += 1
+ displayer.show(repo[n])
+
+def parents(ui, repo, file_=None, **opts):
+ """show the parents of the working directory or revision
+
+ Print the working directory's parent revisions. If a revision is
+ given via -r/--rev, the parent of that revision will be printed.
+ If a file argument is given, the revision in which the file was
+ last changed (before the working directory revision or the
+ argument to --rev if given) is printed.
+ """
+ rev = opts.get('rev')
+ if rev:
+ ctx = repo[rev]
+ else:
+ ctx = repo[None]
+
+ if file_:
+ m = cmdutil.match(repo, (file_,), opts)
+ if m.anypats() or len(m.files()) != 1:
+ raise util.Abort(_('can only specify an explicit filename'))
+ file_ = m.files()[0]
+ filenodes = []
+ for cp in ctx.parents():
+ if not cp:
+ continue
+ try:
+ filenodes.append(cp.filenode(file_))
+ except error.LookupError:
+ pass
+ if not filenodes:
+ raise util.Abort(_("'%s' not found in manifest!") % file_)
+ fl = repo.file(file_)
+ p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
+ else:
+ p = [cp.node() for cp in ctx.parents()]
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for n in p:
+ if n != nullid:
+ displayer.show(repo[n])
+
+def paths(ui, repo, search=None):
+ """show aliases for remote repositories
+
+ Show definition of symbolic path name NAME. If no name is given,
+ show definition of all available names.
+
+ Path names are defined in the [paths] section of /etc/mercurial/hgrc
+ and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
+
+ See 'hg help urls' for more information.
+ """
+ if search:
+ for name, path in ui.configitems("paths"):
+ if name == search:
+ ui.write("%s\n" % url.hidepassword(path))
+ return
+ ui.warn(_("not found!\n"))
+ return 1
+ else:
+ for name, path in ui.configitems("paths"):
+ ui.write("%s = %s\n" % (name, url.hidepassword(path)))
+
+def postincoming(ui, repo, modheads, optupdate, checkout):
+ if modheads == 0:
+ return
+ if optupdate:
+ if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
+ return hg.update(repo, checkout)
+ else:
+ ui.status(_("not updating, since new heads added\n"))
+ if modheads > 1:
+ ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
+ else:
+ ui.status(_("(run 'hg update' to get a working copy)\n"))
+
+def pull(ui, repo, source="default", **opts):
+ """pull changes from the specified source
+
+ Pull changes from a remote repository to a local one.
+
+ This finds all changes from the repository at the specified path
+ or URL and adds them to a local repository (the current one unless
+ -R is specified). By default, this does not update the copy of the
+ project in the working directory.
+
+ Use hg incoming if you want to see what would have been added by a
+    pull at the time you issued this command. If you then decide to
+    add those changes to the repository, you should use pull -r X
+ where X is the last changeset listed by hg incoming.
+
+ If SOURCE is omitted, the 'default' path will be used.
+ See 'hg help urls' for more information.
+ """
+ source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
+ other = hg.repository(cmdutil.remoteui(repo, opts), source)
+ ui.status(_('pulling from %s\n') % url.hidepassword(source))
+ if revs:
+ try:
+ revs = [other.lookup(rev) for rev in revs]
+ except error.CapabilityError:
+ err = _("Other repository doesn't support revision lookup, "
+ "so a rev cannot be specified.")
+ raise util.Abort(err)
+
+ modheads = repo.pull(other, heads=revs, force=opts.get('force'))
+ return postincoming(ui, repo, modheads, opts.get('update'), checkout)
+
+def push(ui, repo, dest=None, **opts):
+ """push changes to the specified destination
+
+ Push changes from the local repository to the given destination.
+
+ This is the symmetrical operation for pull. It moves changes from
+ the current repository to a different one. If the destination is
+ local this is identical to a pull in that directory from the
+ current one.
+
+ By default, push will refuse to run if it detects the result would
+ increase the number of remote heads. This generally indicates the
+ user forgot to pull and merge before pushing.
+
+ If -r/--rev is used, the named revision and all its ancestors will
+ be pushed to the remote repository.
+
+ Please see 'hg help urls' for important details about ssh://
+ URLs. If DESTINATION is omitted, a default path will be used.
+ """
+ dest, revs, checkout = hg.parseurl(
+ ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
+ other = hg.repository(cmdutil.remoteui(repo, opts), dest)
+ ui.status(_('pushing to %s\n') % url.hidepassword(dest))
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+
+ # push subrepos depth-first for coherent ordering
+ c = repo['']
+ subs = c.substate # only repos that are committed
+ for s in sorted(subs):
+ c.sub(s).push(opts.get('force'))
+
+ r = repo.push(other, opts.get('force'), revs=revs)
+ return r == 0
+
+def recover(ui, repo):
+ """roll back an interrupted transaction
+
+ Recover from an interrupted commit or pull.
+
+ This command tries to fix the repository status after an
+ interrupted operation. It should only be necessary when Mercurial
+ suggests it.
+ """
+ if repo.recover():
+ return hg.verify(repo)
+ return 1
+
+def remove(ui, repo, *pats, **opts):
+ """remove the specified files on the next commit
+
+ Schedule the indicated files for removal from the repository.
+
+ This only removes files from the current branch, not from the
+ entire project history. -A/--after can be used to remove only
+ files that have already been deleted, -f/--force can be used to
+ force deletion, and -Af can be used to remove files from the next
+ revision without deleting them from the working directory.
+
+ The following table details the behavior of remove for different
+ file states (columns) and option combinations (rows). The file
+ states are Added [A], Clean [C], Modified [M] and Missing [!]
+ (as reported by hg status). The actions are Warn, Remove (from
+ branch) and Delete (from disk).
+
+        A  C  M  !
+ none   W  RD W  R
+ -f     R  RD RD R
+ -A     W  W  W  R
+ -Af    R  R  R  R
+
+ This command schedules the files to be removed at the next commit.
+ To undo a remove before that, see hg revert.
+ """
+
+ after, force = opts.get('after'), opts.get('force')
+ if not pats and not after:
+ raise util.Abort(_('no files specified'))
+
+ m = cmdutil.match(repo, pats, opts)
+ s = repo.status(match=m, clean=True)
+ modified, added, deleted, clean = s[0], s[1], s[3], s[6]
+
+ for f in m.files():
+ if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
+ ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
+
+ def warn(files, reason):
+ for f in files:
+ ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
+ % (m.rel(f), reason))
+
+ if force:
+ remove, forget = modified + deleted + clean, added
+ elif after:
+ remove, forget = deleted, []
+ warn(modified + added + clean, _('still exists'))
+ else:
+ remove, forget = deleted + clean, []
+ warn(modified, _('is modified'))
+ warn(added, _('has been marked for add'))
+
+ for f in sorted(remove + forget):
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ repo.forget(forget)
+ repo.remove(remove, unlink=not after)
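+
+# A hypothetical session (editor's sketch) showing the -Af row of the
+# table above: stop tracking a file while keeping it on disk.
+#
+#   $ hg remove -Af kept.txt    # schedule removal, keep the working copy
+#   $ hg status kept.txt
+#   R kept.txt                  # removed from the branch, still on disk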
+
+def rename(ui, repo, *pats, **opts):
+ """rename files; equivalent of copy + remove
+
+ Mark dest as copies of sources; mark sources for deletion. If dest
+ is a directory, copies are put in that directory. If dest is a
+ file, there can only be one source.
+
+ By default, this command copies the contents of files as they
+ exist in the working directory. If invoked with -A/--after, the
+ operation is recorded, but no copying is performed.
+
+ This command takes effect at the next commit. To undo a rename
+ before that, see hg revert.
+ """
+ wlock = repo.wlock(False)
+ try:
+ return cmdutil.copy(ui, repo, pats, opts, rename=True)
+ finally:
+ wlock.release()
+
+def resolve(ui, repo, *pats, **opts):
+ """retry file merges from a merge or update
+
+ This command will cleanly retry unresolved file merges using file
+ revisions preserved from the last update or merge. To attempt to
+ resolve all unresolved files, use the -a/--all switch.
+
+ If a conflict is resolved manually, please note that the changes
+ will be overwritten if the merge is retried with resolve. The
+ -m/--mark switch should be used to mark the file as resolved.
+
+ This command also allows listing resolved files and manually
+ indicating whether or not files are resolved. All files must be
+ marked as resolved before a commit is permitted.
+
+ The codes used to show the status of files are:
+ U = unresolved
+ R = resolved
+ """
+
+ all, mark, unmark, show = [opts.get(o) for o in 'all mark unmark list'.split()]
+
+ if (show and (mark or unmark)) or (mark and unmark):
+ raise util.Abort(_("too many options specified"))
+ if pats and all:
+ raise util.Abort(_("can't specify --all and patterns"))
+ if not (all or pats or show or mark or unmark):
+ raise util.Abort(_('no files or directories specified; '
+ 'use --all to remerge all files'))
+
+ ms = merge_.mergestate(repo)
+ m = cmdutil.match(repo, pats, opts)
+
+ for f in ms:
+ if m(f):
+ if show:
+ ui.write("%s %s\n" % (ms[f].upper(), f))
+ elif mark:
+ ms.mark(f, "r")
+ elif unmark:
+ ms.mark(f, "u")
+ else:
+ wctx = repo[None]
+ mctx = wctx.parents()[-1]
+
+ # backup pre-resolve (merge uses .orig for its own purposes)
+ a = repo.wjoin(f)
+ util.copyfile(a, a + ".resolve")
+
+ # resolve file
+ ms.resolve(f, wctx, mctx)
+
+ # replace filemerge's .orig file with our resolve file
+ util.rename(a + ".resolve", a + ".orig")
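+
+# A hypothetical session (editor's sketch) using the status codes above:
+#
+#   $ hg resolve -l             # list merge state
+#   U src/main.c
+#   $ hg resolve src/main.c     # retry the file merge
+#   $ hg resolve -m src/main.c  # or mark it resolved by hand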
+
+def revert(ui, repo, *pats, **opts):
+ """restore individual files or directories to an earlier state
+
+ (Use update -r to check out earlier revisions; revert does not
+ change the working directory parents.)
+
+ With no revision specified, revert the named files or directories
+ to the contents they had in the parent of the working directory.
+ This restores the contents of the affected files to an unmodified
+ state and unschedules adds, removes, copies, and renames. If the
+ working directory has two parents, you must explicitly specify the
+ revision to revert to.
+
+ Using the -r/--rev option, revert the given files or directories
+ to their contents as of a specific revision. This can be helpful
+ to "roll back" some or all of an earlier change. See 'hg help
+ dates' for a list of formats valid for -d/--date.
+
+ Revert modifies the working directory. It does not commit any
+ changes, or change the parent of the working directory. If you
+ revert to a revision other than the parent of the working
+ directory, the reverted files will thus appear modified
+ afterwards.
+
+ If a file has been deleted, it is restored. If the executable mode
+ of a file was changed, it is reset.
+
+ If names are given, all files matching the names are reverted.
+ If no arguments are given, no files are reverted.
+
+ Modified files are saved with a .orig suffix before reverting.
+ To disable these backups, use --no-backup.
+ """
+
+ if opts["date"]:
+ if opts["rev"]:
+ raise util.Abort(_("you can't specify a revision and a date"))
+ opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
+
+ if not pats and not opts.get('all'):
+ raise util.Abort(_('no files or directories specified; '
+ 'use --all to revert the whole repo'))
+
+ parent, p2 = repo.dirstate.parents()
+ if not opts.get('rev') and p2 != nullid:
+ raise util.Abort(_('uncommitted merge - please provide a '
+ 'specific revision'))
+ ctx = repo[opts.get('rev')]
+ node = ctx.node()
+ mf = ctx.manifest()
+ if node == parent:
+ pmf = mf
+ else:
+ pmf = None
+
+ # need all matching names in dirstate and manifest of target rev,
+ # so have to walk both. do not print errors if files exist in one
+ # but not other.
+
+ names = {}
+
+ wlock = repo.wlock()
+ try:
+ # walk dirstate.
+
+ m = cmdutil.match(repo, pats, opts)
+ m.bad = lambda x,y: False
+ for abs in repo.walk(m):
+ names[abs] = m.rel(abs), m.exact(abs)
+
+ # walk target manifest.
+
+ def badfn(path, msg):
+ if path in names:
+ return
+ path_ = path + '/'
+ for f in names:
+ if f.startswith(path_):
+ return
+ ui.warn("%s: %s\n" % (m.rel(path), msg))
+
+ m = cmdutil.match(repo, pats, opts)
+ m.bad = badfn
+ for abs in repo[node].walk(m):
+ if abs not in names:
+ names[abs] = m.rel(abs), m.exact(abs)
+
+ m = cmdutil.matchfiles(repo, names)
+ changes = repo.status(match=m)[:4]
+ modified, added, removed, deleted = map(set, changes)
+
+ # if f is a rename, also revert the source
+ cwd = repo.getcwd()
+ for f in added:
+ src = repo.dirstate.copied(f)
+ if src and src not in names and repo.dirstate[src] == 'r':
+ removed.add(src)
+ names[src] = (repo.pathto(src, cwd), True)
+
+ def removeforget(abs):
+ if repo.dirstate[abs] == 'a':
+ return _('forgetting %s\n')
+ return _('removing %s\n')
+
+ revert = ([], _('reverting %s\n'))
+ add = ([], _('adding %s\n'))
+ remove = ([], removeforget)
+ undelete = ([], _('undeleting %s\n'))
+
+ disptable = (
+ # dispatch table:
+ # file state
+ # action if in target manifest
+ # action if not in target manifest
+ # make backup if in target manifest
+ # make backup if not in target manifest
+ (modified, revert, remove, True, True),
+ (added, revert, remove, True, False),
+ (removed, undelete, None, False, False),
+ (deleted, revert, remove, False, False),
+ )
+
+ for abs, (rel, exact) in sorted(names.items()):
+ mfentry = mf.get(abs)
+ target = repo.wjoin(abs)
+ def handle(xlist, dobackup):
+ xlist[0].append(abs)
+ if dobackup and not opts.get('no_backup') and util.lexists(target):
+ bakname = "%s.orig" % rel
+ ui.note(_('saving current version of %s as %s\n') %
+ (rel, bakname))
+ if not opts.get('dry_run'):
+ util.copyfile(target, bakname)
+ if ui.verbose or not exact:
+ msg = xlist[1]
+ if not isinstance(msg, basestring):
+ msg = msg(abs)
+ ui.status(msg % rel)
+ for table, hitlist, misslist, backuphit, backupmiss in disptable:
+ if abs not in table: continue
+ # file has changed in dirstate
+ if mfentry:
+ handle(hitlist, backuphit)
+ elif misslist is not None:
+ handle(misslist, backupmiss)
+ break
+ else:
+ if abs not in repo.dirstate:
+ if mfentry:
+ handle(add, True)
+ elif exact:
+ ui.warn(_('file not managed: %s\n') % rel)
+ continue
+ # file has not changed in dirstate
+ if node == parent:
+ if exact: ui.warn(_('no changes needed to %s\n') % rel)
+ continue
+ if pmf is None:
+ # only need parent manifest in this unlikely case,
+ # so do not read by default
+ pmf = repo[parent].manifest()
+ if abs in pmf:
+ if mfentry:
+ # if version of file is same in parent and target
+ # manifests, do nothing
+ if (pmf[abs] != mfentry or
+ pmf.flags(abs) != mf.flags(abs)):
+ handle(revert, False)
+ else:
+ handle(remove, False)
+
+ if not opts.get('dry_run'):
+ def checkout(f):
+ fc = ctx[f]
+ repo.wwrite(f, fc.data(), fc.flags())
+
+ audit_path = util.path_auditor(repo.root)
+ for f in remove[0]:
+ if repo.dirstate[f] == 'a':
+ repo.dirstate.forget(f)
+ continue
+ audit_path(f)
+ try:
+ util.unlink(repo.wjoin(f))
+ except OSError:
+ pass
+ repo.dirstate.remove(f)
+
+ normal = None
+ if node == parent:
+ # We're reverting to our parent. If possible, we'd like status
+ # to report the file as clean. We have to use normallookup for
+ # merges to avoid losing information about merged/dirty files.
+ if p2 != nullid:
+ normal = repo.dirstate.normallookup
+ else:
+ normal = repo.dirstate.normal
+ for f in revert[0]:
+ checkout(f)
+ if normal:
+ normal(f)
+
+ for f in add[0]:
+ checkout(f)
+ repo.dirstate.add(f)
+
+ normal = repo.dirstate.normallookup
+ if node == parent and p2 == nullid:
+ normal = repo.dirstate.normal
+ for f in undelete[0]:
+ checkout(f)
+ normal(f)
+
+ finally:
+ wlock.release()
+
+def rollback(ui, repo):
+ """roll back the last transaction
+
+ This command should be used with care. There is only one level of
+ rollback, and there is no way to undo a rollback. It will also
+ restore the dirstate at the time of the last transaction, losing
+ any dirstate changes since that time. This command does not alter
+ the working directory.
+
+ Transactions are used to encapsulate the effects of all commands
+ that create new changesets or propagate existing changesets into a
+ repository. For example, the following commands are transactional,
+ and their effects can be rolled back:
+
+ commit
+ import
+ pull
+ push (with this repository as destination)
+ unbundle
+
+ This command is not intended for use on public repositories. Once
+ changes are visible for pull by other users, rolling a transaction
+ back locally is ineffective (someone else may already have pulled
+ the changes). Furthermore, a race is possible with readers of the
+ repository; for example an in-progress pull from the repository
+ may fail if a rollback is performed.
+ """
+ repo.rollback()
+
+def root(ui, repo):
+ """print the root (top) of the current working directory
+
+ Print the root directory of the current repository.
+ """
+ ui.write(repo.root + "\n")
+
+def serve(ui, repo, **opts):
+ """export the repository via HTTP
+
+ Start a local HTTP repository browser and pull server.
+
+ By default, the server logs accesses to stdout and errors to
+ stderr. Use the -A/--accesslog and -E/--errorlog options to log to
+ files.
+ """
+
+ if opts["stdio"]:
+ if repo is None:
+ raise error.RepoError(_("There is no Mercurial repository here"
+ " (.hg not found)"))
+ s = sshserver.sshserver(ui, repo)
+ s.serve_forever()
+
+ baseui = repo and repo.baseui or ui
+ optlist = ("name templates style address port prefix ipv6"
+ " accesslog errorlog webdir_conf certificate encoding")
+ for o in optlist.split():
+ if opts.get(o, None):
+ baseui.setconfig("web", o, str(opts[o]))
+ if (repo is not None) and (repo.ui != baseui):
+ repo.ui.setconfig("web", o, str(opts[o]))
+
+ if repo is None and not ui.config("web", "webdir_conf"):
+ raise error.RepoError(_("There is no Mercurial repository here"
+ " (.hg not found)"))
+
+ class service(object):
+ def init(self):
+ util.set_signal_handler()
+ self.httpd = server.create_server(baseui, repo)
+
+ if not ui.verbose: return
+
+ if self.httpd.prefix:
+ prefix = self.httpd.prefix.strip('/') + '/'
+ else:
+ prefix = ''
+
+ port = ':%d' % self.httpd.port
+ if port == ':80':
+ port = ''
+
+ bindaddr = self.httpd.addr
+ if bindaddr == '0.0.0.0':
+ bindaddr = '*'
+ elif ':' in bindaddr: # IPv6
+ bindaddr = '[%s]' % bindaddr
+
+ fqaddr = self.httpd.fqaddr
+ if ':' in fqaddr:
+ fqaddr = '[%s]' % fqaddr
+ ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
+ (fqaddr, port, prefix, bindaddr, self.httpd.port))
+
+ def run(self):
+ self.httpd.serve_forever()
+
+ service = service()
+
+ cmdutil.service(opts, initfn=service.init, runfn=service.run)
+
+def status(ui, repo, *pats, **opts):
+ """show changed files in the working directory
+
+ Show status of files in the repository. If names are given, only
+ files that match are shown. Files that are clean or ignored or
+ the source of a copy/move operation are not listed unless
+ -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
+ Unless options described with "show only ..." are given, the
+ options -mardu are used.
+
+ Option -q/--quiet hides untracked (unknown and ignored) files
+ unless explicitly requested with -u/--unknown or -i/--ignored.
+
+ NOTE: status may appear to disagree with diff if permissions have
+ changed or a merge has occurred. The standard diff format does not
+ report permission changes and diff only reports changes relative
+ to one merge parent.
+
+ If one revision is given, it is used as the base revision.
+ If two revisions are given, the differences between them are
+ shown.
+
+ The codes used to show the status of files are:
+ M = modified
+ A = added
+ R = removed
+ C = clean
+ ! = missing (deleted by non-hg command, but still tracked)
+ ? = not tracked
+ I = ignored
+   = origin of the previous file listed as A (added)
+ """
+
+ node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
+ cwd = (pats and repo.getcwd()) or ''
+ end = opts.get('print0') and '\0' or '\n'
+ copy = {}
+ states = 'modified added removed deleted unknown ignored clean'.split()
+ show = [k for k in states if opts.get(k)]
+ if opts.get('all'):
+ show += ui.quiet and (states[:4] + ['clean']) or states
+ if not show:
+ show = ui.quiet and states[:4] or states[:5]
+
+ stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
+ 'ignored' in show, 'clean' in show, 'unknown' in show)
+ changestates = zip(states, 'MAR!?IC', stat)
+
+ if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
+ ctxn = repo[nullid]
+ ctx1 = repo[node1]
+ ctx2 = repo[node2]
+ added = stat[1]
+ if node2 is None:
+ added = stat[0] + stat[1] # merged?
+
+ for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
+ if k in added:
+ copy[k] = v
+ elif v in added:
+ copy[v] = k
+
+ for state, char, files in changestates:
+ if state in show:
+ format = "%s %%s%s" % (char, end)
+ if opts.get('no_status'):
+ format = "%%s%s" % end
+
+ for f in files:
+ ui.write(format % repo.pathto(f, cwd))
+ if f in copy:
+ ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
+
+def tag(ui, repo, name1, *names, **opts):
+ """add one or more tags for the current or given revision
+
+ Name a particular revision using <name>.
+
+ Tags are used to name particular revisions of the repository and are
+ very useful to compare different revisions, to go back to significant
+ earlier versions or to mark branch points as releases, etc.
+
+ If no revision is given, the parent of the working directory is
+ used, or tip if no revision is checked out.
+
+ To facilitate version control, distribution, and merging of tags,
+ they are stored as a file named ".hgtags" which is managed
+ similarly to other project files and can be hand-edited if
+ necessary. The file '.hg/localtags' is used for local tags (not
+ shared among repositories).
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ """
+
+ rev_ = "."
+ names = (name1,) + names
+ if len(names) != len(set(names)):
+ raise util.Abort(_('tag names must be unique'))
+ for n in names:
+ if n in ['tip', '.', 'null']:
+ raise util.Abort(_('the name \'%s\' is reserved') % n)
+ if opts.get('rev') and opts.get('remove'):
+ raise util.Abort(_("--rev and --remove are incompatible"))
+ if opts.get('rev'):
+ rev_ = opts['rev']
+ message = opts.get('message')
+ if opts.get('remove'):
+ expectedtype = opts.get('local') and 'local' or 'global'
+ for n in names:
+ if not repo.tagtype(n):
+ raise util.Abort(_('tag \'%s\' does not exist') % n)
+ if repo.tagtype(n) != expectedtype:
+ if expectedtype == 'global':
+ raise util.Abort(_('tag \'%s\' is not a global tag') % n)
+ else:
+ raise util.Abort(_('tag \'%s\' is not a local tag') % n)
+ rev_ = nullid
+ if not message:
+ # we don't translate commit messages
+ message = 'Removed tag %s' % ', '.join(names)
+ elif not opts.get('force'):
+ for n in names:
+ if n in repo.tags():
+ raise util.Abort(_('tag \'%s\' already exists '
+ '(use -f to force)') % n)
+ if not rev_ and repo.dirstate.parents()[1] != nullid:
+ raise util.Abort(_('uncommitted merge - please provide a '
+ 'specific revision'))
+ r = repo[rev_].node()
+
+ if not message:
+ # we don't translate commit messages
+ message = ('Added tag %s for changeset %s' %
+ (', '.join(names), short(r)))
+
+ date = opts.get('date')
+ if date:
+ date = util.parsedate(date)
+
+ repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
+
+def tags(ui, repo):
+ """list repository tags
+
+ This lists both regular and local tags. When the -v/--verbose
+ switch is used, a third column "local" is printed for local tags.
+ """
+
+ hexfunc = ui.debugflag and hex or short
+ tagtype = ""
+
+ for t, n in reversed(repo.tagslist()):
+ if ui.quiet:
+ ui.write("%s\n" % t)
+ continue
+
+ try:
+ hn = hexfunc(n)
+ r = "%5d:%s" % (repo.changelog.rev(n), hn)
+ except error.LookupError:
+ r = " ?:%s" % hn
+ else:
+ spaces = " " * (30 - encoding.colwidth(t))
+ if ui.verbose:
+ if repo.tagtype(t) == 'local':
+ tagtype = " local"
+ else:
+ tagtype = ""
+ ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
+
+def tip(ui, repo, **opts):
+ """show the tip revision
+
+ The tip revision (usually just called the tip) is the changeset
+ most recently added to the repository (and therefore the most
+ recently changed head).
+
+ If you have just made a commit, that commit will be the tip. If
+ you have just pulled changes from another repository, the tip of
+ that repository becomes the current tip. The "tip" tag is special
+ and cannot be renamed or assigned to a different changeset.
+ """
+ cmdutil.show_changeset(ui, repo, opts).show(repo[len(repo) - 1])
+
+def unbundle(ui, repo, fname1, *fnames, **opts):
+ """apply one or more changegroup files
+
+ Apply one or more compressed changegroup files generated by the
+ bundle command.
+ """
+ fnames = (fname1,) + fnames
+
+ lock = repo.lock()
+ try:
+ for fname in fnames:
+ f = url.open(ui, fname)
+ gen = changegroup.readbundle(f, fname)
+ modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
+ finally:
+ lock.release()
+
+ return postincoming(ui, repo, modheads, opts.get('update'), None)
+
+def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
+ """update working directory
+
+ Update the repository's working directory to the specified
+ revision, or the tip of the current branch if none is specified.
+ Use null as the revision to remove the working copy (like 'hg
+ clone -U').
+
+ When the working directory contains no uncommitted changes, it
+ will be replaced by the state of the requested revision from the
+ repository. When the requested revision is on a different branch,
+ the working directory will additionally be switched to that
+ branch.
+
+ When there are uncommitted changes, use option -C/--clean to
+ discard them, forcibly replacing the state of the working
+ directory with the requested revision. Alternately, use -c/--check
+ to abort.
+
+ When there are uncommitted changes and option -C/--clean is not
+ used, and the parent revision and requested revision are on the
+ same branch, and one of them is an ancestor of the other, then the
+ new working directory will contain the requested revision merged
+ with the uncommitted changes. Otherwise, the update will fail with
+ a suggestion to use 'merge' or 'update -C' instead.
+
+ If you want to update just one file to an older revision, use
+ revert.
+
+ See 'hg help dates' for a list of formats valid for -d/--date.
+ """
+ if rev and node:
+ raise util.Abort(_("please specify just one revision"))
+
+ if not rev:
+ rev = node
+
+ if not clean and check:
+ # we could use dirty() but we can ignore merge and branch trivia
+ c = repo[None]
+ if c.modified() or c.added() or c.removed():
+ raise util.Abort(_("uncommitted local changes"))
+
+ if date:
+ if rev:
+ raise util.Abort(_("you can't specify a revision and a date"))
+ rev = cmdutil.finddate(ui, repo, date)
+
+ if clean or check:
+ return hg.clean(repo, rev)
+ else:
+ return hg.update(repo, rev)
+
+def verify(ui, repo):
+ """verify the integrity of the repository
+
+ Verify the integrity of the current repository.
+
+ This will perform an extensive check of the repository's
+ integrity, validating the hashes and checksums of each entry in
+ the changelog, manifest, and tracked files, as well as the
+ integrity of their crosslinks and indices.
+ """
+ return hg.verify(repo)
+
+def version_(ui):
+ """output version and copyright information"""
+ ui.write(_("Mercurial Distributed SCM (version %s)\n")
+ % util.version())
+ ui.status(_(
+ "\nCopyright (C) 2005-2009 Matt Mackall and others\n"
+ "This is free software; see the source for copying conditions. "
+ "There is NO\nwarranty; "
+ "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
+ ))
+
+# Command options and aliases are listed here, alphabetically
+
+globalopts = [
+ ('R', 'repository', '',
+ _('repository root directory or symbolic path name')),
+ ('', 'cwd', '', _('change working directory')),
+ ('y', 'noninteractive', None,
+ _('do not prompt, assume \'yes\' for any required answers')),
+ ('q', 'quiet', None, _('suppress output')),
+ ('v', 'verbose', None, _('enable additional output')),
+ ('', 'config', [], _('set/override config option')),
+ ('', 'debug', None, _('enable debugging output')),
+ ('', 'debugger', None, _('start debugger')),
+ ('', 'encoding', encoding.encoding, _('set the charset encoding')),
+ ('', 'encodingmode', encoding.encodingmode,
+ _('set the charset encoding mode')),
+ ('', 'traceback', None, _('print traceback on exception')),
+ ('', 'time', None, _('time how long the command takes')),
+ ('', 'profile', None, _('print command execution profile')),
+ ('', 'version', None, _('output version information and exit')),
+ ('h', 'help', None, _('display help and exit')),
+]
+
+dryrunopts = [('n', 'dry-run', None,
+ _('do not perform actions, just print output'))]
+
+remoteopts = [
+ ('e', 'ssh', '', _('specify ssh command to use')),
+ ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
+]
+
+walkopts = [
+ ('I', 'include', [], _('include names matching the given patterns')),
+ ('X', 'exclude', [], _('exclude names matching the given patterns')),
+]
+
+commitopts = [
+ ('m', 'message', '', _('use <text> as commit message')),
+ ('l', 'logfile', '', _('read commit message from <file>')),
+]
+
+commitopts2 = [
+ ('d', 'date', '', _('record datecode as commit date')),
+ ('u', 'user', '', _('record the specified user as committer')),
+]
+
+templateopts = [
+ ('', 'style', '', _('display using template map file')),
+ ('', 'template', '', _('display with template')),
+]
+
+logopts = [
+ ('p', 'patch', None, _('show patch')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('l', 'limit', '', _('limit number of changes displayed')),
+ ('M', 'no-merges', None, _('do not show merges')),
+] + templateopts
+
+diffopts = [
+ ('a', 'text', None, _('treat all files as text')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ('', 'nodates', None, _("don't include dates in diff headers"))
+]
+
+diffopts2 = [
+ ('p', 'show-function', None, _('show which function each change is in')),
+ ('w', 'ignore-all-space', None,
+ _('ignore white space when comparing lines')),
+ ('b', 'ignore-space-change', None,
+ _('ignore changes in the amount of white space')),
+ ('B', 'ignore-blank-lines', None,
+ _('ignore changes whose lines are all blank')),
+ ('U', 'unified', '', _('number of lines of context to show'))
+]
+
+similarityopts = [
+ ('s', 'similarity', '',
+ _('guess renamed files by similarity (0<=s<=100)'))
+]
+
+table = {
+ "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
+ "addremove":
+ (addremove, similarityopts + walkopts + dryrunopts,
+ _('[OPTION]... [FILE]...')),
+ "^annotate|blame":
+ (annotate,
+ [('r', 'rev', '', _('annotate the specified revision')),
+ ('f', 'follow', None, _('follow file copies and renames')),
+ ('a', 'text', None, _('treat all files as text')),
+ ('u', 'user', None, _('list the author (long with -v)')),
+ ('d', 'date', None, _('list the date (short with -q)')),
+ ('n', 'number', None, _('list the revision number (default)')),
+ ('c', 'changeset', None, _('list the changeset')),
+ ('l', 'line-number', None,
+ _('show line number at the first appearance'))
+ ] + walkopts,
+ _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
+ "archive":
+ (archive,
+ [('', 'no-decode', None, _('do not pass files through decoders')),
+ ('p', 'prefix', '', _('directory prefix for files in archive')),
+ ('r', 'rev', '', _('revision to distribute')),
+ ('t', 'type', '', _('type of distribution to create')),
+ ] + walkopts,
+ _('[OPTION]... DEST')),
+ "backout":
+ (backout,
+ [('', 'merge', None,
+ _('merge with old dirstate parent after backout')),
+ ('', 'parent', '', _('parent to choose when backing out merge')),
+ ('r', 'rev', '', _('revision to backout')),
+ ] + walkopts + commitopts + commitopts2,
+ _('[OPTION]... [-r] REV')),
+ "bisect":
+ (bisect,
+ [('r', 'reset', False, _('reset bisect state')),
+ ('g', 'good', False, _('mark changeset good')),
+ ('b', 'bad', False, _('mark changeset bad')),
+ ('s', 'skip', False, _('skip testing changeset')),
+ ('c', 'command', '', _('use command to check changeset state')),
+ ('U', 'noupdate', False, _('do not update to target'))],
+ _("[-gbsr] [-c CMD] [REV]")),
+ "branch":
+ (branch,
+ [('f', 'force', None,
+ _('set branch name even if it shadows an existing branch')),
+ ('C', 'clean', None, _('reset branch name to parent branch name'))],
+ _('[-fC] [NAME]')),
+ "branches":
+ (branches,
+ [('a', 'active', False,
+ _('show only branches that have unmerged heads')),
+ ('c', 'closed', False,
+ _('show normal and closed branches'))],
+ _('[-a]')),
+ "bundle":
+ (bundle,
+ [('f', 'force', None,
+ _('run even when remote repository is unrelated')),
+ ('r', 'rev', [],
+ _('a changeset up to which you would like to bundle')),
+ ('', 'base', [],
+ _('a base changeset to specify instead of a destination')),
+ ('a', 'all', None, _('bundle all changesets in the repository')),
+ ('t', 'type', 'bzip2', _('bundle compression type to use')),
+ ] + remoteopts,
+ _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
+ "cat":
+ (cat,
+ [('o', 'output', '', _('print output to file with formatted name')),
+ ('r', 'rev', '', _('print the given revision')),
+ ('', 'decode', None, _('apply any matching decode filter')),
+ ] + walkopts,
+ _('[OPTION]... FILE...')),
+ "^clone":
+ (clone,
+ [('U', 'noupdate', None,
+ _('the clone will only contain a repository (no working copy)')),
+ ('r', 'rev', [],
+ _('a changeset you would like to have after cloning')),
+ ('', 'pull', None, _('use pull protocol to copy metadata')),
+ ('', 'uncompressed', None,
+ _('use uncompressed transfer (fast over LAN)')),
+ ] + remoteopts,
+ _('[OPTION]... SOURCE [DEST]')),
+ "^commit|ci":
+ (commit,
+ [('A', 'addremove', None,
+ _('mark new/missing files as added/removed before committing')),
+ ('', 'close-branch', None,
+ _('mark a branch as closed, hiding it from the branch list')),
+ ] + walkopts + commitopts + commitopts2,
+ _('[OPTION]... [FILE]...')),
+ "copy|cp":
+ (copy,
+ [('A', 'after', None, _('record a copy that has already occurred')),
+ ('f', 'force', None,
+ _('forcibly copy over an existing managed file')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... [SOURCE]... DEST')),
+ "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
+ "debugcheckstate": (debugcheckstate, []),
+ "debugcommands": (debugcommands, [], _('[COMMAND]')),
+ "debugcomplete":
+ (debugcomplete,
+ [('o', 'options', None, _('show the command options'))],
+ _('[-o] CMD')),
+ "debugdate":
+ (debugdate,
+ [('e', 'extended', None, _('try extended date formats'))],
+ _('[-e] DATE [RANGE]')),
+ "debugdata": (debugdata, [], _('FILE REV')),
+ "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
+ "debugindex": (debugindex, [], _('FILE')),
+ "debugindexdot": (debugindexdot, [], _('FILE')),
+ "debuginstall": (debuginstall, []),
+ "debugrebuildstate":
+ (debugrebuildstate,
+ [('r', 'rev', '', _('revision to rebuild to'))],
+ _('[-r REV] [REV]')),
+ "debugrename":
+ (debugrename,
+ [('r', 'rev', '', _('revision to debug'))],
+ _('[-r REV] FILE')),
+ "debugsetparents":
+ (debugsetparents, [], _('REV1 [REV2]')),
+ "debugstate":
+ (debugstate,
+ [('', 'nodates', None, _('do not display the saved mtime'))],
+ _('[OPTION]...')),
+ "debugsub":
+ (debugsub,
+ [('r', 'rev', '', _('revision to check'))],
+ _('[-r REV] [REV]')),
+ "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
+ "^diff":
+ (diff,
+ [('r', 'rev', [], _('revision')),
+ ('c', 'change', '', _('change made by revision'))
+ ] + diffopts + diffopts2 + walkopts,
+ _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
+ "^export":
+ (export,
+ [('o', 'output', '', _('print output to file with formatted name')),
+ ('', 'switch-parent', None, _('diff against the second parent'))
+ ] + diffopts,
+ _('[OPTION]... [-o OUTFILESPEC] REV...')),
+ "^forget":
+ (forget,
+ [] + walkopts,
+ _('[OPTION]... FILE...')),
+ "grep":
+ (grep,
+ [('0', 'print0', None, _('end fields with NUL')),
+ ('', 'all', None, _('print all revisions that match')),
+ ('f', 'follow', None,
+ _('follow changeset history, or file history across copies and renames')),
+ ('i', 'ignore-case', None, _('ignore case when matching')),
+ ('l', 'files-with-matches', None,
+ _('print only filenames and revisions that match')),
+ ('n', 'line-number', None, _('print matching line numbers')),
+ ('r', 'rev', [], _('search in given revision range')),
+ ('u', 'user', None, _('list the author (long with -v)')),
+ ('d', 'date', None, _('list the date (short with -q)')),
+ ] + walkopts,
+ _('[OPTION]... PATTERN [FILE]...')),
+ "heads":
+ (heads,
+ [('r', 'rev', '', _('show only heads which are descendants of REV')),
+ ('a', 'active', False,
+ _('show only the active branch heads from open branches')),
+ ('c', 'closed', False,
+ _('show normal and closed branch heads')),
+ ] + templateopts,
+ _('[-r STARTREV] [REV]...')),
+ "help": (help_, [], _('[TOPIC]')),
+ "identify|id":
+ (identify,
+ [('r', 'rev', '', _('identify the specified revision')),
+ ('n', 'num', None, _('show local revision number')),
+ ('i', 'id', None, _('show global revision id')),
+ ('b', 'branch', None, _('show branch')),
+ ('t', 'tags', None, _('show tags'))],
+ _('[-nibt] [-r REV] [SOURCE]')),
+ "import|patch":
+ (import_,
+ [('p', 'strip', 1,
+ _('directory strip option for patch. This has the same '
+ 'meaning as the corresponding patch option')),
+ ('b', 'base', '', _('base path')),
+ ('f', 'force', None,
+ _('skip check for outstanding uncommitted changes')),
+ ('', 'no-commit', None, _("don't commit, just update the working directory")),
+ ('', 'exact', None,
+ _('apply patch to the nodes from which it was generated')),
+ ('', 'import-branch', None,
+ _('use any branch information in patch (implied by --exact)'))] +
+ commitopts + commitopts2 + similarityopts,
+ _('[OPTION]... PATCH...')),
+ "incoming|in":
+ (incoming,
+ [('f', 'force', None,
+ _('run even when remote repository is unrelated')),
+ ('n', 'newest-first', None, _('show newest record first')),
+ ('', 'bundle', '', _('file to store the bundles into')),
+ ('r', 'rev', [],
+ _('a specific revision up to which you would like to pull')),
+ ] + logopts + remoteopts,
+ _('[-p] [-n] [-M] [-f] [-r REV]...'
+ ' [--bundle FILENAME] [SOURCE]')),
+ "^init":
+ (init,
+ remoteopts,
+ _('[-e CMD] [--remotecmd CMD] [DEST]')),
+ "locate":
+ (locate,
+ [('r', 'rev', '', _('search the repository as it stood at REV')),
+ ('0', 'print0', None,
+ _('end filenames with NUL, for use with xargs')),
+ ('f', 'fullpath', None,
+ _('print complete paths from the filesystem root')),
+ ] + walkopts,
+ _('[OPTION]... [PATTERN]...')),
+ "^log|history":
+ (log,
+ [('f', 'follow', None,
+ _('follow changeset history, or file history across copies and renames')),
+ ('', 'follow-first', None,
+ _('only follow the first parent of merge changesets')),
+ ('d', 'date', '', _('show revisions matching date spec')),
+ ('C', 'copies', None, _('show copied files')),
+ ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
+ ('r', 'rev', [], _('show the specified revision or range')),
+ ('', 'removed', None, _('include revisions where files were removed')),
+ ('m', 'only-merges', None, _('show only merges')),
+ ('u', 'user', [], _('revisions committed by user')),
+ ('b', 'only-branch', [],
+ _('show only changesets within the given named branch')),
+ ('P', 'prune', [], _('do not display revision or any of its ancestors')),
+ ] + logopts + walkopts,
+ _('[OPTION]... [FILE]')),
+ "manifest":
+ (manifest,
+ [('r', 'rev', '', _('revision to display'))],
+ _('[-r REV]')),
+ "^merge":
+ (merge,
+ [('f', 'force', None, _('force a merge with outstanding changes')),
+ ('r', 'rev', '', _('revision to merge')),
+ ('P', 'preview', None,
+ _('review revisions to merge (no merge is performed)'))],
+ _('[-f] [[-r] REV]')),
+ "outgoing|out":
+ (outgoing,
+ [('f', 'force', None,
+ _('run even when remote repository is unrelated')),
+ ('r', 'rev', [],
+ _('a specific revision up to which you would like to push')),
+ ('n', 'newest-first', None, _('show newest record first')),
+ ] + logopts + remoteopts,
+ _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
+ "^parents":
+ (parents,
+ [('r', 'rev', '', _('show parents from the specified revision')),
+ ] + templateopts,
+ _('[-r REV] [FILE]')),
+ "paths": (paths, [], _('[NAME]')),
+ "^pull":
+ (pull,
+ [('u', 'update', None,
+ _('update to new tip if changesets were pulled')),
+ ('f', 'force', None,
+ _('run even when remote repository is unrelated')),
+ ('r', 'rev', [],
+ _('a specific revision up to which you would like to pull')),
+ ] + remoteopts,
+ _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
+ "^push":
+ (push,
+ [('f', 'force', None, _('force push')),
+ ('r', 'rev', [],
+ _('a specific revision up to which you would like to push')),
+ ] + remoteopts,
+ _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
+ "recover": (recover, []),
+ "^remove|rm":
+ (remove,
+ [('A', 'after', None, _('record delete for missing files')),
+ ('f', 'force', None,
+ _('remove (and delete) file even if added or modified')),
+ ] + walkopts,
+ _('[OPTION]... FILE...')),
+ "rename|mv":
+ (rename,
+ [('A', 'after', None, _('record a rename that has already occurred')),
+ ('f', 'force', None,
+ _('forcibly copy over an existing managed file')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... SOURCE... DEST')),
+ "resolve":
+ (resolve,
+ [('a', 'all', None, _('remerge all unresolved files')),
+ ('l', 'list', None, _('list state of files needing merge')),
+ ('m', 'mark', None, _('mark files as resolved')),
+ ('u', 'unmark', None, _('unmark files as resolved'))]
+ + walkopts,
+ _('[OPTION]... [FILE]...')),
+ "revert":
+ (revert,
+ [('a', 'all', None, _('revert all changes when no arguments given')),
+ ('d', 'date', '', _('tipmost revision matching date')),
+ ('r', 'rev', '', _('revision to revert to')),
+ ('', 'no-backup', None, _('do not save backup copies of files')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... [-r REV] [NAME]...')),
+ "rollback": (rollback, []),
+ "root": (root, []),
+ "^serve":
+ (serve,
+ [('A', 'accesslog', '', _('name of access log file to write to')),
+ ('d', 'daemon', None, _('run server in background')),
+ ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
+ ('E', 'errorlog', '', _('name of error log file to write to')),
+ ('p', 'port', 0, _('port to listen on (default: 8000)')),
+ ('a', 'address', '', _('address to listen on (default: all interfaces)')),
+ ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
+ ('n', 'name', '',
+ _('name to show in web pages (default: working directory)')),
+ ('', 'webdir-conf', '', _('name of the webdir config file'
+ ' (serve more than one repository)')),
+ ('', 'pid-file', '', _('name of file to write process ID to')),
+ ('', 'stdio', None, _('for remote clients')),
+ ('t', 'templates', '', _('web templates to use')),
+ ('', 'style', '', _('template style to use')),
+ ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
+ ('', 'certificate', '', _('SSL certificate file'))],
+ _('[OPTION]...')),
+ "showconfig|debugconfig":
+ (showconfig,
+ [('u', 'untrusted', None, _('show untrusted configuration options'))],
+ _('[-u] [NAME]...')),
+ "^status|st":
+ (status,
+ [('A', 'all', None, _('show status of all files')),
+ ('m', 'modified', None, _('show only modified files')),
+ ('a', 'added', None, _('show only added files')),
+ ('r', 'removed', None, _('show only removed files')),
+ ('d', 'deleted', None, _('show only deleted (but tracked) files')),
+ ('c', 'clean', None, _('show only files without changes')),
+ ('u', 'unknown', None, _('show only unknown (not tracked) files')),
+ ('i', 'ignored', None, _('show only ignored files')),
+ ('n', 'no-status', None, _('hide status prefix')),
+ ('C', 'copies', None, _('show source of copied files')),
+ ('0', 'print0', None,
+ _('end filenames with NUL, for use with xargs')),
+ ('', 'rev', [], _('show difference from revision')),
+ ] + walkopts,
+ _('[OPTION]... [FILE]...')),
+ "tag":
+ (tag,
+ [('f', 'force', None, _('replace existing tag')),
+ ('l', 'local', None, _('make the tag local')),
+ ('r', 'rev', '', _('revision to tag')),
+ ('', 'remove', None, _('remove a tag')),
+ # -l/--local is already there, commitopts cannot be used
+ ('m', 'message', '', _('use <text> as commit message')),
+ ] + commitopts2,
+ _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
+ "tags": (tags, []),
+ "tip":
+ (tip,
+ [('p', 'patch', None, _('show patch')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ] + templateopts,
+ _('[-p]')),
+ "unbundle":
+ (unbundle,
+ [('u', 'update', None,
+ _('update to new tip if changesets were unbundled'))],
+ _('[-u] FILE...')),
+ "^update|up|checkout|co":
+ (update,
+ [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
+ ('c', 'check', None, _('check for uncommitted changes')),
+ ('d', 'date', '', _('tipmost revision matching date')),
+ ('r', 'rev', '', _('revision'))],
+ _('[-C] [-d DATE] [[-r] REV]')),
+ "verify": (verify, []),
+ "version": (version_, []),
+}
+
+norepo = ("clone init version help debugcommands debugcomplete debugdata"
+ " debugindex debugindexdot debugdate debuginstall debugfsinfo")
+optionalrepo = ("identify paths serve showconfig debugancestor")
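+
+# Editor's note: each table entry maps a command name (a leading '^' puts
+# the command in the short help list, '|' separates aliases) to a tuple of
+# (function, option list, synopsis). 'norepo' names commands that run
+# without a repository; 'optionalrepo' names commands that use one only
+# if present.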
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/config.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/config.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,137 @@
+# config.py - configuration parsing for Mercurial
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import error
+import re, os
+
+class sortdict(dict):
+ 'a simple sorted dictionary'
+ def __init__(self, data=None):
+ self._list = []
+ if data:
+ self.update(data)
+ def copy(self):
+ return sortdict(self)
+ def __setitem__(self, key, val):
+ if key in self:
+ self._list.remove(key)
+ self._list.append(key)
+ dict.__setitem__(self, key, val)
+ def __iter__(self):
+ return self._list.__iter__()
+ def update(self, src):
+ for k in src:
+ self[k] = src[k]
+ def items(self):
+ return [(k, self[k]) for k in self._list]
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ self._list.remove(key)
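+
+# A minimal usage sketch (editor's illustration): sortdict preserves
+# insertion order, and re-setting a key moves it to the end.
+#
+#   >>> d = sortdict()
+#   >>> d['b'] = 1; d['a'] = 2; d['b'] = 3
+#   >>> list(d)
+#   ['a', 'b']
+#   >>> d.items()
+#   [('a', 2), ('b', 3)]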
+
+class config(object):
+ def __init__(self, data=None):
+ self._data = {}
+ self._source = {}
+ if data:
+ for k in data._data:
+ self._data[k] = data[k].copy()
+ self._source = data._source.copy()
+ def copy(self):
+ return config(self)
+ def __contains__(self, section):
+ return section in self._data
+ def __getitem__(self, section):
+ return self._data.get(section, {})
+ def __iter__(self):
+ for d in self.sections():
+ yield d
+ def update(self, src):
+ for s in src:
+ if s not in self:
+ self._data[s] = sortdict()
+ self._data[s].update(src._data[s])
+ self._source.update(src._source)
+ def get(self, section, item, default=None):
+ return self._data.get(section, {}).get(item, default)
+ def source(self, section, item):
+ return self._source.get((section, item), "")
+ def sections(self):
+ return sorted(self._data.keys())
+ def items(self, section):
+ return self._data.get(section, {}).items()
+ def set(self, section, item, value, source=""):
+ if section not in self:
+ self._data[section] = sortdict()
+ self._data[section][item] = value
+ self._source[(section, item)] = source
+
+ def parse(self, src, data, sections=None, remap=None, include=None):
+ sectionre = re.compile(r'\[([^\[]+)\]')
+ itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
+ contre = re.compile(r'\s+(\S.*\S)')
+ emptyre = re.compile(r'(;|#|\s*$)')
+ unsetre = re.compile(r'%unset\s+(\S+)')
+ includere = re.compile(r'%include\s+(\S.*\S)')
+ section = ""
+ item = None
+ line = 0
+ cont = 0
+
+ for l in data.splitlines(1):
+ line += 1
+ if cont:
+ m = contre.match(l)
+ if m:
+ if sections and section not in sections:
+ continue
+ v = self.get(section, item) + "\n" + m.group(1)
+ self.set(section, item, v, "%s:%d" % (src, line))
+ continue
+ item = None
+ m = includere.match(l)
+ if m:
+ inc = m.group(1)
+ base = os.path.dirname(src)
+ inc = os.path.normpath(os.path.join(base, inc))
+ if include:
+ include(inc, remap=remap, sections=sections)
+ continue
+ if emptyre.match(l):
+ continue
+ m = sectionre.match(l)
+ if m:
+ section = m.group(1)
+ if remap:
+ section = remap.get(section, section)
+ if section not in self:
+ self._data[section] = sortdict()
+ continue
+ m = itemre.match(l)
+ if m:
+ item = m.group(1)
+ cont = 1
+ if sections and section not in sections:
+ continue
+ self.set(section, item, m.group(2), "%s:%d" % (src, line))
+ continue
+ m = unsetre.match(l)
+ if m:
+ name = m.group(1)
+ if sections and section not in sections:
+ continue
+ if self.get(section, name) is not None:
+ del self._data[section][name]
+ continue
+
+ raise error.ConfigError(_('config error at %s:%d: \'%s\'')
+ % (src, line, l.rstrip()))
+
+ def read(self, path, fp=None, sections=None, remap=None):
+ if not fp:
+ fp = open(path)
+ self.parse(path, fp.read(), sections, remap, self.read)
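+
+# A minimal sketch (editor's illustration; the file name and contents are
+# hypothetical) of driving the parser above:
+#
+#   >>> cfg = config()
+#   >>> cfg.parse('example.rc', '[ui]\nusername = Jane Doe\n')
+#   >>> cfg.get('ui', 'username')
+#   'Jane Doe'
+#   >>> cfg.source('ui', 'username')
+#   'example.rc:2'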
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/context.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/context.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,816 @@
+# context.py - changeset and file context objects for mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import nullid, nullrev, short, hex
+from i18n import _
+import ancestor, bdiff, error, util, subrepo
+import os, errno
+
+propertycache = util.propertycache
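+# util.propertycache computes the decorated method once on first access
+# and caches the result on the instance, so later lookups are plain
+# attribute reads.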
+
+class changectx(object):
+ """A changecontext object makes access to data related to a particular
+ changeset convenient."""
+ def __init__(self, repo, changeid=''):
+ """changeid is a revision number, node, or tag"""
+ if changeid == '':
+ changeid = '.'
+ self._repo = repo
+ if isinstance(changeid, (long, int)):
+ self._rev = changeid
+ self._node = self._repo.changelog.node(changeid)
+ else:
+ self._node = self._repo.lookup(changeid)
+ self._rev = self._repo.changelog.rev(self._node)
+
+ def __str__(self):
+ return short(self.node())
+
+ def __int__(self):
+ return self.rev()
+
+ def __repr__(self):
+ return "" % str(self)
+
+ def __hash__(self):
+ try:
+ return hash(self._rev)
+ except AttributeError:
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self._rev == other._rev
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __nonzero__(self):
+ return self._rev != nullrev
+
+ @propertycache
+ def _changeset(self):
+ return self._repo.changelog.read(self.node())
+
+ @propertycache
+ def _manifest(self):
+ return self._repo.manifest.read(self._changeset[0])
+
+ @propertycache
+ def _manifestdelta(self):
+ return self._repo.manifest.readdelta(self._changeset[0])
+
+ @propertycache
+ def _parents(self):
+ p = self._repo.changelog.parentrevs(self._rev)
+ if p[1] == nullrev:
+ p = p[:-1]
+ return [changectx(self._repo, x) for x in p]
+
+ @propertycache
+ def substate(self):
+ return subrepo.state(self)
+
+ def __contains__(self, key):
+ return key in self._manifest
+
+ def __getitem__(self, key):
+ return self.filectx(key)
+
+ def __iter__(self):
+ for f in sorted(self._manifest):
+ yield f
+
+ def changeset(self): return self._changeset
+ def manifest(self): return self._manifest
+ def manifestnode(self): return self._changeset[0]
+
+ def rev(self): return self._rev
+ def node(self): return self._node
+ def hex(self): return hex(self._node)
+ def user(self): return self._changeset[1]
+ def date(self): return self._changeset[2]
+ def files(self): return self._changeset[3]
+ def description(self): return self._changeset[4]
+ def branch(self): return self._changeset[5].get("branch")
+ def extra(self): return self._changeset[5]
+ def tags(self): return self._repo.nodetags(self._node)
+
+ def parents(self):
+ """return contexts for each parent changeset"""
+ return self._parents
+
+ def p1(self):
+ return self._parents[0]
+
+ def p2(self):
+ if len(self._parents) == 2:
+ return self._parents[1]
+ return changectx(self._repo, -1)
+
+ def children(self):
+ """return contexts for each child changeset"""
+ c = self._repo.changelog.children(self._node)
+ return [changectx(self._repo, x) for x in c]
+
+ def ancestors(self):
+ for a in self._repo.changelog.ancestors(self._rev):
+ yield changectx(self._repo, a)
+
+ def descendants(self):
+ for d in self._repo.changelog.descendants(self._rev):
+ yield changectx(self._repo, d)
+
+ def _fileinfo(self, path):
+ if '_manifest' in self.__dict__:
+ try:
+ return self._manifest[path], self._manifest.flags(path)
+ except KeyError:
+ raise error.LookupError(self._node, path,
+ _('not found in manifest'))
+ if '_manifestdelta' in self.__dict__ or path in self.files():
+ if path in self._manifestdelta:
+ return self._manifestdelta[path], self._manifestdelta.flags(path)
+ node, flag = self._repo.manifest.find(self._changeset[0], path)
+ if not node:
+ raise error.LookupError(self._node, path,
+ _('not found in manifest'))
+
+ return node, flag
+
+ def filenode(self, path):
+ return self._fileinfo(path)[0]
+
+ def flags(self, path):
+ try:
+ return self._fileinfo(path)[1]
+ except error.LookupError:
+ return ''
+
+ def filectx(self, path, fileid=None, filelog=None):
+ """get a file context from this changeset"""
+ if fileid is None:
+ fileid = self.filenode(path)
+ return filectx(self._repo, path, fileid=fileid,
+ changectx=self, filelog=filelog)
+
+ def ancestor(self, c2):
+ """
+ return the ancestor context of self and c2
+ """
+ n = self._repo.changelog.ancestor(self._node, c2._node)
+ return changectx(self._repo, n)
+
+ def walk(self, match):
+ fset = set(match.files())
+ # for dirstate.walk, files=['.'] means "walk the whole tree".
+ # follow that here, too
+ fset.discard('.')
+ for fn in self:
+ for ffn in fset:
+ # match if the file is the exact name or a directory
+ if ffn == fn or fn.startswith("%s/" % ffn):
+ fset.remove(ffn)
+ break
+ if match(fn):
+ yield fn
+ for fn in sorted(fset):
+ if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
+ yield fn
+
+ def sub(self, path):
+ return subrepo.subrepo(self, path)
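+
+# A usage sketch (editor's illustration; 'repo' is assumed to be an open
+# localrepository):
+#
+#   ctx = repo['tip']               # changeid may be a rev, node, or tag
+#   ctx.user(), ctx.description()   # commit metadata
+#   fctx = ctx['some/file.txt']     # __getitem__ returns a filectx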
+
+class filectx(object):
+ """A filecontext object makes access to data related to a particular
+ filerevision convenient."""
+ def __init__(self, repo, path, changeid=None, fileid=None,
+ filelog=None, changectx=None):
+ """changeid can be a changeset revision, node, or tag.
+ fileid can be a file revision or node."""
+ self._repo = repo
+ self._path = path
+
+ assert (changeid is not None
+ or fileid is not None
+ or changectx is not None)
+
+ if filelog:
+ self._filelog = filelog
+
+ if changeid is not None:
+ self._changeid = changeid
+ if changectx is not None:
+ self._changectx = changectx
+ if fileid is not None:
+ self._fileid = fileid
+
+ @propertycache
+ def _changectx(self):
+ return changectx(self._repo, self._changeid)
+
+ @propertycache
+ def _filelog(self):
+ return self._repo.file(self._path)
+
+ @propertycache
+ def _changeid(self):
+ if '_changectx' in self.__dict__:
+ return self._changectx.rev()
+ else:
+ return self._filelog.linkrev(self._filerev)
+
+ @propertycache
+ def _filenode(self):
+ if '_fileid' in self.__dict__:
+ return self._filelog.lookup(self._fileid)
+ else:
+ return self._changectx.filenode(self._path)
+
+ @propertycache
+ def _filerev(self):
+ return self._filelog.rev(self._filenode)
+
+ @propertycache
+ def _repopath(self):
+ return self._path
+
+ def __nonzero__(self):
+ try:
+ self._filenode
+ return True
+ except error.LookupError:
+ # file is missing
+ return False
+
+ def __str__(self):
+ return "%s@%s" % (self.path(), short(self.node()))
+
+ def __repr__(self):
+ return "" % str(self)
+
+ def __hash__(self):
+ try:
+ return hash((self._path, self._fileid))
+ except AttributeError:
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return (self._path == other._path
+ and self._fileid == other._fileid)
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def filectx(self, fileid):
+ '''opens an arbitrary revision of the file without
+ opening a new filelog'''
+ return filectx(self._repo, self._path, fileid=fileid,
+ filelog=self._filelog)
+
+ def filerev(self): return self._filerev
+ def filenode(self): return self._filenode
+ def flags(self): return self._changectx.flags(self._path)
+ def filelog(self): return self._filelog
+
+ def rev(self):
+ if '_changectx' in self.__dict__:
+ return self._changectx.rev()
+ if '_changeid' in self.__dict__:
+ return self._changectx.rev()
+ return self._filelog.linkrev(self._filerev)
+
+ def linkrev(self): return self._filelog.linkrev(self._filerev)
+ def node(self): return self._changectx.node()
+ def hex(self): return hex(self.node())
+ def user(self): return self._changectx.user()
+ def date(self): return self._changectx.date()
+ def files(self): return self._changectx.files()
+ def description(self): return self._changectx.description()
+ def branch(self): return self._changectx.branch()
+ def manifest(self): return self._changectx.manifest()
+ def changectx(self): return self._changectx
+
+ def data(self): return self._filelog.read(self._filenode)
+ def path(self): return self._path
+ def size(self): return self._filelog.size(self._filerev)
+
+ def cmp(self, text): return self._filelog.cmp(self._filenode, text)
+
+ def renamed(self):
+ """check if file was actually renamed in this changeset revision
+
+ If a rename is logged in the file revision, we report the copy for
+ the changeset only if the file revision's linkrev points back to the
+ changeset in question or both changeset parents contain different
+ file revisions.
+ """
+
+ renamed = self._filelog.renamed(self._filenode)
+ if not renamed:
+ return renamed
+
+ if self.rev() == self.linkrev():
+ return renamed
+
+ name = self.path()
+ fnode = self._filenode
+ for p in self._changectx.parents():
+ try:
+ if fnode == p.filenode(name):
+ return None
+ except error.LookupError:
+ pass
+ return renamed
+
+ def parents(self):
+ p = self._path
+ fl = self._filelog
+ pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
+
+ r = self._filelog.renamed(self._filenode)
+ if r:
+ pl[0] = (r[0], r[1], None)
+
+ return [filectx(self._repo, p, fileid=n, filelog=l)
+ for p,n,l in pl if n != nullid]
+
+ def children(self):
+ # hard for renames
+ c = self._filelog.children(self._filenode)
+ return [filectx(self._repo, self._path, fileid=x,
+ filelog=self._filelog) for x in c]
+
+ def annotate(self, follow=False, linenumber=None):
+ '''returns a list of tuples of (ctx, line) for each line
+ in the file, where ctx is the filectx of the node where
+ that line was last changed.
+ If the "linenumber" parameter is not None, tuples of
+ ((ctx, linenumber), line) are returned instead, where linenumber
+ is the line's position at its first appearance in the managed
+ file.
+ To reduce annotation cost, the fixed value False is used as the
+ line number if the "linenumber" parameter is False.'''
+
+ def decorate_compat(text, rev):
+ return ([rev] * len(text.splitlines()), text)
+
+ def without_linenumber(text, rev):
+ return ([(rev, False)] * len(text.splitlines()), text)
+
+ def with_linenumber(text, rev):
+ size = len(text.splitlines())
+ return ([(rev, i) for i in xrange(1, size + 1)], text)
+
+ decorate = (((linenumber is None) and decorate_compat) or
+ (linenumber and with_linenumber) or
+ without_linenumber)
+
+ def pair(parent, child):
+ for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
+ child[0][b1:b2] = parent[0][a1:a2]
+ return child
+
+ getlog = util.lrucachefunc(lambda x: self._repo.file(x))
+ def getctx(path, fileid):
+ log = path == self._path and self._filelog or getlog(path)
+ return filectx(self._repo, path, fileid=fileid, filelog=log)
+ getctx = util.lrucachefunc(getctx)
+
+ def parents(f):
+ # we want to reuse filectx objects as much as possible
+ p = f._path
+ if f._filerev is None: # working dir
+ pl = [(n.path(), n.filerev()) for n in f.parents()]
+ else:
+ pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
+
+ if follow:
+ r = f.renamed()
+ if r:
+ pl[0] = (r[0], getlog(r[0]).rev(r[1]))
+
+ return [getctx(p, n) for p, n in pl if n != nullrev]
+
+ # use linkrev to find the first changeset where self appeared
+ if self.rev() != self.linkrev():
+ base = self.filectx(self.filerev())
+ else:
+ base = self
+
+ # find all ancestors
+ needed = {base: 1}
+ visit = [base]
+ files = [base._path]
+ while visit:
+ f = visit.pop(0)
+ for p in parents(f):
+ if p not in needed:
+ needed[p] = 1
+ visit.append(p)
+ if p._path not in files:
+ files.append(p._path)
+ else:
+ # count how many times we'll use this
+ needed[p] += 1
+
+ # sort by revision (per file) which is a topological order
+ visit = []
+ for f in files:
+ fn = [(n.rev(), n) for n in needed if n._path == f]
+ visit.extend(fn)
+
+ hist = {}
+ for r, f in sorted(visit):
+ curr = decorate(f.data(), f)
+ for p in parents(f):
+ if p != nullid:
+ curr = pair(hist[p], curr)
+ # trim the history of unneeded revs
+ needed[p] -= 1
+ if not needed[p]:
+ del hist[p]
+ hist[f] = curr
+
+ return zip(hist[f][0], hist[f][1].splitlines(1))
+
+ def ancestor(self, fc2):
+ """
+ find the common ancestor file context, if any, of self, and fc2
+ """
+
+ acache = {}
+
+ # prime the ancestor cache for the working directory
+ for c in (self, fc2):
+ if c._filerev is None:
+ pl = [(n.path(), n.filenode()) for n in c.parents()]
+ acache[(c._path, None)] = pl
+
+ flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
+ def parents(vertex):
+ if vertex in acache:
+ return acache[vertex]
+ f, n = vertex
+ if f not in flcache:
+ flcache[f] = self._repo.file(f)
+ fl = flcache[f]
+ pl = [(f, p) for p in fl.parents(n) if p != nullid]
+ re = fl.renamed(n)
+ if re:
+ pl.append(re)
+ acache[vertex] = pl
+ return pl
+
+ a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
+ v = ancestor.ancestor(a, b, parents)
+ if v:
+ f, n = v
+ return filectx(self._repo, f, fileid=n, filelog=flcache[f])
+
+ return None
+
+class workingctx(changectx):
+ """A workingctx object makes access to data related to
+ the current working directory convenient.
+ parents - a pair of parent nodeids, or None to use the dirstate.
+ date - any valid date string or (unixtime, offset), or None.
+ user - username string, or None.
+ extra - a dictionary of extra values, or None.
+ changes - a list of file lists as returned by localrepo.status()
+ or None to use the repository status.
+ """
+ def __init__(self, repo, parents=None, text="", user=None, date=None,
+ extra=None, changes=None):
+ self._repo = repo
+ self._rev = None
+ self._node = None
+ self._text = text
+ if date:
+ self._date = util.parsedate(date)
+ if user:
+ self._user = user
+ if parents:
+ self._parents = [changectx(self._repo, p) for p in parents]
+ if changes:
+ self._status = list(changes)
+
+ self._extra = {}
+ if extra:
+ self._extra = extra.copy()
+ if 'branch' not in self._extra:
+ branch = self._repo.dirstate.branch()
+ try:
+ branch = branch.decode('UTF-8').encode('UTF-8')
+ except UnicodeDecodeError:
+ raise util.Abort(_('branch name not in UTF-8!'))
+ self._extra['branch'] = branch
+ if self._extra['branch'] == '':
+ self._extra['branch'] = 'default'
+
+ def __str__(self):
+ return str(self._parents[0]) + "+"
+
+ def __nonzero__(self):
+ return True
+
+ def __contains__(self, key):
+ return self._repo.dirstate[key] not in "?r"
+
+ @propertycache
+ def _manifest(self):
+ """generate a manifest corresponding to the working directory"""
+
+ man = self._parents[0].manifest().copy()
+ copied = self._repo.dirstate.copies()
+ cf = lambda x: man.flags(copied.get(x, x))
+ ff = self._repo.dirstate.flagfunc(cf)
+ modified, added, removed, deleted, unknown = self._status[:5]
+ for i, l in (("a", added), ("m", modified), ("u", unknown)):
+ for f in l:
+ man[f] = man.get(copied.get(f, f), nullid) + i
+ try:
+ man.set(f, ff(f))
+ except OSError:
+ pass
+
+ for f in deleted + removed:
+ if f in man:
+ del man[f]
+
+ return man
+
+ @propertycache
+ def _status(self):
+ return self._repo.status(unknown=True)
+
+ @propertycache
+ def _user(self):
+ return self._repo.ui.username()
+
+ @propertycache
+ def _date(self):
+ return util.makedate()
+
+ @propertycache
+ def _parents(self):
+ p = self._repo.dirstate.parents()
+ if p[1] == nullid:
+ p = p[:-1]
+ self._parents = [changectx(self._repo, x) for x in p]
+ return self._parents
+
+ def manifest(self): return self._manifest
+
+ def user(self): return self._user or self._repo.ui.username()
+ def date(self): return self._date
+ def description(self): return self._text
+ def files(self):
+ return sorted(self._status[0] + self._status[1] + self._status[2])
+
+ def modified(self): return self._status[0]
+ def added(self): return self._status[1]
+ def removed(self): return self._status[2]
+ def deleted(self): return self._status[3]
+ def unknown(self): return self._status[4]
+ def clean(self): return self._status[5]
+ def branch(self): return self._extra['branch']
+ def extra(self): return self._extra
+
+ def tags(self):
+ t = []
+        for p in self.parents():
+            t.extend(p.tags())
+ return t
+
+ def children(self):
+ return []
+
+ def flags(self, path):
+ if '_manifest' in self.__dict__:
+ try:
+ return self._manifest.flags(path)
+ except KeyError:
+ return ''
+
+ pnode = self._parents[0].changeset()[0]
+ orig = self._repo.dirstate.copies().get(path, path)
+ node, flag = self._repo.manifest.find(pnode, orig)
+ try:
+ ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
+ return ff(path)
+ except OSError:
+ pass
+
+ if not node or path in self.deleted() or path in self.removed():
+ return ''
+ return flag
+
+ def filectx(self, path, filelog=None):
+ """get a file context from the working directory"""
+ return workingfilectx(self._repo, path, workingctx=self,
+ filelog=filelog)
+
+ def ancestor(self, c2):
+ """return the ancestor context of self and c2"""
+ return self._parents[0].ancestor(c2) # punt on two parents for now
+
+ def walk(self, match):
+ return sorted(self._repo.dirstate.walk(match, True, False))
+
+ def dirty(self, missing=False):
+ "check whether a working directory is modified"
+
+ return (self.p2() or self.branch() != self.p1().branch() or
+ self.modified() or self.added() or self.removed() or
+ (missing and self.deleted()))
+
+class workingfilectx(filectx):
+ """A workingfilectx object makes access to data related to a particular
+ file in the working directory convenient."""
+ def __init__(self, repo, path, filelog=None, workingctx=None):
+        """path is the file's path relative to the repository root;
+        filelog and workingctx may be supplied to reuse cached objects."""
+ self._repo = repo
+ self._path = path
+ self._changeid = None
+ self._filerev = self._filenode = None
+
+ if filelog:
+ self._filelog = filelog
+ if workingctx:
+ self._changectx = workingctx
+
+ @propertycache
+ def _changectx(self):
+ return workingctx(self._repo)
+
+ def __nonzero__(self):
+ return True
+
+ def __str__(self):
+ return "%s@%s" % (self.path(), self._changectx)
+
+ def data(self): return self._repo.wread(self._path)
+ def renamed(self):
+ rp = self._repo.dirstate.copied(self._path)
+ if not rp:
+ return None
+ return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
+
+ def parents(self):
+ '''return parent filectxs, following copies if necessary'''
+ def filenode(ctx, path):
+ return ctx._manifest.get(path, nullid)
+
+ path = self._path
+ fl = self._filelog
+ pcl = self._changectx._parents
+ renamed = self.renamed()
+
+ if renamed:
+ pl = [renamed + (None,)]
+ else:
+ pl = [(path, filenode(pcl[0], path), fl)]
+
+ for pc in pcl[1:]:
+ pl.append((path, filenode(pc, path), fl))
+
+ return [filectx(self._repo, p, fileid=n, filelog=l)
+ for p,n,l in pl if n != nullid]
+
+ def children(self):
+ return []
+
+ def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
+ def date(self):
+ t, tz = self._changectx.date()
+ try:
+ return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
+ except OSError, err:
+ if err.errno != errno.ENOENT: raise
+ return (t, tz)
+
+ def cmp(self, text): return self._repo.wread(self._path) == text
+
+class memctx(object):
+ """Use memctx to perform in-memory commits via localrepo.commitctx().
+
+    Revision information is supplied at initialization time, while
+    related file data is made available through a callback mechanism.
+    'repo' is the current localrepo, 'parents' is a
+ sequence of two parent revisions identifiers (pass None for every
+ missing parent), 'text' is the commit message and 'files' lists
+ names of files touched by the revision (normalized and relative to
+ repository root).
+
+ filectxfn(repo, memctx, path) is a callable receiving the
+ repository, the current memctx object and the normalized path of
+ requested file, relative to repository root. It is fired by the
+ commit function for every file in 'files', but calls order is
+ undefined. If the file is available in the revision being
+ committed (updated or added), filectxfn returns a memfilectx
+ object. If the file was removed, filectxfn raises an
+ IOError. Moved files are represented by marking the source file
+ removed and the new file added with copy information (see
+ memfilectx).
+
+ user receives the committer name and defaults to current
+ repository username, date is the commit date in any format
+ supported by util.parsedate() and defaults to current date, extra
+ is a dictionary of metadata or is left empty.
+ """
+ def __init__(self, repo, parents, text, files, filectxfn, user=None,
+ date=None, extra=None):
+ self._repo = repo
+ self._rev = None
+ self._node = None
+ self._text = text
+ self._date = date and util.parsedate(date) or util.makedate()
+ self._user = user
+ parents = [(p or nullid) for p in parents]
+ p1, p2 = parents
+ self._parents = [changectx(self._repo, p) for p in (p1, p2)]
+ files = sorted(set(files))
+ self._status = [files, [], [], [], []]
+ self._filectxfn = filectxfn
+
+ self._extra = extra and extra.copy() or {}
+ if 'branch' not in self._extra:
+ self._extra['branch'] = 'default'
+ elif self._extra.get('branch') == '':
+ self._extra['branch'] = 'default'
+
+ def __str__(self):
+ return str(self._parents[0]) + "+"
+
+ def __int__(self):
+ return self._rev
+
+ def __nonzero__(self):
+ return True
+
+ def __getitem__(self, key):
+ return self.filectx(key)
+
+ def p1(self): return self._parents[0]
+ def p2(self): return self._parents[1]
+
+ def user(self): return self._user or self._repo.ui.username()
+ def date(self): return self._date
+ def description(self): return self._text
+ def files(self): return self.modified()
+ def modified(self): return self._status[0]
+ def added(self): return self._status[1]
+ def removed(self): return self._status[2]
+ def deleted(self): return self._status[3]
+ def unknown(self): return self._status[4]
+ def clean(self): return self._status[5]
+ def branch(self): return self._extra['branch']
+ def extra(self): return self._extra
+ def flags(self, f): return self[f].flags()
+
+ def parents(self):
+ """return contexts for each parent changeset"""
+ return self._parents
+
+ def filectx(self, path, filelog=None):
+ """get a file context from the working directory"""
+ return self._filectxfn(self._repo, self, path)
+
+class memfilectx(object):
+ """memfilectx represents an in-memory file to commit.
+
+ See memctx for more details.
+ """
+ def __init__(self, path, data, islink, isexec, copied):
+ """
+ path is the normalized file path relative to repository root.
+ data is the file content as a string.
+ islink is True if the file is a symbolic link.
+ isexec is True if the file is executable.
+ copied is the source file path if current file was copied in the
+ revision being committed, or None."""
+ self._path = path
+ self._data = data
+ self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
+ self._copied = None
+ if copied:
+ self._copied = (copied, nullid)
+
+ def __nonzero__(self): return True
+ def __str__(self): return "%s@%s" % (self.path(), self._changectx)
+ def path(self): return self._path
+ def data(self): return self._data
+ def flags(self): return self._flags
+ def isexec(self): return 'x' in self._flags
+ def islink(self): return 'l' in self._flags
+ def renamed(self): return self._copied
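
The memctx docstring above describes a callback-driven commit. Below is a
minimal sketch of driving it, assuming `repo` is an open localrepo and the
module is importable as upmana.mercurial.context; the file name and content
are invented for illustration.

    # One-file in-memory commit via memctx and localrepo.commitctx().
    from upmana.mercurial import context

    def filectxfn(repo, mctx, path):
        # called by commitctx() once per name listed in `files`
        return context.memfilectx(path, 'hello world\n',
                                  islink=False, isexec=False, copied=None)

    p1 = repo.dirstate.parents()[0]
    ctx = context.memctx(repo, (p1, None), 'in-memory commit',
                         ['hello.txt'], filectxfn)
    node = repo.commitctx(ctx)
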
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/copies.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/copies.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,233 @@
+# copies.py - copy detection for Mercurial
+#
+# Copyright 2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import util
+import heapq
+
+def _nonoverlap(d1, d2, d3):
+ "Return list of elements in d1 not in d2 or d3"
+ return sorted([d for d in d1 if d not in d3 and d not in d2])
+
+def _dirname(f):
+ s = f.rfind("/")
+ if s == -1:
+ return ""
+ return f[:s]
+
+def _dirs(files):
+ d = set()
+ for f in files:
+ f = _dirname(f)
+ while f not in d:
+ d.add(f)
+ f = _dirname(f)
+ return d
+
+def _findoldnames(fctx, limit):
+ "find files that path was copied from, back to linkrev limit"
+ old = {}
+ seen = set()
+ orig = fctx.path()
+ visit = [(fctx, 0)]
+ while visit:
+ fc, depth = visit.pop()
+ s = str(fc)
+ if s in seen:
+ continue
+ seen.add(s)
+ if fc.path() != orig and fc.path() not in old:
+ old[fc.path()] = (depth, fc.path()) # remember depth
+ if fc.rev() < limit and fc.rev() is not None:
+ continue
+ visit += [(p, depth - 1) for p in fc.parents()]
+
+ # return old names sorted by depth
+ return [o[1] for o in sorted(old.values())]
+
+def _findlimit(repo, a, b):
+ "find the earliest revision that's an ancestor of a or b but not both"
+ # basic idea:
+ # - mark a and b with different sides
+ # - if a parent's children are all on the same side, the parent is
+ # on that side, otherwise it is on no side
+ # - walk the graph in topological order with the help of a heap;
+ # - add unseen parents to side map
+ # - clear side of any parent that has children on different sides
+ # - track number of interesting revs that might still be on a side
+ # - track the lowest interesting rev seen
+ # - quit when interesting revs is zero
+
+ cl = repo.changelog
+ working = len(cl) # pseudo rev for the working directory
+ if a is None:
+ a = working
+ if b is None:
+ b = working
+
+ side = {a: -1, b: 1}
+ visit = [-a, -b]
+ heapq.heapify(visit)
+ interesting = len(visit)
+ limit = working
+
+ while interesting:
+ r = -heapq.heappop(visit)
+ if r == working:
+ parents = [cl.rev(p) for p in repo.dirstate.parents()]
+ else:
+ parents = cl.parentrevs(r)
+ for p in parents:
+ if p not in side:
+ # first time we see p; add it to visit
+ side[p] = side[r]
+ if side[p]:
+ interesting += 1
+ heapq.heappush(visit, -p)
+ elif side[p] and side[p] != side[r]:
+ # p was interesting but now we know better
+ side[p] = 0
+ interesting -= 1
+ if side[r]:
+ limit = r # lowest rev visited
+ interesting -= 1
+ return limit
+
+def copies(repo, c1, c2, ca, checkdirs=False):
+ """
+    Find moves and copies between contexts c1 and c2
+ """
+ # avoid silly behavior for update from empty dir
+ if not c1 or not c2 or c1 == c2:
+ return {}, {}
+
+ # avoid silly behavior for parent -> working dir
+ if c2.node() is None and c1.node() == repo.dirstate.parents()[0]:
+ return repo.dirstate.copies(), {}
+
+ limit = _findlimit(repo, c1.rev(), c2.rev())
+ m1 = c1.manifest()
+ m2 = c2.manifest()
+ ma = ca.manifest()
+
+ def makectx(f, n):
+ if len(n) != 20: # in a working context?
+ if c1.rev() is None:
+ return c1.filectx(f)
+ return c2.filectx(f)
+ return repo.filectx(f, fileid=n)
+
+ ctx = util.lrucachefunc(makectx)
+ copy = {}
+ fullcopy = {}
+ diverge = {}
+
+ def checkcopies(f, m1, m2):
+ '''check possible copies of f from m1 to m2'''
+ c1 = ctx(f, m1[f])
+ for of in _findoldnames(c1, limit):
+ fullcopy[f] = of # remember for dir rename detection
+            if of in m2: # original file in other manifest?
+ # if the original file is unchanged on the other branch,
+ # no merge needed
+ if m2[of] != ma.get(of):
+ c2 = ctx(of, m2[of])
+ ca = c1.ancestor(c2)
+ # related and named changed on only one side?
+ if ca and (ca.path() == f or ca.path() == c2.path()):
+ if c1 != ca or c2 != ca: # merge needed?
+ copy[f] = of
+ elif of in ma:
+ diverge.setdefault(of, []).append(f)
+
+ repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
+
+ u1 = _nonoverlap(m1, m2, ma)
+ u2 = _nonoverlap(m2, m1, ma)
+
+ if u1:
+ repo.ui.debug(_(" unmatched files in local:\n %s\n")
+ % "\n ".join(u1))
+ if u2:
+ repo.ui.debug(_(" unmatched files in other:\n %s\n")
+ % "\n ".join(u2))
+
+ for f in u1:
+ checkcopies(f, m1, m2)
+ for f in u2:
+ checkcopies(f, m2, m1)
+
+ diverge2 = set()
+ for of, fl in diverge.items():
+ if len(fl) == 1:
+ del diverge[of] # not actually divergent
+ else:
+ diverge2.update(fl) # reverse map for below
+
+ if fullcopy:
+ repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
+ for f in fullcopy:
+ note = ""
+ if f in copy: note += "*"
+ if f in diverge2: note += "!"
+ repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
+ del diverge2
+
+ if not fullcopy or not checkdirs:
+ return copy, diverge
+
+ repo.ui.debug(_(" checking for directory renames\n"))
+
+ # generate a directory move map
+ d1, d2 = _dirs(m1), _dirs(m2)
+ invalid = set()
+ dirmove = {}
+
+ # examine each file copy for a potential directory move, which is
+ # when all the files in a directory are moved to a new directory
+ for dst, src in fullcopy.iteritems():
+ dsrc, ddst = _dirname(src), _dirname(dst)
+ if dsrc in invalid:
+ # already seen to be uninteresting
+ continue
+ elif dsrc in d1 and ddst in d1:
+ # directory wasn't entirely moved locally
+ invalid.add(dsrc)
+ elif dsrc in d2 and ddst in d2:
+ # directory wasn't entirely moved remotely
+ invalid.add(dsrc)
+ elif dsrc in dirmove and dirmove[dsrc] != ddst:
+ # files from the same directory moved to two different places
+ invalid.add(dsrc)
+ else:
+ # looks good so far
+ dirmove[dsrc + "/"] = ddst + "/"
+
+ for i in invalid:
+ if i in dirmove:
+ del dirmove[i]
+ del d1, d2, invalid
+
+ if not dirmove:
+ return copy, diverge
+
+ for d in dirmove:
+ repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
+
+ # check unaccounted nonoverlapping files against directory moves
+ for f in u1 + u2:
+ if f not in fullcopy:
+ for d in dirmove:
+ if f.startswith(d):
+ # new file added in a directory that was moved, move it
+ df = dirmove[d] + f[len(d):]
+ if df not in copy:
+ copy[f] = df
+ repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
+ break
+
+ return copy, diverge
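
A hedged sketch of calling copies() from merge code; `repo` and the chosen
contexts are assumptions for illustration, not part of this patch:

    c1 = repo['tip']                 # a committed changectx
    c2 = repo[None]                  # the working-directory context
    ca = c1.ancestor(c2)             # their common ancestor
    copy, diverge = copies(repo, c1, c2, ca, checkdirs=True)
    for dst, src in copy.iteritems():
        print '%s copied from %s' % (dst, src)
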
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/demandimport.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/demandimport.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,136 @@
+# demandimport.py - global demand-loading of modules for Mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+'''
+demandimport - automatic demandloading of modules
+
+To enable this module, do:
+
+ import demandimport; demandimport.enable()
+
+Imports of the following forms will be demand-loaded:
+
+ import a, b.c
+ import a.b as c
+ from a import b,c # a will be loaded immediately
+
+These imports will not be delayed:
+
+ from a import *
+ b = __import__(a)
+'''
+
+import __builtin__
+_origimport = __import__
+
+class _demandmod(object):
+ """module demand-loader and proxy"""
+ def __init__(self, name, globals, locals):
+ if '.' in name:
+ head, rest = name.split('.', 1)
+ after = [rest]
+ else:
+ head = name
+ after = []
+ object.__setattr__(self, "_data", (head, globals, locals, after))
+ object.__setattr__(self, "_module", None)
+ def _extend(self, name):
+ """add to the list of submodules to load"""
+ self._data[3].append(name)
+ def _load(self):
+ if not self._module:
+ head, globals, locals, after = self._data
+ mod = _origimport(head, globals, locals)
+ # load submodules
+ def subload(mod, p):
+ h, t = p, None
+ if '.' in p:
+ h, t = p.split('.', 1)
+ if not hasattr(mod, h):
+ setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__))
+ elif t:
+ subload(getattr(mod, h), t)
+
+ for x in after:
+ subload(mod, x)
+
+ # are we in the locals dictionary still?
+ if locals and locals.get(head) == self:
+ locals[head] = mod
+ object.__setattr__(self, "_module", mod)
+
+ def __repr__(self):
+ if self._module:
+            return "<proxied module '%s'>" % self._data[0]
+        return "<unloaded module '%s'>" % self._data[0]
+ def __call__(self, *args, **kwargs):
+ raise TypeError("%s object is not callable" % repr(self))
+ def __getattribute__(self, attr):
+ if attr in ('_data', '_extend', '_load', '_module'):
+ return object.__getattribute__(self, attr)
+ self._load()
+ return getattr(self._module, attr)
+ def __setattr__(self, attr, val):
+ self._load()
+ setattr(self._module, attr, val)
+
+def _demandimport(name, globals=None, locals=None, fromlist=None, level=None):
+ if not locals or name in ignore or fromlist == ('*',):
+ # these cases we can't really delay
+ return _origimport(name, globals, locals, fromlist)
+ elif not fromlist:
+ # import a [as b]
+ if '.' in name: # a.b
+ base, rest = name.split('.', 1)
+ # email.__init__ loading email.mime
+ if globals and globals.get('__name__', None) == base:
+ return _origimport(name, globals, locals, fromlist)
+ # if a is already demand-loaded, add b to its submodule list
+ if base in locals:
+ if isinstance(locals[base], _demandmod):
+ locals[base]._extend(rest)
+ return locals[base]
+ return _demandmod(name, globals, locals)
+ else:
+ if level is not None:
+ # from . import b,c,d or from .a import b,c,d
+ return _origimport(name, globals, locals, fromlist, level)
+ # from a import b,c,d
+ mod = _origimport(name, globals, locals)
+ # recurse down the module chain
+ for comp in name.split('.')[1:]:
+ if not hasattr(mod, comp):
+ setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__))
+ mod = getattr(mod, comp)
+ for x in fromlist:
+ # set requested submodules for demand load
+ if not(hasattr(mod, x)):
+ setattr(mod, x, _demandmod(x, mod.__dict__, locals))
+ return mod
+
+ignore = [
+ '_hashlib',
+ '_xmlplus',
+ 'fcntl',
+ 'win32com.gen_py',
+ 'pythoncom',
+ # imported by tarfile, not available under Windows
+ 'pwd',
+ 'grp',
+ # imported by profile, itself imported by hotshot.stats,
+ # not available under Windows
+ 'resource',
+ ]
+
+def enable():
+ "enable global demand-loading of modules"
+ __builtin__.__import__ = _demandimport
+
+def disable():
+ "disable global demand-loading of modules"
+ __builtin__.__import__ = _origimport
+
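
As the module docstring above shows, enabling the loader is a single call; a
short usage sketch (smtplib is just an example module):

    from upmana.mercurial import demandimport
    demandimport.enable()
    import smtplib                  # returns a _demandmod proxy
    smtplib.SMTP                    # first attribute access loads it
    demandimport.disable()          # restore the builtin __import__
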
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/diffhelpers.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/diffhelpers.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,56 @@
+# diffhelpers.py - pure Python implementation of diffhelpers.c
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+def addlines(fp, hunk, lena, lenb, a, b):
+ while True:
+ todoa = lena - len(a)
+ todob = lenb - len(b)
+ num = max(todoa, todob)
+ if num == 0:
+ break
+ for i in xrange(num):
+ s = fp.readline()
+ c = s[0]
+ if s == "\\ No newline at end of file\n":
+ fix_newline(hunk, a, b)
+ continue
+ if c == "\n":
+ # Some patches may be missing the control char
+ # on empty lines. Supply a leading space.
+ s = " \n"
+ hunk.append(s)
+ if c == "+":
+ b.append(s[1:])
+ elif c == "-":
+ a.append(s)
+ else:
+ b.append(s[1:])
+ a.append(s)
+ return 0
+
+def fix_newline(hunk, a, b):
+ l = hunk[-1]
+ c = l[0]
+ hline = l[:-1]
+
+ if c == " " or c == "+":
+ b[-1] = l[1:-1]
+ if c == " " or c == "-":
+ a[-1] = hline
+ hunk[-1] = hline
+ return 0
+
+
+def testhunk(a, b, bstart):
+ alen = len(a)
+ blen = len(b)
+ if alen > blen - bstart:
+ return -1
+ for i in xrange(alen):
+ if a[i][1:] != b[i + bstart]:
+ return -1
+ return 0
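
A small worked example of testhunk(), which checks whether the hunk lines `a`
(with their one-character prefixes) match the target lines `b` starting at
index bstart; the sample data is invented:

    a = ['-old line\n', ' context\n']   # hunk lines, prefixed
    b = ['old line\n', 'context\n']     # target file lines
    assert testhunk(a, b, 0) == 0       # hunk matches at offset 0
    assert testhunk(a, b, 1) == -1      # too few lines from offset 1
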
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/dirstate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/dirstate.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,601 @@
+# dirstate.py - working directory tracking for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import nullid
+from i18n import _
+import util, ignore, osutil, parsers
+import struct, os, stat, errno
+import cStringIO, sys
+
+_unknown = ('?', 0, 0, 0)
+_format = ">cllll"
+propertycache = util.propertycache
+
+def _finddirs(path):
+ pos = path.rfind('/')
+ while pos != -1:
+ yield path[:pos]
+ pos = path.rfind('/', 0, pos)
+
+def _incdirs(dirs, path):
+ for base in _finddirs(path):
+ if base in dirs:
+ dirs[base] += 1
+ return
+ dirs[base] = 1
+
+def _decdirs(dirs, path):
+ for base in _finddirs(path):
+ if dirs[base] > 1:
+ dirs[base] -= 1
+ return
+ del dirs[base]
+
+class dirstate(object):
+
+ def __init__(self, opener, ui, root):
+ self._opener = opener
+ self._root = root
+ self._rootdir = os.path.join(root, '')
+ self._dirty = False
+ self._dirtypl = False
+ self._ui = ui
+
+ @propertycache
+ def _map(self):
+ self._read()
+ return self._map
+
+ @propertycache
+ def _copymap(self):
+ self._read()
+ return self._copymap
+
+ @propertycache
+ def _foldmap(self):
+ f = {}
+ for name in self._map:
+ f[os.path.normcase(name)] = name
+ return f
+
+ @propertycache
+ def _branch(self):
+ try:
+ return self._opener("branch").read().strip() or "default"
+ except IOError:
+ return "default"
+
+ @propertycache
+ def _pl(self):
+ try:
+ st = self._opener("dirstate").read(40)
+ l = len(st)
+ if l == 40:
+ return st[:20], st[20:40]
+ elif l > 0 and l < 40:
+ raise util.Abort(_('working directory state appears damaged!'))
+ except IOError, err:
+ if err.errno != errno.ENOENT: raise
+ return [nullid, nullid]
+
+ @propertycache
+ def _dirs(self):
+ dirs = {}
+ for f,s in self._map.iteritems():
+ if s[0] != 'r':
+ _incdirs(dirs, f)
+ return dirs
+
+ @propertycache
+ def _ignore(self):
+ files = [self._join('.hgignore')]
+ for name, path in self._ui.configitems("ui"):
+ if name == 'ignore' or name.startswith('ignore.'):
+ files.append(os.path.expanduser(path))
+ return ignore.ignore(self._root, files, self._ui.warn)
+
+ @propertycache
+ def _slash(self):
+ return self._ui.configbool('ui', 'slash') and os.sep != '/'
+
+ @propertycache
+ def _checklink(self):
+ return util.checklink(self._root)
+
+ @propertycache
+ def _checkexec(self):
+ return util.checkexec(self._root)
+
+ @propertycache
+ def _checkcase(self):
+ return not util.checkcase(self._join('.hg'))
+
+ def _join(self, f):
+ # much faster than os.path.join()
+ # it's safe because f is always a relative path
+ return self._rootdir + f
+
+ def flagfunc(self, fallback):
+ if self._checklink:
+ if self._checkexec:
+ def f(x):
+ p = self._join(x)
+ if os.path.islink(p):
+ return 'l'
+ if util.is_exec(p):
+ return 'x'
+ return ''
+ return f
+ def f(x):
+ if os.path.islink(self._join(x)):
+ return 'l'
+ if 'x' in fallback(x):
+ return 'x'
+ return ''
+ return f
+ if self._checkexec:
+ def f(x):
+ if 'l' in fallback(x):
+ return 'l'
+ if util.is_exec(self._join(x)):
+ return 'x'
+ return ''
+ return f
+ return fallback
+
+ def getcwd(self):
+ cwd = os.getcwd()
+ if cwd == self._root: return ''
+ # self._root ends with a path separator if self._root is '/' or 'C:\'
+ rootsep = self._root
+ if not util.endswithsep(rootsep):
+ rootsep += os.sep
+ if cwd.startswith(rootsep):
+ return cwd[len(rootsep):]
+ else:
+ # we're outside the repo. return an absolute path.
+ return cwd
+
+ def pathto(self, f, cwd=None):
+ if cwd is None:
+ cwd = self.getcwd()
+ path = util.pathto(self._root, cwd, f)
+ if self._slash:
+ return util.normpath(path)
+ return path
+
+ def __getitem__(self, key):
+ ''' current states:
+ n normal
+ m needs merging
+ r marked for removal
+ a marked for addition
+ ? not tracked'''
+ return self._map.get(key, ("?",))[0]
+
+ def __contains__(self, key):
+ return key in self._map
+
+ def __iter__(self):
+ for x in sorted(self._map):
+ yield x
+
+ def parents(self):
+ return self._pl
+
+ def branch(self):
+ return self._branch
+
+ def setparents(self, p1, p2=nullid):
+ self._dirty = self._dirtypl = True
+ self._pl = p1, p2
+
+ def setbranch(self, branch):
+ self._branch = branch
+ self._opener("branch", "w").write(branch + '\n')
+
+ def _read(self):
+ self._map = {}
+ self._copymap = {}
+ try:
+ st = self._opener("dirstate").read()
+ except IOError, err:
+ if err.errno != errno.ENOENT: raise
+ return
+ if not st:
+ return
+
+ p = parsers.parse_dirstate(self._map, self._copymap, st)
+ if not self._dirtypl:
+ self._pl = p
+
+ def invalidate(self):
+ for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
+ if a in self.__dict__:
+ delattr(self, a)
+ self._dirty = False
+
+ def copy(self, source, dest):
+ """Mark dest as a copy of source. Unmark dest if source is None.
+ """
+ if source == dest:
+ return
+ self._dirty = True
+ if source is not None:
+ self._copymap[dest] = source
+ elif dest in self._copymap:
+ del self._copymap[dest]
+
+ def copied(self, file):
+ return self._copymap.get(file, None)
+
+ def copies(self):
+ return self._copymap
+
+ def _droppath(self, f):
+ if self[f] not in "?r" and "_dirs" in self.__dict__:
+ _decdirs(self._dirs, f)
+
+ def _addpath(self, f, check=False):
+ oldstate = self[f]
+ if check or oldstate == "r":
+ if '\r' in f or '\n' in f:
+ raise util.Abort(
+ _("'\\n' and '\\r' disallowed in filenames: %r") % f)
+ if f in self._dirs:
+ raise util.Abort(_('directory %r already in dirstate') % f)
+ # shadows
+ for d in _finddirs(f):
+ if d in self._dirs:
+ break
+ if d in self._map and self[d] != 'r':
+ raise util.Abort(
+ _('file %r in dirstate clashes with %r') % (d, f))
+ if oldstate in "?r" and "_dirs" in self.__dict__:
+ _incdirs(self._dirs, f)
+
+ def normal(self, f):
+ 'mark a file normal and clean'
+ self._dirty = True
+ self._addpath(f)
+ s = os.lstat(self._join(f))
+ self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def normallookup(self, f):
+ 'mark a file normal, but possibly dirty'
+ if self._pl[1] != nullid and f in self._map:
+ # if there is a merge going on and the file was either
+ # in state 'm' or dirty before being removed, restore that state.
+ entry = self._map[f]
+ if entry[0] == 'r' and entry[2] in (-1, -2):
+ source = self._copymap.get(f)
+ if entry[2] == -1:
+ self.merge(f)
+ elif entry[2] == -2:
+ self.normaldirty(f)
+ if source:
+ self.copy(source, f)
+ return
+ if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
+ return
+ self._dirty = True
+ self._addpath(f)
+ self._map[f] = ('n', 0, -1, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def normaldirty(self, f):
+ 'mark a file normal, but dirty'
+ self._dirty = True
+ self._addpath(f)
+ self._map[f] = ('n', 0, -2, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def add(self, f):
+ 'mark a file added'
+ self._dirty = True
+ self._addpath(f, True)
+ self._map[f] = ('a', 0, -1, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def remove(self, f):
+ 'mark a file removed'
+ self._dirty = True
+ self._droppath(f)
+ size = 0
+ if self._pl[1] != nullid and f in self._map:
+ entry = self._map[f]
+ if entry[0] == 'm':
+ size = -1
+ elif entry[0] == 'n' and entry[2] == -2:
+ size = -2
+ self._map[f] = ('r', 0, size, 0)
+ if size == 0 and f in self._copymap:
+ del self._copymap[f]
+
+ def merge(self, f):
+ 'mark a file merged'
+ self._dirty = True
+ s = os.lstat(self._join(f))
+ self._addpath(f)
+ self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def forget(self, f):
+ 'forget a file'
+ self._dirty = True
+ try:
+ self._droppath(f)
+ del self._map[f]
+ except KeyError:
+ self._ui.warn(_("not in dirstate: %s\n") % f)
+
+ def _normalize(self, path, knownpath):
+ norm_path = os.path.normcase(path)
+ fold_path = self._foldmap.get(norm_path, None)
+ if fold_path is None:
+ if knownpath or not os.path.exists(os.path.join(self._root, path)):
+ fold_path = path
+ else:
+ fold_path = self._foldmap.setdefault(norm_path,
+ util.fspath(path, self._root))
+ return fold_path
+
+ def clear(self):
+ self._map = {}
+ if "_dirs" in self.__dict__:
+            delattr(self, "_dirs")
+ self._copymap = {}
+ self._pl = [nullid, nullid]
+ self._dirty = True
+
+ def rebuild(self, parent, files):
+ self.clear()
+ for f in files:
+ if 'x' in files.flags(f):
+ self._map[f] = ('n', 0777, -1, 0)
+ else:
+ self._map[f] = ('n', 0666, -1, 0)
+ self._pl = (parent, nullid)
+ self._dirty = True
+
+ def write(self):
+ if not self._dirty:
+ return
+ st = self._opener("dirstate", "w", atomictemp=True)
+
+ try:
+ gran = int(self._ui.config('dirstate', 'granularity', 1))
+ except ValueError:
+ gran = 1
+ limit = sys.maxint
+ if gran > 0:
+ limit = util.fstat(st).st_mtime - gran
+
+ cs = cStringIO.StringIO()
+ copymap = self._copymap
+ pack = struct.pack
+ write = cs.write
+ write("".join(self._pl))
+ for f, e in self._map.iteritems():
+ if f in copymap:
+ f = "%s\0%s" % (f, copymap[f])
+ if e[3] > limit and e[0] == 'n':
+ e = (e[0], 0, -1, -1)
+ e = pack(_format, e[0], e[1], e[2], e[3], len(f))
+ write(e)
+ write(f)
+ st.write(cs.getvalue())
+ st.rename()
+ self._dirty = self._dirtypl = False
+
+ def _dirignore(self, f):
+ if f == '.':
+ return False
+ if self._ignore(f):
+ return True
+ for p in _finddirs(f):
+ if self._ignore(p):
+ return True
+ return False
+
+ def walk(self, match, unknown, ignored):
+ '''
+ walk recursively through the directory tree, finding all files
+ matched by the match function
+
+        results are yielded as (filename, stat) tuples, where stat is
+        the lstat result if the file was found in the directory, and
+        None otherwise.
+ '''
+
+ def fwarn(f, msg):
+ self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
+ return False
+
+ def badtype(mode):
+ kind = _('unknown')
+ if stat.S_ISCHR(mode): kind = _('character device')
+ elif stat.S_ISBLK(mode): kind = _('block device')
+ elif stat.S_ISFIFO(mode): kind = _('fifo')
+ elif stat.S_ISSOCK(mode): kind = _('socket')
+ elif stat.S_ISDIR(mode): kind = _('directory')
+ return _('unsupported file type (type is %s)') % kind
+
+ ignore = self._ignore
+ dirignore = self._dirignore
+ if ignored:
+ ignore = util.never
+ dirignore = util.never
+ elif not unknown:
+ # if unknown and ignored are False, skip step 2
+ ignore = util.always
+ dirignore = util.always
+
+ matchfn = match.matchfn
+ badfn = match.bad
+ dmap = self._map
+ normpath = util.normpath
+ listdir = osutil.listdir
+ lstat = os.lstat
+ getkind = stat.S_IFMT
+ dirkind = stat.S_IFDIR
+ regkind = stat.S_IFREG
+ lnkkind = stat.S_IFLNK
+ join = self._join
+ work = []
+ wadd = work.append
+
+ if self._checkcase:
+ normalize = self._normalize
+ else:
+ normalize = lambda x, y: x
+
+ exact = skipstep3 = False
+ if matchfn == match.exact: # match.exact
+ exact = True
+ dirignore = util.always # skip step 2
+ elif match.files() and not match.anypats(): # match.match, no patterns
+ skipstep3 = True
+
+ files = set(match.files())
+ if not files or '.' in files:
+ files = ['']
+ results = {'.hg': None}
+
+ # step 1: find all explicit files
+ for ff in sorted(files):
+ nf = normalize(normpath(ff), False)
+ if nf in results:
+ continue
+
+ try:
+ st = lstat(join(nf))
+ kind = getkind(st.st_mode)
+ if kind == dirkind:
+ skipstep3 = False
+ if nf in dmap:
+ #file deleted on disk but still in dirstate
+ results[nf] = None
+ match.dir(nf)
+ if not dirignore(nf):
+ wadd(nf)
+ elif kind == regkind or kind == lnkkind:
+ results[nf] = st
+ else:
+ badfn(ff, badtype(kind))
+ if nf in dmap:
+ results[nf] = None
+ except OSError, inst:
+ if nf in dmap: # does it exactly match a file?
+ results[nf] = None
+ else: # does it match a directory?
+ prefix = nf + "/"
+ for fn in dmap:
+ if fn.startswith(prefix):
+ match.dir(nf)
+ skipstep3 = False
+ break
+ else:
+ badfn(ff, inst.strerror)
+
+ # step 2: visit subdirectories
+ while work:
+ nd = work.pop()
+ skip = None
+ if nd == '.':
+ nd = ''
+ else:
+ skip = '.hg'
+ try:
+ entries = listdir(join(nd), stat=True, skip=skip)
+ except OSError, inst:
+ if inst.errno == errno.EACCES:
+ fwarn(nd, inst.strerror)
+ continue
+ raise
+ for f, kind, st in entries:
+ nf = normalize(nd and (nd + "/" + f) or f, True)
+ if nf not in results:
+ if kind == dirkind:
+ if not ignore(nf):
+ match.dir(nf)
+ wadd(nf)
+ if nf in dmap and matchfn(nf):
+ results[nf] = None
+ elif kind == regkind or kind == lnkkind:
+ if nf in dmap:
+ if matchfn(nf):
+ results[nf] = st
+ elif matchfn(nf) and not ignore(nf):
+ results[nf] = st
+ elif nf in dmap and matchfn(nf):
+ results[nf] = None
+
+ # step 3: report unseen items in the dmap hash
+ if not skipstep3 and not exact:
+ visit = sorted([f for f in dmap if f not in results and matchfn(f)])
+ for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
+                if st is not None and getkind(st.st_mode) not in (regkind, lnkkind):
+ st = None
+ results[nf] = st
+
+ del results['.hg']
+ return results
+
+ def status(self, match, ignored, clean, unknown):
+ listignored, listclean, listunknown = ignored, clean, unknown
+ lookup, modified, added, unknown, ignored = [], [], [], [], []
+ removed, deleted, clean = [], [], []
+
+ dmap = self._map
+ ladd = lookup.append
+ madd = modified.append
+ aadd = added.append
+ uadd = unknown.append
+ iadd = ignored.append
+ radd = removed.append
+ dadd = deleted.append
+ cadd = clean.append
+
+ for fn, st in self.walk(match, listunknown, listignored).iteritems():
+ if fn not in dmap:
+ if (listignored or match.exact(fn)) and self._dirignore(fn):
+ if listignored:
+ iadd(fn)
+ elif listunknown:
+ uadd(fn)
+ continue
+
+ state, mode, size, time = dmap[fn]
+
+ if not st and state in "nma":
+ dadd(fn)
+ elif state == 'n':
+ if (size >= 0 and
+ (size != st.st_size
+ or ((mode ^ st.st_mode) & 0100 and self._checkexec))
+ or size == -2
+ or fn in self._copymap):
+ madd(fn)
+ elif time != int(st.st_mtime):
+ ladd(fn)
+ elif listclean:
+ cadd(fn)
+ elif state == 'm':
+ madd(fn)
+ elif state == 'a':
+ aadd(fn)
+ elif state == 'r':
+ radd(fn)
+
+ return (lookup, modified, added, removed, deleted, unknown, ignored,
+ clean)
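
The state codes documented in __getitem__ above can be probed directly; a
sketch, assuming `repo` is an open localrepo:

    ds = repo.dirstate
    print ds['setup.py']       # one of 'n', 'm', 'r', 'a', '?'
    print ds.branch()          # current branch, 'default' if unset
    print 'setup.py' in ds     # True if the file is in the dirstate
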
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/dispatch.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/dispatch.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,501 @@
+# dispatch.py - command dispatching for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import os, sys, atexit, signal, pdb, socket, errno, shlex, time
+import util, commands, hg, fancyopts, extensions, hook, error
+import cmdutil, encoding
+import ui as _ui
+
+def run():
+ "run the command in sys.argv"
+ sys.exit(dispatch(sys.argv[1:]))
+
+def dispatch(args):
+ "run the command specified in args"
+ try:
+ u = _ui.ui()
+ if '--traceback' in args:
+ u.setconfig('ui', 'traceback', 'on')
+ except util.Abort, inst:
+ sys.stderr.write(_("abort: %s\n") % inst)
+ return -1
+ return _runcatch(u, args)
+
+def _runcatch(ui, args):
+ def catchterm(*args):
+ raise error.SignalInterrupt
+
+ for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+ num = getattr(signal, name, None)
+ if num: signal.signal(num, catchterm)
+
+ try:
+ try:
+ # enter the debugger before command execution
+ if '--debugger' in args:
+ pdb.set_trace()
+ try:
+ return _dispatch(ui, args)
+ finally:
+ ui.flush()
+ except:
+ # enter the debugger when we hit an exception
+ if '--debugger' in args:
+ pdb.post_mortem(sys.exc_info()[2])
+ ui.traceback()
+ raise
+
+ # Global exception handling, alphabetically
+ # Mercurial-specific first, followed by built-in and library exceptions
+ except error.AmbiguousCommand, inst:
+ ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
+ (inst.args[0], " ".join(inst.args[1])))
+ except error.ConfigError, inst:
+ ui.warn(_("hg: %s\n") % inst.args[0])
+ except error.LockHeld, inst:
+ if inst.errno == errno.ETIMEDOUT:
+ reason = _('timed out waiting for lock held by %s') % inst.locker
+ else:
+ reason = _('lock held by %s') % inst.locker
+ ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
+ except error.LockUnavailable, inst:
+ ui.warn(_("abort: could not lock %s: %s\n") %
+ (inst.desc or inst.filename, inst.strerror))
+ except error.ParseError, inst:
+ if inst.args[0]:
+ ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
+ commands.help_(ui, inst.args[0])
+ else:
+ ui.warn(_("hg: %s\n") % inst.args[1])
+ commands.help_(ui, 'shortlist')
+ except error.RepoError, inst:
+ ui.warn(_("abort: %s!\n") % inst)
+ except error.ResponseError, inst:
+ ui.warn(_("abort: %s") % inst.args[0])
+ if not isinstance(inst.args[1], basestring):
+ ui.warn(" %r\n" % (inst.args[1],))
+ elif not inst.args[1]:
+ ui.warn(_(" empty string\n"))
+ else:
+ ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
+ except error.RevlogError, inst:
+ ui.warn(_("abort: %s!\n") % inst)
+ except error.SignalInterrupt:
+ ui.warn(_("killed!\n"))
+ except error.UnknownCommand, inst:
+ ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
+ commands.help_(ui, 'shortlist')
+ except util.Abort, inst:
+ ui.warn(_("abort: %s\n") % inst)
+ except ImportError, inst:
+ m = str(inst).split()[-1]
+ ui.warn(_("abort: could not import module %s!\n") % m)
+ if m in "mpatch bdiff".split():
+ ui.warn(_("(did you forget to compile extensions?)\n"))
+ elif m in "zlib".split():
+ ui.warn(_("(is your Python install correct?)\n"))
+ except IOError, inst:
+ if hasattr(inst, "code"):
+ ui.warn(_("abort: %s\n") % inst)
+ elif hasattr(inst, "reason"):
+ try: # usually it is in the form (errno, strerror)
+ reason = inst.reason.args[1]
+ except: # it might be anything, for example a string
+ reason = inst.reason
+ ui.warn(_("abort: error: %s\n") % reason)
+ elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
+ if ui.debugflag:
+ ui.warn(_("broken pipe\n"))
+ elif getattr(inst, "strerror", None):
+ if getattr(inst, "filename", None):
+ ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+ else:
+ ui.warn(_("abort: %s\n") % inst.strerror)
+ else:
+ raise
+ except OSError, inst:
+ if getattr(inst, "filename", None):
+ ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+ else:
+ ui.warn(_("abort: %s\n") % inst.strerror)
+ except KeyboardInterrupt:
+ try:
+ ui.warn(_("interrupted!\n"))
+ except IOError, inst:
+ if inst.errno == errno.EPIPE:
+ if ui.debugflag:
+ ui.warn(_("\nbroken pipe\n"))
+ else:
+ raise
+ except MemoryError:
+ ui.warn(_("abort: out of memory\n"))
+ except SystemExit, inst:
+ # Commands shouldn't sys.exit directly, but give a return code.
+        # Just in case, catch this and pass the exit code to the caller.
+ return inst.code
+ except socket.error, inst:
+ ui.warn(_("abort: %s\n") % inst.args[-1])
+ except:
+ ui.warn(_("** unknown exception encountered, details follow\n"))
+ ui.warn(_("** report bug details to "
+ "http://mercurial.selenic.com/bts/\n"))
+ ui.warn(_("** or mercurial@selenic.com\n"))
+ ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
+ % util.version())
+ ui.warn(_("** Extensions loaded: %s\n")
+ % ", ".join([x[0] for x in extensions.extensions()]))
+ raise
+
+ return -1
+
+def _findrepo(p):
+ while not os.path.isdir(os.path.join(p, ".hg")):
+ oldp, p = p, os.path.dirname(p)
+ if p == oldp:
+ return None
+
+ return p
+
+def aliasargs(fn):
+ if hasattr(fn, 'args'):
+ return fn.args
+ return []
+
+class cmdalias(object):
+ def __init__(self, name, definition, cmdtable):
+ self.name = name
+ self.definition = definition
+ self.args = []
+ self.opts = []
+ self.help = ''
+ self.norepo = True
+
+ try:
+ cmdutil.findcmd(self.name, cmdtable, True)
+ self.shadows = True
+ except error.UnknownCommand:
+ self.shadows = False
+
+ if not self.definition:
+ def fn(ui, *args):
+ ui.warn(_("no definition for alias '%s'\n") % self.name)
+ return 1
+ self.fn = fn
+
+ return
+
+ args = shlex.split(self.definition)
+ cmd = args.pop(0)
+ opts = []
+ help = ''
+
+ try:
+ self.fn, self.opts, self.help = cmdutil.findcmd(cmd, cmdtable, False)[1]
+ self.args = aliasargs(self.fn) + args
+ if cmd not in commands.norepo.split(' '):
+ self.norepo = False
+ except error.UnknownCommand:
+ def fn(ui, *args):
+ ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \
+ % (self.name, cmd))
+ return 1
+ self.fn = fn
+ except error.AmbiguousCommand:
+ def fn(ui, *args):
+ ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \
+ % (self.name, cmd))
+ return 1
+ self.fn = fn
+
+ def __call__(self, ui, *args, **opts):
+ if self.shadows:
+ ui.debug(_("alias '%s' shadows command\n") % self.name)
+
+ return self.fn(ui, *args, **opts)
+
+def addaliases(ui, cmdtable):
+ # aliases are processed after extensions have been loaded, so they
+ # may use extension commands. Aliases can also use other alias definitions,
+ # but only if they have been defined prior to the current definition.
+ for alias, definition in ui.configitems('alias'):
+ aliasdef = cmdalias(alias, definition, cmdtable)
+ cmdtable[alias] = (aliasdef, aliasdef.opts, aliasdef.help)
+ if aliasdef.norepo:
+ commands.norepo += ' %s' % alias
+
+def _parse(ui, args):
+ options = {}
+ cmdoptions = {}
+
+ try:
+ args = fancyopts.fancyopts(args, commands.globalopts, options)
+ except fancyopts.getopt.GetoptError, inst:
+ raise error.ParseError(None, inst)
+
+ if args:
+ cmd, args = args[0], args[1:]
+ aliases, i = cmdutil.findcmd(cmd, commands.table,
+ ui.config("ui", "strict"))
+ cmd = aliases[0]
+ args = aliasargs(i[0]) + args
+ defaults = ui.config("defaults", cmd)
+ if defaults:
+ args = shlex.split(defaults) + args
+ c = list(i[1])
+ else:
+ cmd = None
+ c = []
+
+ # combine global options into local
+ for o in commands.globalopts:
+ c.append((o[0], o[1], options[o[1]], o[3]))
+
+ try:
+ args = fancyopts.fancyopts(args, c, cmdoptions, True)
+ except fancyopts.getopt.GetoptError, inst:
+ raise error.ParseError(cmd, inst)
+
+ # separate global options back out
+ for o in commands.globalopts:
+ n = o[1]
+ options[n] = cmdoptions[n]
+ del cmdoptions[n]
+
+ return (cmd, cmd and i[0] or None, args, options, cmdoptions)
+
+def _parseconfig(ui, config):
+ """parse the --config options from the command line"""
+ for cfg in config:
+ try:
+ name, value = cfg.split('=', 1)
+ section, name = name.split('.', 1)
+ if not section or not name:
+ raise IndexError
+ ui.setconfig(section, name, value)
+ except (IndexError, ValueError):
+ raise util.Abort(_('malformed --config option: %s') % cfg)
+
+def _earlygetopt(aliases, args):
+ """Return list of values for an option (or aliases).
+
+ The values are listed in the order they appear in args.
+ The options and values are removed from args.
+ """
+ try:
+ argcount = args.index("--")
+ except ValueError:
+ argcount = len(args)
+ shortopts = [opt for opt in aliases if len(opt) == 2]
+ values = []
+ pos = 0
+ while pos < argcount:
+ if args[pos] in aliases:
+ if pos + 1 >= argcount:
+ # ignore and let getopt report an error if there is no value
+ break
+ del args[pos]
+ values.append(args.pop(pos))
+ argcount -= 2
+ elif args[pos][:2] in shortopts:
+ # short option can have no following space, e.g. hg log -Rfoo
+ values.append(args.pop(pos)[2:])
+ argcount -= 1
+ else:
+ pos += 1
+ return values
+
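
An illustration of the _earlygetopt() docstring above: the matched options
and their values are removed from args in place (the values are invented):

    args = ['--config', 'ui.debug=1', 'log', '-Rfoo']
    vals = _earlygetopt(['--config'], args)
    # vals == ['ui.debug=1']; args == ['log', '-Rfoo']
    vals = _earlygetopt(['-R', '--repository', '--repo'], args)
    # vals == ['foo']; the glued short form '-Rfoo' is also recognized
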
+def runcommand(lui, repo, cmd, fullargs, ui, options, d):
+ # run pre-hook, and abort if it fails
+ ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
+ if ret:
+ return ret
+ ret = _runcommand(ui, options, cmd, d)
+ # run post-hook, passing command result
+ hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
+ result = ret)
+ return ret
+
+_loaded = set()
+def _dispatch(ui, args):
+ # read --config before doing anything else
+ # (e.g. to change trust settings for reading .hg/hgrc)
+ _parseconfig(ui, _earlygetopt(['--config'], args))
+
+ # check for cwd
+ cwd = _earlygetopt(['--cwd'], args)
+ if cwd:
+ os.chdir(cwd[-1])
+
+ # read the local repository .hgrc into a local ui object
+ path = _findrepo(os.getcwd()) or ""
+ if not path:
+ lui = ui
+ if path:
+ try:
+ lui = ui.copy()
+ lui.readconfig(os.path.join(path, ".hg", "hgrc"))
+ except IOError:
+ pass
+
+ # now we can expand paths, even ones in .hg/hgrc
+ rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
+ if rpath:
+ path = lui.expandpath(rpath[-1])
+ lui = ui.copy()
+ lui.readconfig(os.path.join(path, ".hg", "hgrc"))
+
+ extensions.loadall(lui)
+ for name, module in extensions.extensions():
+ if name in _loaded:
+ continue
+
+ # setup extensions
+ # TODO this should be generalized to scheme, where extensions can
+ # redepend on other extensions. then we should toposort them, and
+ # do initialization in correct order
+ extsetup = getattr(module, 'extsetup', None)
+ if extsetup:
+ extsetup()
+
+ cmdtable = getattr(module, 'cmdtable', {})
+ overrides = [cmd for cmd in cmdtable if cmd in commands.table]
+ if overrides:
+ ui.warn(_("extension '%s' overrides commands: %s\n")
+ % (name, " ".join(overrides)))
+ commands.table.update(cmdtable)
+ _loaded.add(name)
+
+ addaliases(lui, commands.table)
+
+ # check for fallback encoding
+ fallback = lui.config('ui', 'fallbackencoding')
+ if fallback:
+ encoding.fallbackencoding = fallback
+
+ fullargs = args
+ cmd, func, args, options, cmdoptions = _parse(lui, args)
+
+ if options["config"]:
+ raise util.Abort(_("Option --config may not be abbreviated!"))
+ if options["cwd"]:
+ raise util.Abort(_("Option --cwd may not be abbreviated!"))
+ if options["repository"]:
+ raise util.Abort(_(
+ "Option -R has to be separated from other options (e.g. not -qR) "
+ "and --repository may only be abbreviated as --repo!"))
+
+ if options["encoding"]:
+ encoding.encoding = options["encoding"]
+ if options["encodingmode"]:
+ encoding.encodingmode = options["encodingmode"]
+ if options["time"]:
+ def get_times():
+ t = os.times()
+ if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
+ t = (t[0], t[1], t[2], t[3], time.clock())
+ return t
+ s = get_times()
+ def print_time():
+ t = get_times()
+ ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
+ (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
+ atexit.register(print_time)
+
+ if options['verbose'] or options['debug'] or options['quiet']:
+ ui.setconfig('ui', 'verbose', str(bool(options['verbose'])))
+ ui.setconfig('ui', 'debug', str(bool(options['debug'])))
+ ui.setconfig('ui', 'quiet', str(bool(options['quiet'])))
+ if options['traceback']:
+ ui.setconfig('ui', 'traceback', 'on')
+ if options['noninteractive']:
+ ui.setconfig('ui', 'interactive', 'off')
+
+ if options['help']:
+ return commands.help_(ui, cmd, options['version'])
+ elif options['version']:
+ return commands.version_(ui)
+ elif not cmd:
+ return commands.help_(ui, 'shortlist')
+
+ repo = None
+ if cmd not in commands.norepo.split():
+ try:
+ repo = hg.repository(ui, path=path)
+ ui = repo.ui
+ if not repo.local():
+ raise util.Abort(_("repository '%s' is not local") % path)
+ ui.setconfig("bundle", "mainreporoot", repo.root)
+ except error.RepoError:
+ if cmd not in commands.optionalrepo.split():
+ if args and not path: # try to infer -R from command args
+ repos = map(_findrepo, args)
+ guess = repos[0]
+ if guess and repos.count(guess) == len(repos):
+ return _dispatch(ui, ['--repository', guess] + fullargs)
+ if not path:
+ raise error.RepoError(_("There is no Mercurial repository"
+ " here (.hg not found)"))
+ raise
+ args.insert(0, repo)
+ elif rpath:
+ ui.warn("warning: --repository ignored\n")
+
+ d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
+ return runcommand(lui, repo, cmd, fullargs, ui, options, d)
+
+def _runcommand(ui, options, cmd, cmdfunc):
+ def checkargs():
+ try:
+ return cmdfunc()
+ except error.SignatureError:
+ raise error.ParseError(cmd, _("invalid arguments"))
+
+ if options['profile']:
+ format = ui.config('profiling', 'format', default='text')
+
+        if format not in ['text', 'kcachegrind']:
+ ui.warn(_("unrecognized profiling format '%s'"
+ " - Ignored\n") % format)
+ format = 'text'
+
+ output = ui.config('profiling', 'output')
+
+ if output:
+ path = os.path.expanduser(output)
+ path = ui.expandpath(path)
+ ostream = open(path, 'wb')
+ else:
+ ostream = sys.stderr
+
+ try:
+ from mercurial import lsprof
+ except ImportError:
+ raise util.Abort(_(
+ 'lsprof not available - install from '
+ 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
+ p = lsprof.Profiler()
+ p.enable(subcalls=True)
+ try:
+ return checkargs()
+ finally:
+ p.disable()
+
+ if format == 'kcachegrind':
+ import lsprofcalltree
+ calltree = lsprofcalltree.KCacheGrind(p)
+ calltree.output(ostream)
+ else:
+ # format == 'text'
+ stats = lsprof.Stats(p.getstats())
+ stats.sort()
+ stats.pprint(top=10, file=ostream, climit=5)
+
+ if output:
+ ostream.close()
+ else:
+ return checkargs()
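
run() and dispatch() above are the module's entry points; a sketch of
invoking the dispatcher in-process instead of via the hg script:

    from upmana.mercurial import dispatch
    # roughly `hg status -q`; returns the exit code rather than exiting
    ret = dispatch.dispatch(['status', '-q'])
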
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/encoding.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/encoding.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,75 @@
+# encoding.py - character transcoding support for Mercurial
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import error
+import sys, unicodedata, locale, os
+
+_encodingfixup = {'646': 'ascii', 'ANSI_X3.4-1968': 'ascii'}
+
+try:
+ encoding = os.environ.get("HGENCODING")
+ if sys.platform == 'darwin' and not encoding:
+ # On darwin, getpreferredencoding ignores the locale environment and
+ # always returns mac-roman. We override this if the environment is
+ # not C (has been customized by the user).
+ locale.setlocale(locale.LC_CTYPE, '')
+ encoding = locale.getlocale()[1]
+ if not encoding:
+ encoding = locale.getpreferredencoding() or 'ascii'
+ encoding = _encodingfixup.get(encoding, encoding)
+except locale.Error:
+ encoding = 'ascii'
+encodingmode = os.environ.get("HGENCODINGMODE", "strict")
+fallbackencoding = 'ISO-8859-1'
+
+def tolocal(s):
+ """
+ Convert a string from internal UTF-8 to local encoding
+
+ All internal strings should be UTF-8 but some repos before the
+ implementation of locale support may contain latin1 or possibly
+ other character sets. We attempt to decode everything strictly
+ using UTF-8, then Latin-1, and failing that, we use UTF-8 and
+ replace unknown characters.
+ """
+ for e in ('UTF-8', fallbackencoding):
+ try:
+ u = s.decode(e) # attempt strict decoding
+ return u.encode(encoding, "replace")
+ except LookupError, k:
+ raise error.Abort("%s, please check your locale settings" % k)
+ except UnicodeDecodeError:
+ pass
+ u = s.decode("utf-8", "replace") # last ditch
+ return u.encode(encoding, "replace")
+
+def fromlocal(s):
+ """
+ Convert a string from the local character encoding to UTF-8
+
+ We attempt to decode strings using the encoding mode set by
+ HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
+ characters will cause an error message. Other modes include
+ 'replace', which replaces unknown characters with a special
+ Unicode character, and 'ignore', which drops the character.
+ """
+ try:
+ return s.decode(encoding, encodingmode).encode("utf-8")
+ except UnicodeDecodeError, inst:
+ sub = s[max(0, inst.start-10):inst.start+10]
+ raise error.Abort("decoding near '%s': %s!" % (sub, inst))
+ except LookupError, k:
+ raise error.Abort("%s, please check your locale settings" % k)
+
+def colwidth(s):
+ "Find the column width of a UTF-8 string for display"
+ d = s.decode(encoding, 'replace')
+ if hasattr(unicodedata, 'east_asian_width'):
+ w = unicodedata.east_asian_width
+ return sum([w(c) in 'WF' and 2 or 1 for c in d])
+ return len(d)
+
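
A quick sketch of the two conversion directions (Python 2, like the vendored
code): tolocal() turns internal UTF-8 bytes into the detected local encoding,
and fromlocal() reverses that, aborting on undecodable input while
HGENCODINGMODE is 'strict':

    from upmana.mercurial import encoding  # assumed import path

    utf8_name = 'caf\xc3\xa9'              # UTF-8 bytes for "cafe" with e-acute
    local = encoding.tolocal(utf8_name)    # bytes in encoding.encoding
    roundtrip = encoding.fromlocal(local)  # back to internal UTF-8
    # With a UTF-8 locale the round trip is lossless; under HGENCODING=ascii
    # with HGENCODINGMODE=replace the accented character degrades to '?'.
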
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/error.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/error.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,72 @@
+# error.py - Mercurial exceptions
+#
+# Copyright 2005-2008 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+"""Mercurial exceptions.
+
+This allows us to catch exceptions at higher levels without forcing
+imports.
+"""
+
+# Do not import anything here, please
+
+class RevlogError(Exception):
+ pass
+
+class LookupError(RevlogError, KeyError):
+ def __init__(self, name, index, message):
+ self.name = name
+ if isinstance(name, str) and len(name) == 20:
+ from node import short
+ name = short(name)
+ RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
+
+ def __str__(self):
+ return RevlogError.__str__(self)
+
+class ParseError(Exception):
+ """Exception raised on errors in parsing the command line."""
+
+class ConfigError(Exception):
+ 'Exception raised when parsing config files'
+
+class RepoError(Exception):
+ pass
+
+class CapabilityError(RepoError):
+ pass
+
+class LockError(IOError):
+ def __init__(self, errno, strerror, filename, desc):
+ IOError.__init__(self, errno, strerror, filename)
+ self.desc = desc
+
+class LockHeld(LockError):
+ def __init__(self, errno, filename, desc, locker):
+ LockError.__init__(self, errno, 'Lock held', filename, desc)
+ self.locker = locker
+
+class LockUnavailable(LockError):
+ pass
+
+class ResponseError(Exception):
+ """Raised to print an error with part of output and exit."""
+
+class UnknownCommand(Exception):
+ """Exception raised if command is not in the command table."""
+
+class AmbiguousCommand(Exception):
+ """Exception raised if command shortcut matches more than one command."""
+
+# derived from KeyboardInterrupt to simplify some breakout code
+class SignalInterrupt(KeyboardInterrupt):
+ """Exception raised on SIGTERM and SIGHUP."""
+
+class SignatureError(Exception):
+ pass
+
+class Abort(Exception):
+ """Raised if a command needs to print an error and exit."""
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/extensions.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/extensions.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,182 @@
+# extensions.py - extension handling for mercurial
+#
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import imp, os
+import util, cmdutil, help
+from i18n import _, gettext
+
+_extensions = {}
+_order = []
+
+def extensions():
+ for name in _order:
+ module = _extensions[name]
+ if module:
+ yield name, module
+
+def find(name):
+ '''return module with given extension name'''
+ try:
+ return _extensions[name]
+ except KeyError:
+ for k, v in _extensions.iteritems():
+ if k.endswith('.' + name) or k.endswith('/' + name):
+ return v
+ raise KeyError(name)
+
+def loadpath(path, module_name):
+ module_name = module_name.replace('.', '_')
+ path = os.path.expanduser(path)
+ if os.path.isdir(path):
+ # module/__init__.py style
+ d, f = os.path.split(path.rstrip('/'))
+ fd, fpath, desc = imp.find_module(f, [d])
+ return imp.load_module(module_name, fd, fpath, desc)
+ else:
+ return imp.load_source(module_name, path)
+
+def load(ui, name, path):
+ if name.startswith('hgext.') or name.startswith('hgext/'):
+ shortname = name[6:]
+ else:
+ shortname = name
+ if shortname in _extensions:
+ return
+ _extensions[shortname] = None
+ if path:
+ # the module will be loaded in sys.modules
+ # choose a unique name so that it doesn't
+ # conflict with other modules
+ mod = loadpath(path, 'hgext.%s' % name)
+ else:
+ def importh(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+ try:
+ mod = importh("hgext.%s" % name)
+ except ImportError:
+ mod = importh(name)
+ _extensions[shortname] = mod
+ _order.append(shortname)
+
+ uisetup = getattr(mod, 'uisetup', None)
+ if uisetup:
+ uisetup(ui)
+
+def loadall(ui):
+ result = ui.configitems("extensions")
+ for (name, path) in result:
+ if path:
+ if path[0] == '!':
+ continue
+ try:
+ load(ui, name, path)
+ except KeyboardInterrupt:
+ raise
+ except Exception, inst:
+ if path:
+ ui.warn(_("*** failed to import extension %s from %s: %s\n")
+ % (name, path, inst))
+ else:
+ ui.warn(_("*** failed to import extension %s: %s\n")
+ % (name, inst))
+ if ui.traceback():
+ return 1
+
+def wrapcommand(table, command, wrapper):
+ aliases, entry = cmdutil.findcmd(command, table)
+ for alias, e in table.iteritems():
+ if e is entry:
+ key = alias
+ break
+
+ origfn = entry[0]
+ def wrap(*args, **kwargs):
+ return util.checksignature(wrapper)(
+ util.checksignature(origfn), *args, **kwargs)
+
+ wrap.__doc__ = getattr(origfn, '__doc__')
+ wrap.__module__ = getattr(origfn, '__module__')
+
+ newentry = list(entry)
+ newentry[0] = wrap
+ table[key] = tuple(newentry)
+ return entry
+
+def wrapfunction(container, funcname, wrapper):
+ def wrap(*args, **kwargs):
+ return wrapper(origfn, *args, **kwargs)
+
+ origfn = getattr(container, funcname)
+ setattr(container, funcname, wrap)
+ return origfn
+
+def disabled():
+ '''find disabled extensions from hgext
+ returns a dict of {name: desc}, and the max name length'''
+
+ import hgext
+ extpath = os.path.dirname(os.path.abspath(hgext.__file__))
+
+ try: # might not be a filesystem path
+ files = os.listdir(extpath)
+ except OSError:
+ return None, 0
+
+ exts = {}
+ maxlength = 0
+ for e in files:
+
+ if e.endswith('.py'):
+ name = e.rsplit('.', 1)[0]
+ path = os.path.join(extpath, e)
+ else:
+ name = e
+ path = os.path.join(extpath, e, '__init__.py')
+ if not os.path.exists(path):
+ continue
+
+ if name in exts or name in _order or name == '__init__':
+ continue
+
+ try:
+ file = open(path)
+ except IOError:
+ continue
+ else:
+ doc = help.moduledoc(file)
+ file.close()
+
+ if doc: # extracting localized synopsis
+ exts[name] = gettext(doc).splitlines()[0]
+ else:
+ exts[name] = _('(no help text available)')
+
+ if len(name) > maxlength:
+ maxlength = len(name)
+
+ return exts, maxlength
+
+def enabled():
+ '''return a dict of {name: desc} of extensions, and the max name length'''
+
+ if not _extensions: # nothing enabled, short-circuit
+ return {}, 0
+
+ exts = {}
+ maxlength = 0
+ for ename, ext in extensions():
+ doc = (gettext(ext.__doc__) or _('(no help text available)'))
+ ename = ename.split('.')[-1]
+ maxlength = max(len(ename), maxlength)
+ exts[ename] = doc.splitlines(0)[0].strip()
+
+ return exts, maxlength
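
wrapfunction() is the generic monkey-patch helper here: it installs a wrapper
in place of the named attribute and always passes the original callable as the
wrapper's first argument. A sketch, with ui.ui.write as an illustrative target
(the import path is assumed):

    from upmana.mercurial import extensions, ui

    def countingwrite(origfn, self, *args, **kwargs):
        # observe the call, then delegate to the real implementation
        print "ui.write called with %d argument(s)" % len(args)
        return origfn(self, *args, **kwargs)

    # the original is returned so a caller can restore it later
    orig = extensions.wrapfunction(ui.ui, 'write', countingwrite)
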
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/fancyopts.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/fancyopts.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,110 @@
+# fancyopts.py - better command line parsing
+#
+# Copyright 2005-2009 Matt Mackall and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import getopt
+
+def gnugetopt(args, options, longoptions):
+ """Parse options mostly like getopt.gnu_getopt.
+
+ This is different from getopt.gnu_getopt in that an argument of - will
+ become an argument of - instead of vanishing completely.
+ """
+ extraargs = []
+ if '--' in args:
+ stopindex = args.index('--')
+ extraargs = args[stopindex+1:]
+ args = args[:stopindex]
+ opts, parseargs = getopt.getopt(args, options, longoptions)
+ args = []
+ while parseargs:
+ arg = parseargs.pop(0)
+ if arg and arg[0] == '-' and len(arg) > 1:
+ parseargs.insert(0, arg)
+ topts, newparseargs = getopt.getopt(parseargs, options, longoptions)
+ opts = opts + topts
+ parseargs = newparseargs
+ else:
+ args.append(arg)
+ args.extend(extraargs)
+ return opts, args
+
+
+def fancyopts(args, options, state, gnu=False):
+ """
+ read args, parse options, and store options in state
+
+ each option is a tuple of:
+
+ short option or ''
+ long option
+ default value
+ description
+
+ option types include:
+
+ boolean or none - option sets variable in state to true
+ string - parameter string is stored in state
+ list - parameter string is added to a list
+ integer - parameter strings is stored as int
+ function - call function with parameter
+
+ non-option args are returned
+ """
+ namelist = []
+ shortlist = ''
+ argmap = {}
+ defmap = {}
+
+ for short, name, default, comment in options:
+ # convert opts to getopt format
+ oname = name
+ name = name.replace('-', '_')
+
+ argmap['-' + short] = argmap['--' + oname] = name
+ defmap[name] = default
+
+ # copy defaults to state
+ if isinstance(default, list):
+ state[name] = default[:]
+ elif hasattr(default, '__call__'):
+ state[name] = None
+ else:
+ state[name] = default
+
+ # does it take a parameter?
+ if not (default is None or default is True or default is False):
+ if short: short += ':'
+ if oname: oname += '='
+ if short:
+ shortlist += short
+ if name:
+ namelist.append(oname)
+
+ # parse arguments
+ if gnu:
+ parse = gnugetopt
+ else:
+ parse = getopt.getopt
+ opts, args = parse(args, shortlist, namelist)
+
+ # transfer result to state
+ for opt, val in opts:
+ name = argmap[opt]
+ t = type(defmap[name])
+ if t is type(fancyopts):
+ state[name] = defmap[name](val)
+ elif t is type(1):
+ state[name] = int(val)
+ elif t is type(''):
+ state[name] = val
+ elif t is type([]):
+ state[name].append(val)
+ elif t is type(None) or t is type(False):
+ state[name] = True
+
+ # return unparsed args
+ return args
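
A worked example of the option table, which uses the same four-tuple layout as
Mercurial's command tables; the type of each default value selects the parsing
behavior described in the docstring:

    from upmana.mercurial.fancyopts import fancyopts

    table = [
        ('v', 'verbose', None, 'enable additional output'),  # boolean flag
        ('r', 'rev',     [],   'revisions'),                 # repeatable
        ('',  'limit',   0,    'limit number of changes'),   # integer
    ]
    state = {}
    rest = fancyopts(['-v', '-r', 'tip', '--limit', '3', 'file.txt'],
                     table, state)
    # state == {'verbose': True, 'rev': ['tip'], 'limit': 3}
    # rest  == ['file.txt']
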
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/filelog.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/filelog.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,68 @@
+# filelog.py - file history class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import revlog
+
+class filelog(revlog.revlog):
+ def __init__(self, opener, path):
+ revlog.revlog.__init__(self, opener,
+ "/".join(("data", path + ".i")))
+
+ def read(self, node):
+ t = self.revision(node)
+ if not t.startswith('\1\n'):
+ return t
+ s = t.index('\1\n', 2)
+ return t[s+2:]
+
+ def _readmeta(self, node):
+ t = self.revision(node)
+ if not t.startswith('\1\n'):
+ return {}
+ s = t.index('\1\n', 2)
+ mt = t[2:s]
+ m = {}
+ for l in mt.splitlines():
+ k, v = l.split(": ", 1)
+ m[k] = v
+ return m
+
+ def add(self, text, meta, transaction, link, p1=None, p2=None):
+ if meta or text.startswith('\1\n'):
+ mt = ""
+ if meta:
+ mt = ["%s: %s\n" % (k, v) for k, v in meta.iteritems()]
+ text = "\1\n%s\1\n%s" % ("".join(mt), text)
+ return self.addrevision(text, transaction, link, p1, p2)
+
+ def renamed(self, node):
+ if self.parents(node)[0] != revlog.nullid:
+ return False
+ m = self._readmeta(node)
+ if m and "copy" in m:
+ return (m["copy"], revlog.bin(m["copyrev"]))
+ return False
+
+ def size(self, rev):
+ """return the size of a given revision"""
+
+ # for revisions with renames, we have to go the slow way
+ node = self.node(rev)
+ if self.renamed(node):
+ return len(self.read(node))
+
+ return revlog.revlog.size(self, rev)
+
+ def cmp(self, node, text):
+ """compare text with a given file revision"""
+
+ # for renames, we have to go the slow way
+ if self.renamed(node):
+ t2 = self.read(node)
+ return t2 != text
+
+ return revlog.revlog.cmp(self, node, text)
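
An illustration of the "\1\n" framing that add() and read() agree on: a
non-empty metadata dict (or content that itself starts with the sentinel) gets
a small "key: value" header, which read() strips and _readmeta() parses back
into a dict (the values below are made up):

    meta = {'copy': 'old/name.txt', 'copyrev': '0' * 40}  # illustrative
    body = 'file contents\n'
    header = "".join(["%s: %s\n" % (k, v) for k, v in sorted(meta.items())])
    framed = "\1\n%s\1\n%s" % (header, body)
    # read() returns everything after the second sentinel:
    assert framed[framed.index('\1\n', 2) + 2:] == body
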
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/filemerge.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/filemerge.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,231 @@
+# filemerge.py - file-level merge handling for Mercurial
+#
+# Copyright 2006, 2007, 2008 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import short
+from i18n import _
+import util, simplemerge, match
+import os, tempfile, re, filecmp
+
+def _toolstr(ui, tool, part, default=""):
+ return ui.config("merge-tools", tool + "." + part, default)
+
+def _toolbool(ui, tool, part, default=False):
+ return ui.configbool("merge-tools", tool + "." + part, default)
+
+_internal = ['internal:' + s
+ for s in 'fail local other merge prompt dump'.split()]
+
+def _findtool(ui, tool):
+ if tool in _internal:
+ return tool
+ k = _toolstr(ui, tool, "regkey")
+ if k:
+ p = util.lookup_reg(k, _toolstr(ui, tool, "regname"))
+ if p:
+ p = util.find_exe(p + _toolstr(ui, tool, "regappend"))
+ if p:
+ return p
+ return util.find_exe(_toolstr(ui, tool, "executable", tool))
+
+def _picktool(repo, ui, path, binary, symlink):
+ def check(tool, pat, symlink, binary):
+ tmsg = tool
+ if pat:
+ tmsg += " specified for " + pat
+ if not _findtool(ui, tool):
+ if pat: # explicitly requested tool deserves a warning
+ ui.warn(_("couldn't find merge tool %s\n") % tmsg)
+ else: # configured but non-existing tools are more silent
+ ui.note(_("couldn't find merge tool %s\n") % tmsg)
+ elif symlink and not _toolbool(ui, tool, "symlink"):
+ ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
+ elif binary and not _toolbool(ui, tool, "binary"):
+ ui.warn(_("tool %s can't handle binary\n") % tmsg)
+ elif not util.gui() and _toolbool(ui, tool, "gui"):
+ ui.warn(_("tool %s requires a GUI\n") % tmsg)
+ else:
+ return True
+ return False
+
+ # HGMERGE takes precedence
+ hgmerge = os.environ.get("HGMERGE")
+ if hgmerge:
+ return (hgmerge, hgmerge)
+
+ # then patterns
+ for pat, tool in ui.configitems("merge-patterns"):
+ mf = match.match(repo.root, '', [pat])
+ if mf(path) and check(tool, pat, symlink, False):
+ toolpath = _findtool(ui, tool)
+ return (tool, '"' + toolpath + '"')
+
+ # then merge tools
+ tools = {}
+ for k,v in ui.configitems("merge-tools"):
+ t = k.split('.')[0]
+ if t not in tools:
+ tools[t] = int(_toolstr(ui, t, "priority", "0"))
+ names = tools.keys()
+ tools = sorted([(-p,t) for t,p in tools.items()])
+ uimerge = ui.config("ui", "merge")
+ if uimerge:
+ if uimerge not in names:
+ return (uimerge, uimerge)
+ tools.insert(0, (None, uimerge)) # highest priority
+ tools.append((None, "hgmerge")) # the old default, if found
+ for p,t in tools:
+ if check(t, None, symlink, binary):
+ toolpath = _findtool(ui, t)
+ return (t, '"' + toolpath + '"')
+ # internal merge as last resort
+ return (not (symlink or binary) and "internal:merge" or None, None)
+
+def _eoltype(data):
+ "Guess the EOL type of a file"
+ if '\0' in data: # binary
+ return None
+ if '\r\n' in data: # Windows
+ return '\r\n'
+ if '\r' in data: # Old Mac
+ return '\r'
+ if '\n' in data: # UNIX
+ return '\n'
+ return None # unknown
+
+def _matcheol(file, origfile):
+ "Convert EOL markers in a file to match origfile"
+ tostyle = _eoltype(open(origfile, "rb").read())
+ if tostyle:
+ data = open(file, "rb").read()
+ style = _eoltype(data)
+ if style:
+ newdata = data.replace(style, tostyle)
+ if newdata != data:
+ open(file, "wb").write(newdata)
+
+def filemerge(repo, mynode, orig, fcd, fco, fca):
+ """perform a 3-way merge in the working directory
+
+ mynode = parent node before merge
+ orig = original local filename before merge
+ fco = other file context
+ fca = ancestor file context
+ fcd = local file context for current/destination file
+ """
+
+ def temp(prefix, ctx):
+ pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
+ (fd, name) = tempfile.mkstemp(prefix=pre)
+ data = repo.wwritedata(ctx.path(), ctx.data())
+ f = os.fdopen(fd, "wb")
+ f.write(data)
+ f.close()
+ return name
+
+ def isbin(ctx):
+ try:
+ return util.binary(ctx.data())
+ except IOError:
+ return False
+
+ if not fco.cmp(fcd.data()): # files identical?
+ return None
+
+ ui = repo.ui
+ fd = fcd.path()
+ binary = isbin(fcd) or isbin(fco) or isbin(fca)
+ symlink = 'l' in fcd.flags() + fco.flags()
+ tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
+ ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") %
+ (tool, fd, binary, symlink))
+
+ if not tool or tool == 'internal:prompt':
+ tool = "internal:local"
+ if ui.prompt(_(" no tool found to merge %s\n"
+ "keep (l)ocal or take (o)ther?") % fd,
+ (_("&Local"), _("&Other")), _("l")) != _("l"):
+ tool = "internal:other"
+ if tool == "internal:local":
+ return 0
+ if tool == "internal:other":
+ repo.wwrite(fd, fco.data(), fco.flags())
+ return 0
+ if tool == "internal:fail":
+ return 1
+
+ # do the actual merge
+ a = repo.wjoin(fd)
+ b = temp("base", fca)
+ c = temp("other", fco)
+ out = ""
+ back = a + ".orig"
+ util.copyfile(a, back)
+
+ if orig != fco.path():
+ ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
+ else:
+ ui.status(_("merging %s\n") % fd)
+
+ ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca))
+
+ # do we attempt to simplemerge first?
+ if _toolbool(ui, tool, "premerge", not (binary or symlink)):
+ r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
+ if not r:
+ ui.debug(_(" premerge successful\n"))
+ os.unlink(back)
+ os.unlink(b)
+ os.unlink(c)
+ return 0
+ util.copyfile(back, a) # restore from backup and try again
+
+ env = dict(HG_FILE=fd,
+ HG_MY_NODE=short(mynode),
+ HG_OTHER_NODE=str(fco.changectx()),
+ HG_MY_ISLINK='l' in fcd.flags(),
+ HG_OTHER_ISLINK='l' in fco.flags(),
+ HG_BASE_ISLINK='l' in fca.flags())
+
+ if tool == "internal:merge":
+ r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
+ elif tool == 'internal:dump':
+ a = repo.wjoin(fd)
+ util.copyfile(a, a + ".local")
+ repo.wwrite(fd + ".other", fco.data(), fco.flags())
+ repo.wwrite(fd + ".base", fca.data(), fca.flags())
+ return 1 # unresolved
+ else:
+ args = _toolstr(ui, tool, "args", '$local $base $other')
+ if "$output" in args:
+ out, a = a, back # read input from backup, write to original
+ replace = dict(local=a, base=b, other=c, output=out)
+ args = re.sub("\$(local|base|other|output)",
+ lambda x: '"%s"' % replace[x.group()[1:]], args)
+ r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
+
+ if not r and _toolbool(ui, tool, "checkconflicts"):
+ if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
+ r = 1
+
+ if not r and _toolbool(ui, tool, "checkchanged"):
+ if filecmp.cmp(repo.wjoin(fd), back):
+ if ui.prompt(_(" output file %s appears unchanged\n"
+ "was merge successful (yn)?") % fd,
+ (_("&Yes"), _("&No")), _("n")) != _("y"):
+ r = 1
+
+ if _toolbool(ui, tool, "fixeol"):
+ _matcheol(repo.wjoin(fd), back)
+
+ if r:
+ ui.warn(_("merging %s failed!\n") % fd)
+ else:
+ os.unlink(back)
+
+ os.unlink(b)
+ os.unlink(c)
+ return r
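
_picktool()'s precedence above is: $HGMERGE wins outright, then any
[merge-patterns] entry matching the path, then [merge-tools] candidates
ordered by their priority setting, and finally internal:merge for non-binary,
non-symlink files. A configuration exercising the configurable stages might
look like this (the kdiff3 entries are illustrative):

    [merge-patterns]
    **.generated = internal:other

    [merge-tools]
    kdiff3.priority = 1
    kdiff3.args = $base $local $other -o $output
    kdiff3.gui = True
    kdiff3.premerge = True
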
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/graphmod.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/graphmod.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,119 @@
+# Revision graph generator for Mercurial
+#
+# Copyright 2008 Dirkjan Ochtman
+# Copyright 2007 Joel Rosdahl
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+"""supports walking the history as DAGs suitable for graphical output
+
+The most basic format we use is that of::
+
+ (id, type, data, [parentids])
+
+The node and parent ids are arbitrary integers which identify a node in the
+context of the graph returned. Type is a constant specifying the node type.
+Data depends on type.
+"""
+
+from mercurial.node import nullrev
+
+CHANGESET = 'C'
+
+def revisions(repo, start, stop):
+ """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
+
+ This generator function walks through the revision history from revision
+ start to revision stop (which must be less than or equal to start). It
+ returns a tuple for each node. The node and parent ids are arbitrary
+ integers which identify a node in the context of the graph returned.
+ """
+ cur = start
+ while cur >= stop:
+ ctx = repo[cur]
+ parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev]
+ yield (cur, CHANGESET, ctx, sorted(parents))
+ cur -= 1
+
+def filerevs(repo, path, start, stop):
+ """file cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
+
+ This generator function walks through the revision history of a single
+ file from revision start down to revision stop.
+ """
+ filerev = len(repo.file(path)) - 1
+ while filerev >= 0:
+ fctx = repo.filectx(path, fileid=filerev)
+ parents = [f.linkrev() for f in fctx.parents() if f.path() == path]
+ rev = fctx.rev()
+ if rev <= start:
+ yield (rev, CHANGESET, fctx, sorted(parents))
+ if rev <= stop:
+ break
+ filerev -= 1
+
+def nodes(repo, nodes):
+ """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
+
+ This generator function walks the given nodes. It only returns parents
+ that are in nodes, too.
+ """
+ include = set(nodes)
+ for node in nodes:
+ ctx = repo[node]
+ parents = [p.rev() for p in ctx.parents() if p.node() in include]
+ yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
+
+def colored(dag):
+ """annotates a DAG with colored edge information
+
+ For each DAG node this function emits tuples::
+
+ (id, type, data, (col, color), [(col, nextcol, color)])
+
+ with the following new elements:
+
+ - Tuple (col, color) with column and color index for the current node
+ - A list of tuples indicating the edges between the current node and its
+ parents.
+ """
+ seen = []
+ colors = {}
+ newcolor = 1
+ for (cur, type, data, parents) in dag:
+
+ # Compute seen and next
+ if cur not in seen:
+ seen.append(cur) # new head
+ colors[cur] = newcolor
+ newcolor += 1
+
+ col = seen.index(cur)
+ color = colors.pop(cur)
+ next = seen[:]
+
+ # Add parents to next
+ addparents = [p for p in parents if p not in next]
+ next[col:col + 1] = addparents
+
+ # Set colors for the parents
+ for i, p in enumerate(addparents):
+ if not i:
+ colors[p] = color
+ else:
+ colors[p] = newcolor
+ newcolor += 1
+
+ # Add edges to the graph
+ edges = []
+ for ecol, eid in enumerate(seen):
+ if eid in next:
+ edges.append((ecol, next.index(eid), colors[eid]))
+ elif eid == cur:
+ for p in parents:
+ edges.append((ecol, next.index(p), colors[p]))
+
+ # Yield and move on
+ yield (cur, type, data, (col, color), edges)
+ seen = next
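
The generators chain: one of the DAG sources (revisions, filerevs or nodes)
feeds colored(), which adds a column/color assignment plus an edge list for
each row. A sketch, where repo stands for an already-open repository object:

    from upmana.mercurial import graphmod

    start = len(repo) - 1                  # newest revision
    stop = max(0, len(repo) - 10)          # walk ten changesets
    dag = graphmod.revisions(repo, start, stop)
    for rev, type, ctx, (col, color), edges in graphmod.colored(dag):
        print "rev %d: column %d, color %d, %d edge(s)" % (
            rev, col, color, len(edges))
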
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hbisect.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hbisect.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,145 @@
+# changelog bisection for mercurial
+#
+# Copyright 2007 Matt Mackall
+# Copyright 2005, 2006 Benoit Boissinot
+#
+# Inspired by git bisect, extension skeleton taken from mq.py.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os
+from i18n import _
+from node import short, hex
+import util
+
+def bisect(changelog, state):
+ """find the next node (if any) for testing during a bisect search.
+ returns a (nodes, number, good) tuple.
+
+ 'nodes' is the final result of the bisect if 'number' is 0.
+ Otherwise 'number' indicates the remaining possible candidates for
+ the search and 'nodes' contains the next bisect target.
+ 'good' is True if bisect is searching for a first good changeset, False
+ if searching for a first bad one.
+ """
+
+ clparents = changelog.parentrevs
+ skip = set([changelog.rev(n) for n in state['skip']])
+
+ def buildancestors(bad, good):
+ # only the earliest bad revision matters
+ badrev = min([changelog.rev(n) for n in bad])
+ goodrevs = [changelog.rev(n) for n in good]
+ # build ancestors array
+ ancestors = [[]] * (len(changelog) + 1) # an extra for [-1]
+
+ # clear good revs from array
+ for node in goodrevs:
+ ancestors[node] = None
+ for rev in xrange(len(changelog), -1, -1):
+ if ancestors[rev] is None:
+ for prev in clparents(rev):
+ ancestors[prev] = None
+
+ if ancestors[badrev] is None:
+ return badrev, None
+ return badrev, ancestors
+
+ good = 0
+ badrev, ancestors = buildancestors(state['bad'], state['good'])
+ if not ancestors: # looking for bad to good transition?
+ good = 1
+ badrev, ancestors = buildancestors(state['good'], state['bad'])
+ bad = changelog.node(badrev)
+ if not ancestors: # now we're confused
+ raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
+ % (badrev, short(bad)))
+
+ # build children dict
+ children = {}
+ visit = [badrev]
+ candidates = []
+ while visit:
+ rev = visit.pop(0)
+ if ancestors[rev] == []:
+ candidates.append(rev)
+ for prev in clparents(rev):
+ if prev != -1:
+ if prev in children:
+ children[prev].append(rev)
+ else:
+ children[prev] = [rev]
+ visit.append(prev)
+
+ candidates.sort()
+ # have we narrowed it down to one entry?
+ # or have all other possible candidates besides 'bad' been skipped?
+ tot = len(candidates)
+ unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
+ if tot == 1 or not unskipped:
+ return ([changelog.node(rev) for rev in candidates], 0, good)
+ perfect = tot // 2
+
+ # find the best node to test
+ best_rev = None
+ best_len = -1
+ poison = set()
+ for rev in candidates:
+ if rev in poison:
+ # poison children
+ poison.update(children.get(rev, []))
+ continue
+
+ a = ancestors[rev] or [rev]
+ ancestors[rev] = None
+
+ x = len(a) # number of ancestors
+ y = tot - x # number of non-ancestors
+ value = min(x, y) # how good is this test?
+ if value > best_len and rev not in skip:
+ best_len = value
+ best_rev = rev
+ if value == perfect: # found a perfect candidate? quit early
+ break
+
+ if y < perfect and rev not in skip: # all downhill from here?
+ # poison children
+ poison.update(children.get(rev, []))
+ continue
+
+ for c in children.get(rev, []):
+ if ancestors[c]:
+ ancestors[c] = list(set(ancestors[c] + a))
+ else:
+ ancestors[c] = a + [c]
+
+ assert best_rev is not None
+ best_node = changelog.node(best_rev)
+
+ return ([best_node], tot, good)
+
+
+def load_state(repo):
+ state = {'good': [], 'bad': [], 'skip': []}
+ if os.path.exists(repo.join("bisect.state")):
+ for l in repo.opener("bisect.state"):
+ kind, node = l[:-1].split()
+ node = repo.lookup(node)
+ if kind not in state:
+ raise util.Abort(_("unknown bisect kind %s") % kind)
+ state[kind].append(node)
+ return state
+
+
+def save_state(repo, state):
+ f = repo.opener("bisect.state", "w", atomictemp=True)
+ wlock = repo.wlock()
+ try:
+ for kind in state:
+ for node in state[kind]:
+ f.write("%s %s\n" % (kind, hex(node)))
+ f.rename()
+ finally:
+ wlock.release()
+
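
A sketch of one driver step, reading the (nodes, number, good) contract from
the docstring above (repo stands for an open repository object):

    from upmana.mercurial import hbisect
    from upmana.mercurial.node import short

    state = hbisect.load_state(repo)
    nodes, remaining, good = hbisect.bisect(repo.changelog, state)
    if remaining == 0:
        # the search is over; nodes holds the final answer
        print "first %s revision: %s" % (good and "good" or "bad",
                                         short(nodes[0]))
    else:
        print "test %s next (%d candidates left)" % (short(nodes[0]),
                                                     remaining)
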
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/help.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/help.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,511 @@
+# help.py - help data for mercurial
+#
+# Copyright 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import extensions, util
+
+
+def moduledoc(file):
+ '''return the top-level python documentation for the given file
+
+ Loosely inspired by pydoc.source_synopsis(), but rewritten to handle \'''
+ as well as """ and to return the whole text instead of just the synopsis'''
+ result = []
+
+ line = file.readline()
+ while line[:1] == '#' or not line.strip():
+ line = file.readline()
+ if not line: break
+
+ start = line[:3]
+ if start == '"""' or start == "'''":
+ line = line[3:]
+ while line:
+ if line.rstrip().endswith(start):
+ line = line.split(start)[0]
+ if line:
+ result.append(line)
+ break
+ elif not line:
+ return None # unmatched delimiter
+ result.append(line)
+ line = file.readline()
+ else:
+ return None
+
+ return ''.join(result)
+
+def listexts(header, exts, maxlength):
+ '''return a text listing of the given extensions'''
+ if not exts:
+ return ''
+ result = '\n%s\n\n' % header
+ for name, desc in sorted(exts.iteritems()):
+ desc = util.wrap(desc, maxlength + 4)
+ result += ' %s %s\n' % (name.ljust(maxlength), desc)
+ return result
+
+def extshelp():
+ doc = _(r'''
+ Mercurial has the ability to add new features through the use of
+ extensions. Extensions may add new commands, add options to
+ existing commands, change the default behavior of commands, or
+ implement hooks.
+
+ Extensions are not loaded by default for a variety of reasons:
+ they can increase startup overhead; they may be meant for
+ advanced usage only; they may provide potentially dangerous
+ abilities (such as letting you destroy or modify history); they
+ might not be ready for prime time; or they may alter some
+ usual behaviors of stock Mercurial. It is thus up to the user to
+ activate extensions as needed.
+
+ To enable the "foo" extension, either shipped with Mercurial
+ or in the Python search path, create an entry for it in your
+ hgrc, like this:
+
+ [extensions]
+ foo =
+
+ You may also specify the full path to an extension:
+
+ [extensions]
+ myfeature = ~/.hgext/myfeature.py
+
+ To explicitly disable an extension enabled in an hgrc of broader
+ scope, prepend its path with !:
+
+ [extensions]
+ # disabling extension bar residing in /path/to/extension/bar.py
+ hgext.bar = !/path/to/extension/bar.py
+ # ditto, but no path was supplied for extension baz
+ hgext.baz = !
+ ''')
+
+ exts, maxlength = extensions.enabled()
+ doc += listexts(_('enabled extensions:'), exts, maxlength)
+
+ exts, maxlength = extensions.disabled()
+ doc += listexts(_('disabled extensions:'), exts, maxlength)
+
+ return doc
+
+helptable = (
+ (["dates"], _("Date Formats"),
+ _(r'''
+ Some commands allow the user to specify a date, e.g.:
+ * backout, commit, import, tag: Specify the commit date.
+ * log, revert, update: Select revision(s) by date.
+
+ Many date formats are valid. Here are some examples:
+
+ "Wed Dec 6 13:18:29 2006" (local timezone assumed)
+ "Dec 6 13:18 -0600" (year assumed, time offset provided)
+ "Dec 6 13:18 UTC" (UTC and GMT are aliases for +0000)
+ "Dec 6" (midnight)
+ "13:18" (today assumed)
+ "3:39" (3:39AM assumed)
+ "3:39pm" (15:39)
+ "2006-12-06 13:18:29" (ISO 8601 format)
+ "2006-12-6 13:18"
+ "2006-12-6"
+ "12-6"
+ "12/6"
+ "12/6/6" (Dec 6 2006)
+
+ Lastly, there is Mercurial's internal format:
+
+ "1165432709 0" (Wed Dec 6 13:18:29 2006 UTC)
+
+ This is the internal representation format for dates. unixtime is
+ the number of seconds since the epoch (1970-01-01 00:00 UTC).
+ offset is the offset of the local timezone, in seconds west of UTC
+ (negative if the timezone is east of UTC).
+
+ The log command also accepts date ranges:
+
+ "<{datetime}" - at or before a given date/time
+ ">{datetime}" - on or after a given date/time
+ "{datetime} to {datetime}" - a date range, inclusive
+ "-{days}" - within a given number of days of today
+ ''')),
+
+ (["patterns"], _("File Name Patterns"),
+ _(r'''
+ Mercurial accepts several notations for identifying one or more
+ files at a time.
+
+ By default, Mercurial treats filenames as shell-style extended
+ glob patterns.
+
+ Alternate pattern notations must be specified explicitly.
+
+ To use a plain path name without any pattern matching, start it
+ with "path:". These path names must completely match starting at
+ the current repository root.
+
+ To use an extended glob, start a name with "glob:". Globs are
+ rooted at the current directory; a glob such as "*.c" will only
+ match files in the current directory ending with ".c".
+
+ The supported glob syntax extensions are "**" to match any string
+ across path separators and "{a,b}" to mean "a or b".
+
+ To use a Perl/Python regular expression, start a name with "re:".
+ Regexp pattern matching is anchored at the root of the repository.
+
+ Plain examples:
+
+ path:foo/bar a name bar in a directory named foo in the root of
+ the repository
+ path:path:name a file or directory named "path:name"
+
+ Glob examples:
+
+ glob:*.c any name ending in ".c" in the current directory
+ *.c any name ending in ".c" in the current directory
+ **.c any name ending in ".c" in any subdirectory of the
+ current directory including itself.
+ foo/*.c any name ending in ".c" in the directory foo
+ foo/**.c any name ending in ".c" in any subdirectory of foo
+ including itself.
+
+ Regexp examples:
+
+ re:.*\.c$ any name ending in ".c", anywhere in the repository
+
+ ''')),
+
+ (['environment', 'env'], _('Environment Variables'),
+ _(r'''
+HG::
+ Path to the 'hg' executable, automatically passed when running
+ hooks, extensions or external tools. If unset or empty, this is
+ the hg executable's name if it's frozen, or an executable named
+ 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on
+ Windows) is searched.
+
+HGEDITOR::
+ This is the name of the editor to run when committing. See EDITOR.
+
+ (deprecated, use .hgrc)
+
+HGENCODING::
+ This overrides the default locale setting detected by Mercurial.
+ This setting is used to convert data including usernames,
+ changeset descriptions, tag names, and branches. This setting can
+ be overridden with the --encoding command-line option.
+
+HGENCODINGMODE::
+ This sets Mercurial's behavior for handling unknown characters
+ while transcoding user input. The default is "strict", which
+ causes Mercurial to abort if it can't map a character. Other
+ settings include "replace", which replaces unknown characters, and
+ "ignore", which drops them. This setting can be overridden with
+ the --encodingmode command-line option.
+
+HGMERGE::
+ An executable to use for resolving merge conflicts. The program
+ will be executed with three arguments: local file, remote file,
+ ancestor file.
+
+ (deprecated, use .hgrc)
+
+HGRCPATH::
+ A list of files or directories to search for hgrc files. Item
+ separator is ":" on Unix, ";" on Windows. If HGRCPATH is not set,
+ platform default search path is used. If empty, only the .hg/hgrc
+ from the current repository is read.
+
+ For each element in HGRCPATH:
+ * if it's a directory, all files ending with .rc are added
+ * otherwise, the file itself will be added
+
+HGUSER::
+ This is the string used as the author of a commit. If not set,
+ available values will be considered in this order:
+
+ * HGUSER (deprecated)
+ * hgrc files from the HGRCPATH
+ * EMAIL
+ * interactive prompt
+ * LOGNAME (with '@hostname' appended)
+
+ (deprecated, use .hgrc)
+
+EMAIL::
+ May be used as the author of a commit; see HGUSER.
+
+LOGNAME::
+ May be used as the author of a commit; see HGUSER.
+
+VISUAL::
+ This is the name of the editor to use when committing. See EDITOR.
+
+EDITOR::
+ Sometimes Mercurial needs to open a text file in an editor for a
+ user to modify, for example when writing commit messages. The
+ editor it uses is determined by looking at the environment
+ variables HGEDITOR, VISUAL and EDITOR, in that order. The first
+ non-empty one is chosen. If all of them are empty, the editor
+ defaults to 'vi'.
+
+PYTHONPATH::
+ This is used by Python to find imported modules and may need to be
+ set appropriately if this Mercurial is not installed system-wide.
+ ''')),
+
+ (['revs', 'revisions'], _('Specifying Single Revisions'),
+ _(r'''
+ Mercurial supports several ways to specify individual revisions.
+
+ A plain integer is treated as a revision number. Negative integers
+ are treated as topological offsets from the tip, with -1 denoting
+ the tip. As such, negative numbers are only useful if you've
+ memorized your local tree numbers and want to save typing a single
+ digit. This editor suggests copy and paste.
+
+ A 40-digit hexadecimal string is treated as a unique revision
+ identifier.
+
+ A hexadecimal string less than 40 characters long is treated as a
+ unique revision identifier, and referred to as a short-form
+ identifier. A short-form identifier is only valid if it is the
+ prefix of exactly one full-length identifier.
+
+ Any other string is treated as a tag name, which is a symbolic
+ name associated with a revision identifier. Tag names may not
+ contain the ":" character.
+
+ The reserved name "tip" is a special tag that always identifies
+ the most recent revision.
+
+ The reserved name "null" indicates the null revision. This is the
+ revision of an empty repository, and the parent of revision 0.
+
+ The reserved name "." indicates the working directory parent. If
+ no working directory is checked out, it is equivalent to null. If
+ an uncommitted merge is in progress, "." is the revision of the
+ first parent.
+ ''')),
+
+ (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'),
+ _(r'''
+ When Mercurial accepts more than one revision, they may be
+ specified individually, or provided as a topologically continuous
+ range, separated by the ":" character.
+
+ The syntax of range notation is [BEGIN]:[END], where BEGIN and END
+ are revision identifiers. Both BEGIN and END are optional. If
+ BEGIN is not specified, it defaults to revision number 0. If END
+ is not specified, it defaults to the tip. The range ":" thus means
+ "all revisions".
+
+ If BEGIN is greater than END, revisions are treated in reverse
+ order.
+
+ A range acts as a closed interval. This means that a range of 3:5
+ gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6.
+ ''')),
+
+ (['diffs'], _('Diff Formats'),
+ _(r'''
+ Mercurial's default format for showing changes between two
+ versions of a file is compatible with the unified format of GNU
+ diff, which can be used by GNU patch and many other standard
+ tools.
+
+ While this standard format is often enough, it does not encode the
+ following information:
+
+ - executable status and other permission bits
+ - copy or rename information
+ - changes in binary files
+ - creation or deletion of empty files
+
+ Mercurial also supports the extended diff format from the git VCS
+ which addresses these limitations. The git diff format is not
+ produced by default because a few widespread tools still do not
+ understand this format.
+
+ This means that when generating diffs from a Mercurial repository
+ (e.g. with "hg export"), you should be careful about things like
+ file copies and renames or other things mentioned above, because
+ when applying a standard diff to a different repository, this
+ extra information is lost. Mercurial's internal operations (like
+ push and pull) are not affected by this, because they use an
+ internal binary format for communicating changes.
+
+ To make Mercurial produce the git extended diff format, use the
+ --git option available for many commands, or set 'git = True' in
+ the [diff] section of your hgrc. You do not need to set this
+ option when importing diffs in this format or using them in the mq
+ extension.
+ ''')),
+ (['templating'], _('Template Usage'),
+ _(r'''
+ Mercurial allows you to customize output of commands through
+ templates. You can either pass in a template from the command
+ line, via the --template option, or select an existing
+ template-style (--style).
+
+ You can customize output for any "log-like" command: log,
+ outgoing, incoming, tip, parents, heads and glog.
+
+ Three styles are packaged with Mercurial: default (the style used
+ when no explicit preference is passed), compact and changelog.
+ Usage:
+
+ $ hg log -r1 --style changelog
+
+ A template is a piece of text, with markup to invoke variable
+ expansion:
+
+ $ hg log -r1 --template "{node}\n"
+ b56ce7b07c52de7d5fd79fb89701ea538af65746
+
+ Strings in curly braces are called keywords. The availability of
+ keywords depends on the exact context of the templater. These
+ keywords are usually available for templating a log-like command:
+
+ - author: String. The unmodified author of the changeset.
+ - branches: String. The name of the branch on which the changeset
+ was committed. Will be empty if the branch name was default.
+ - date: Date information. The date when the changeset was committed.
+ - desc: String. The text of the changeset description.
+ - diffstat: String. Statistics of changes with the following
+ format: "modified files: +added/-removed lines"
+ - files: List of strings. All files modified, added, or removed by
+ this changeset.
+ - file_adds: List of strings. Files added by this changeset.
+ - file_mods: List of strings. Files modified by this changeset.
+ - file_dels: List of strings. Files removed by this changeset.
+ - node: String. The changeset identification hash, as a
+ 40-character hexadecimal string.
+ - parents: List of strings. The parents of the changeset.
+ - rev: Integer. The repository-local changeset revision number.
+ - tags: List of strings. Any tags associated with the changeset.
+
+ The "date" keyword does not produce human-readable output. If you
+ want to use a date in your output, you can use a filter to process
+ it. Filters are functions which return a string based on the input
+ variable. You can also use a chain of filters to get the desired
+ output:
+
+ $ hg tip --template "{date|isodate}\n"
+ 2008-08-21 18:22 +0000
+
+ List of filters:
+
+ - addbreaks: Any text. Add an XHTML "<br />" tag before the end of
+ every line except the last.
+ - age: Date. Returns a human-readable date/time difference between
+ the given date/time and the current date/time.
+ - basename: Any text. Treats the text as a path, and returns the
+ last component of the path after splitting by the path
+ separator (ignoring trailing separators). For example,
+ "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "bar".
+ - stripdir: Treat the text as a path and strip a directory level, if
+ possible. For example, "foo" and "foo/bar" become "foo".
+ - date: Date. Returns a date in a Unix date format, including
+ the timezone: "Mon Sep 04 15:13:13 2006 0700".
+ - domain: Any text. Finds the first string that looks like an
+ email address, and extracts just the domain component.
+ Example: 'User <user@example.com>' becomes 'example.com'.
+ - email: Any text. Extracts the first string that looks like an
+ email address. Example: 'User <user@example.com>' becomes
+ 'user@example.com'.
+ - escape: Any text. Replaces the special XML/XHTML characters "&",
+ "<" and ">" with XML entities.
+ - fill68: Any text. Wraps the text to fit in 68 columns.
+ - fill76: Any text. Wraps the text to fit in 76 columns.
+ - firstline: Any text. Returns the first line of text.
+ - nonempty: Any text. Returns '(none)' if the string is empty.
+ - hgdate: Date. Returns the date as a pair of numbers:
+ "1157407993 25200" (Unix timestamp, timezone offset).
+ - isodate: Date. Returns the date in ISO 8601 format.
+ - localdate: Date. Converts a date to local date.
+ - obfuscate: Any text. Returns the input text rendered as a
+ sequence of XML entities.
+ - person: Any text. Returns the text before an email address.
+ - rfc822date: Date. Returns a date using the same format used
+ in email headers.
+ - short: Changeset hash. Returns the short form of a changeset
+ hash, i.e. a 12-character hexadecimal string.
+ - shortdate: Date. Returns a date like "2006-09-18".
+ - strip: Any text. Strips all leading and trailing whitespace.
+ - tabindent: Any text. Returns the text, with every line except
+ the first starting with a tab character.
+ - urlescape: Any text. Escapes all "special" characters. For
+ example, "foo bar" becomes "foo%20bar".
+ - user: Any text. Returns the user portion of an email address.
+ ''')),
+
+ (['urls'], _('URL Paths'),
+ _(r'''
+ Valid URLs are of the form:
+
+ local/filesystem/path[#revision]
+ file://local/filesystem/path[#revision]
+ http://[user[:pass]@]host[:port]/[path][#revision]
+ https://[user[:pass]@]host[:port]/[path][#revision]
+ ssh://[user[:pass]@]host[:port]/[path][#revision]
+
+ Paths in the local filesystem can either point to Mercurial
+ repositories or to bundle files (as created by 'hg bundle' or
+ 'hg incoming --bundle').
+
+ An optional identifier after # indicates a particular branch, tag,
+ or changeset to use from the remote repository. See also 'hg help
+ revisions'.
+
+ Some features, such as pushing to http:// and https:// URLs, are
+ only possible if the feature is explicitly enabled on the remote
+ Mercurial server.
+
+ Some notes about using SSH with Mercurial:
+ - SSH requires an accessible shell account on the destination
+ machine and a copy of hg in the remote path, or one specified
+ with remotecmd.
+ - path is relative to the remote user's home directory by default.
+ Use an extra slash at the start of a path to specify an absolute path:
+ ssh://example.com//tmp/repository
+ - Mercurial doesn't use its own compression via SSH; the right
+ thing to do is to configure it in your ~/.ssh/config, e.g.:
+ Host *.mylocalnetwork.example.com
+ Compression no
+ Host *
+ Compression yes
+ Alternatively specify "ssh -C" as your ssh command in your hgrc
+ or with the --ssh command line option.
+
+ These URLs can all be stored in your hgrc with path aliases under
+ the [paths] section like so:
+ [paths]
+ alias1 = URL1
+ alias2 = URL2
+ ...
+
+ You can then use the alias for any command that uses a URL (for
+ example 'hg pull alias1' would pull from the 'alias1' path).
+
+ Two path aliases are special because they are used as defaults
+ when you do not provide the URL to a command:
+
+ default:
+ When you create a repository with hg clone, the clone command
+ saves the location of the source repository as the new
+ repository's 'default' path. This is then used when you omit
+ path from push- and pull-like commands (including incoming and
+ outgoing).
+
+ default-push:
+ The push command will look for a path named 'default-push', and
+ prefer it over 'default' if both are defined.
+ ''')),
+ (["extensions"], _("Using additional features"), extshelp),
+)
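
moduledoc() above is what extensions.disabled() relies on: it skips leading
comments and blank lines, then collects the first triple-quoted block without
importing the module. Any object with a readline() method works, so a sketch
can use StringIO:

    from StringIO import StringIO
    from upmana.mercurial import help

    src = StringIO('# comment\n"""sync with remote sites\n\ndetails\n"""\n')
    doc = help.moduledoc(src)
    # doc == 'sync with remote sites\n\ndetails\n'
    synopsis = doc.splitlines()[0]         # 'sync with remote sites'
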
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hg.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hg.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,363 @@
+# hg.py - repository classes for mercurial
+#
+# Copyright 2005-2007 Matt Mackall
+# Copyright 2006 Vadim Gelfer
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+from lock import release
+import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
+import lock, util, extensions, error
+import merge as _merge
+import verify as _verify
+import errno, os, shutil
+
+def _local(path):
+ return (os.path.isfile(util.drop_scheme('file', path)) and
+ bundlerepo or localrepo)
+
+def parseurl(url, revs=[]):
+ '''parse url#branch, returning url, branch + revs'''
+
+ if '#' not in url:
+ return url, (revs or None), revs and revs[-1] or None
+
+ url, branch = url.split('#', 1)
+ checkout = revs and revs[-1] or branch
+ return url, (revs or []) + [branch], checkout
+
+schemes = {
+ 'bundle': bundlerepo,
+ 'file': _local,
+ 'http': httprepo,
+ 'https': httprepo,
+ 'ssh': sshrepo,
+ 'static-http': statichttprepo,
+}
+
+def _lookup(path):
+ scheme = 'file'
+ if path:
+ c = path.find(':')
+ if c > 0:
+ scheme = path[:c]
+ thing = schemes.get(scheme) or schemes['file']
+ try:
+ return thing(path)
+ except TypeError:
+ return thing
+
+def islocal(repo):
+ '''return true if repo or path is local'''
+ if isinstance(repo, str):
+ try:
+ return _lookup(repo).islocal(repo)
+ except AttributeError:
+ return False
+ return repo.local()
+
+def repository(ui, path='', create=False):
+ """return a repository object for the specified path"""
+ repo = _lookup(path).instance(ui, path, create)
+ ui = getattr(repo, "ui", ui)
+ for name, module in extensions.extensions():
+ hook = getattr(module, 'reposetup', None)
+ if hook:
+ hook(ui, repo)
+ return repo
+
+def defaultdest(source):
+ '''return default destination of clone if none is given'''
+ return os.path.basename(os.path.normpath(source))
+
+def localpath(path):
+ if path.startswith('file://localhost/'):
+ return path[16:]
+ if path.startswith('file://'):
+ return path[7:]
+ if path.startswith('file:'):
+ return path[5:]
+ return path
+
+def share(ui, source, dest=None, update=True):
+ '''create a shared repository'''
+
+ if not islocal(source):
+ raise util.Abort(_('can only share local repositories'))
+
+ if not dest:
+ dest = os.path.basename(source)
+
+ if isinstance(source, str):
+ origsource = ui.expandpath(source)
+ source, rev, checkout = parseurl(origsource, '')
+ srcrepo = repository(ui, source)
+ else:
+ srcrepo = source
+ origsource = source = srcrepo.url()
+ checkout = None
+
+ sharedpath = srcrepo.sharedpath # if our source is already sharing
+
+ root = os.path.realpath(dest)
+ roothg = os.path.join(root, '.hg')
+
+ if os.path.exists(roothg):
+ raise util.Abort(_('destination already exists'))
+
+ if not os.path.isdir(root):
+ os.mkdir(root)
+ os.mkdir(roothg)
+
+ requirements = ''
+ try:
+ requirements = srcrepo.opener('requires').read()
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ requirements += 'shared\n'
+ file(os.path.join(roothg, 'requires'), 'w').write(requirements)
+ file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
+
+ default = srcrepo.ui.config('paths', 'default')
+ if default:
+ f = file(os.path.join(roothg, 'hgrc'), 'w')
+ f.write('[paths]\ndefault = %s\n' % default)
+ f.close()
+
+ r = repository(ui, root)
+
+ if update:
+ r.ui.status(_("updating working directory\n"))
+ if update is not True:
+ checkout = update
+ for test in (checkout, 'default', 'tip'):
+ try:
+ uprev = r.lookup(test)
+ break
+ except:
+ continue
+ _update(r, uprev)
+
+def clone(ui, source, dest=None, pull=False, rev=None, update=True,
+ stream=False):
+ """Make a copy of an existing repository.
+
+ Create a copy of an existing repository in a new directory. The
+ source and destination are URLs, as passed to the repository
+ function. Returns a pair of repository objects, the source and
+ newly created destination.
+
+ The location of the source is added to the new repository's
+ .hg/hgrc file, as the default to be used for future pulls and
+ pushes.
+
+ If an exception is raised, the partly cloned/updated destination
+ repository will be deleted.
+
+ Arguments:
+
+ source: repository object or URL
+
+ dest: URL of destination repository to create (defaults to base
+ name of source repository)
+
+ pull: always pull from source repository, even in local case
+
+ stream: stream raw data uncompressed from repository (fast over
+ LAN, slow over WAN)
+
+ rev: revision to clone up to (implies pull=True)
+
+ update: update working directory after clone completes, if
+ destination is local repository (True means update to default rev,
+ anything else is treated as a revision)
+ """
+
+ if isinstance(source, str):
+ origsource = ui.expandpath(source)
+ source, rev, checkout = parseurl(origsource, rev)
+ src_repo = repository(ui, source)
+ else:
+ src_repo = source
+ origsource = source = src_repo.url()
+ checkout = rev and rev[-1] or None
+
+ if dest is None:
+ dest = defaultdest(source)
+ ui.status(_("destination directory: %s\n") % dest)
+
+ dest = localpath(dest)
+ source = localpath(source)
+
+ if os.path.exists(dest):
+ if not os.path.isdir(dest):
+ raise util.Abort(_("destination '%s' already exists") % dest)
+ elif os.listdir(dest):
+ raise util.Abort(_("destination '%s' is not empty") % dest)
+
+ class DirCleanup(object):
+ def __init__(self, dir_):
+ self.rmtree = shutil.rmtree
+ self.dir_ = dir_
+ def close(self):
+ self.dir_ = None
+ def cleanup(self):
+ if self.dir_:
+ self.rmtree(self.dir_, True)
+
+ src_lock = dest_lock = dir_cleanup = None
+ try:
+ if islocal(dest):
+ dir_cleanup = DirCleanup(dest)
+
+ abspath = origsource
+ copy = False
+ if src_repo.cancopy() and islocal(dest):
+ abspath = os.path.abspath(util.drop_scheme('file', origsource))
+ copy = not pull and not rev
+
+ if copy:
+ try:
+ # we use a lock here because if we race with commit, we
+ # can end up with extra data in the cloned revlogs that's
+ # not pointed to by changesets, thus causing verify to
+ # fail
+ src_lock = src_repo.lock(wait=False)
+ except error.LockError:
+ copy = False
+
+ if copy:
+ src_repo.hook('preoutgoing', throw=True, source='clone')
+ hgdir = os.path.realpath(os.path.join(dest, ".hg"))
+ if not os.path.exists(dest):
+ os.mkdir(dest)
+ else:
+ # only clean up directories we create ourselves
+ dir_cleanup.dir_ = hgdir
+ try:
+ dest_path = hgdir
+ os.mkdir(dest_path)
+ except OSError, inst:
+ if inst.errno == errno.EEXIST:
+ dir_cleanup.close()
+ raise util.Abort(_("destination '%s' already exists")
+ % dest)
+ raise
+
+ for f in src_repo.store.copylist():
+ src = os.path.join(src_repo.path, f)
+ dst = os.path.join(dest_path, f)
+ dstbase = os.path.dirname(dst)
+ if dstbase and not os.path.exists(dstbase):
+ os.mkdir(dstbase)
+ if os.path.exists(src):
+ if dst.endswith('data'):
+ # lock to avoid premature writing to the target
+ dest_lock = lock.lock(os.path.join(dstbase, "lock"))
+ util.copyfiles(src, dst)
+
+ # we need to re-init the repo after manually copying the data
+ # into it
+ dest_repo = repository(ui, dest)
+ src_repo.hook('outgoing', source='clone', node='0'*40)
+ else:
+ try:
+ dest_repo = repository(ui, dest, create=True)
+ except OSError, inst:
+ if inst.errno == errno.EEXIST:
+ dir_cleanup.close()
+ raise util.Abort(_("destination '%s' already exists")
+ % dest)
+ raise
+
+ revs = None
+ if rev:
+ if 'lookup' not in src_repo.capabilities:
+ raise util.Abort(_("src repository does not support revision "
+ "lookup and so doesn't support clone by "
+ "revision"))
+ revs = [src_repo.lookup(r) for r in rev]
+ checkout = revs[0]
+ if dest_repo.local():
+ dest_repo.clone(src_repo, heads=revs, stream=stream)
+ elif src_repo.local():
+ src_repo.push(dest_repo, revs=revs)
+ else:
+ raise util.Abort(_("clone from remote to remote not supported"))
+
+ if dir_cleanup:
+ dir_cleanup.close()
+
+ if dest_repo.local():
+ fp = dest_repo.opener("hgrc", "w", text=True)
+ fp.write("[paths]\n")
+ fp.write("default = %s\n" % abspath)
+ fp.close()
+
+ dest_repo.ui.setconfig('paths', 'default', abspath)
+
+ if update:
+ dest_repo.ui.status(_("updating working directory\n"))
+ if update is not True:
+ checkout = update
+ for test in (checkout, 'default', 'tip'):
+ try:
+ uprev = dest_repo.lookup(test)
+ break
+ except:
+ continue
+ _update(dest_repo, uprev)
+
+ return src_repo, dest_repo
+ finally:
+ release(src_lock, dest_lock)
+ if dir_cleanup is not None:
+ dir_cleanup.cleanup()
+
+def _showstats(repo, stats):
+ stats = ((stats[0], _("updated")),
+ (stats[1], _("merged")),
+ (stats[2], _("removed")),
+ (stats[3], _("unresolved")))
+ note = ", ".join([_("%d files %s") % s for s in stats])
+ repo.ui.status("%s\n" % note)
+
+def update(repo, node):
+ """update the working directory to node, merging linear changes"""
+ stats = _merge.update(repo, node, False, False, None)
+ _showstats(repo, stats)
+ if stats[3]:
+ repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
+ return stats[3] > 0
+
+# naming conflict in clone()
+_update = update
+
+def clean(repo, node, show_stats=True):
+ """forcibly switch the working directory to node, clobbering changes"""
+ stats = _merge.update(repo, node, False, True, None)
+ if show_stats: _showstats(repo, stats)
+ return stats[3] > 0
+
+def merge(repo, node, force=None, remind=True):
+ """branch merge with node, resolving changes"""
+ stats = _merge.update(repo, node, True, force, False)
+ _showstats(repo, stats)
+ if stats[3]:
+ repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
+ "or 'hg up --clean' to abandon\n"))
+ elif remind:
+ repo.ui.status(_("(branch merge, don't forget to commit)\n"))
+ return stats[3] > 0
+
+def revert(repo, node, choose):
+ """revert changes to revision in node without updating dirstate"""
+ return _merge.update(repo, node, False, True, choose)[3] > 0
+
+def verify(repo):
+ """verify the consistency of a repository"""
+ return _verify.verify(repo)
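
A sketch of parseurl(), which implements the '#revision' URL suffix documented
in the help text: the fragment is appended to any explicit revision list and
also becomes the default checkout:

    from upmana.mercurial import hg  # assumed import path

    url, revs, checkout = hg.parseurl('http://example.com/repo#stable')
    # url      == 'http://example.com/repo'
    # revs     == ['stable']
    # checkout == 'stable'
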
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/__init__.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,16 @@
+# hgweb/__init__.py - web interface to a mercurial repository
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import hgweb_mod, hgwebdir_mod
+
+def hgweb(*args, **kwargs):
+ return hgweb_mod.hgweb(*args, **kwargs)
+
+def hgwebdir(*args, **kwargs):
+ return hgwebdir_mod.hgwebdir(*args, **kwargs)
+
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/common.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,105 @@
+# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005, 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import errno, mimetypes, os
+
+HTTP_OK = 200
+HTTP_BAD_REQUEST = 400
+HTTP_UNAUTHORIZED = 401
+HTTP_FORBIDDEN = 403
+HTTP_NOT_FOUND = 404
+HTTP_METHOD_NOT_ALLOWED = 405
+HTTP_SERVER_ERROR = 500
+
+class ErrorResponse(Exception):
+ def __init__(self, code, message=None, headers=[]):
+ Exception.__init__(self)
+ self.code = code
+ self.headers = headers
+ if message is not None:
+ self.message = message
+ else:
+ self.message = _statusmessage(code)
+
+def _statusmessage(code):
+ from BaseHTTPServer import BaseHTTPRequestHandler
+ responses = BaseHTTPRequestHandler.responses
+ return responses.get(code, ('Error', 'Unknown error'))[0]
+
+def statusmessage(code):
+ return '%d %s' % (code, _statusmessage(code))
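+
+# Example: statusmessage(404) returns '404 Not Found'; unknown codes fall
+# back to the generic 'Error' text from BaseHTTPRequestHandler.responses.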
+
+def get_mtime(repo_path):
+ store_path = os.path.join(repo_path, ".hg")
+ if not os.path.isdir(os.path.join(store_path, "data")):
+ store_path = os.path.join(store_path, "store")
+ cl_path = os.path.join(store_path, "00changelog.i")
+ if os.path.exists(cl_path):
+ return os.stat(cl_path).st_mtime
+ else:
+ return os.stat(store_path).st_mtime
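+
+# Note: pre-"store" repositories keep revlogs directly under .hg/data,
+# newer ones under .hg/store; the probe above handles both layouts when
+# picking the changelog file whose mtime drives cache invalidation.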
+
+def staticfile(directory, fname, req):
+ """return a file inside directory with guessed Content-Type header
+
+ fname always uses '/' as directory separator and isn't allowed to
+ contain unusual path components.
+ Content-Type is guessed using the mimetypes module.
+ Return an empty string if fname is illegal or the file is not found.
+
+ """
+ parts = fname.split('/')
+ for part in parts:
+ if (part in ('', os.curdir, os.pardir) or
+ os.sep in part or os.altsep is not None and os.altsep in part):
+ return ""
+ fpath = os.path.join(*parts)
+ if isinstance(directory, str):
+ directory = [directory]
+ for d in directory:
+ path = os.path.join(d, fpath)
+ if os.path.exists(path):
+ break
+ try:
+ os.stat(path)
+ ct = mimetypes.guess_type(path)[0] or "text/plain"
+ req.respond(HTTP_OK, ct, length = os.path.getsize(path))
+ return file(path, 'rb').read()
+ except TypeError:
+ raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
+ except OSError, err:
+ if err.errno == errno.ENOENT:
+ raise ErrorResponse(HTTP_NOT_FOUND)
+ else:
+ raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
+
+def paritygen(stripecount, offset=0):
+ """count parity of horizontal stripes for easier reading"""
+ if stripecount and offset:
+ # account for offset, e.g. due to building the list in reverse
+ count = (stripecount + offset) % stripecount
+ parity = (stripecount + offset) / stripecount & 1
+ else:
+ count = 0
+ parity = 0
+ while True:
+ yield parity
+ count += 1
+ if stripecount and count >= stripecount:
+ parity = 1 - parity
+ count = 0
+
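+# Example (illustrative): the first six values of paritygen(2) are
+# 0, 0, 1, 1, 0, 0; a nonzero offset shifts the phase so lists built in
+# reverse order keep a consistent striping.
+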
+def get_contact(config):
+ """Return repo contact information or empty string.
+
+ web.contact is the primary source, but if that is not set, try
+ ui.username or $EMAIL as a fallback to display something useful.
+ """
+ return (config("web", "contact") or
+ config("ui", "username") or
+ os.environ.get("EMAIL") or "")
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/hgweb_mod.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/hgweb_mod.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,315 @@
+# hgweb/hgweb_mod.py - Web interface for a repository.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os
+from mercurial import ui, hg, hook, error, encoding, templater
+from common import get_mtime, ErrorResponse
+from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
+from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
+from request import wsgirequest
+import webcommands, protocol, webutil
+
+perms = {
+ 'changegroup': 'pull',
+ 'changegroupsubset': 'pull',
+ 'unbundle': 'push',
+ 'stream_out': 'pull',
+}
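+
+# Wire commands not listed above (lookup, heads, between, ...) run with
+# no explicit permission check in the protocol branch of run_wsgi below.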
+
+class hgweb(object):
+ def __init__(self, repo, name=None):
+ if isinstance(repo, str):
+ u = ui.ui()
+ u.setconfig('ui', 'report_untrusted', 'off')
+ u.setconfig('ui', 'interactive', 'off')
+ self.repo = hg.repository(u, repo)
+ else:
+ self.repo = repo
+
+ hook.redirect(True)
+ self.mtime = -1
+ self.reponame = name
+ self.archives = 'zip', 'gz', 'bz2'
+ self.stripecount = 1
+ # a repo owner may set web.templates in .hg/hgrc to get any file
+ # readable by the user running the CGI script
+ self.templatepath = self.config('web', 'templates')
+
+ # The CGI scripts are often run by a user different from the repo owner.
+ # Trust the settings from the .hg/hgrc files by default.
+ def config(self, section, name, default=None, untrusted=True):
+ return self.repo.ui.config(section, name, default,
+ untrusted=untrusted)
+
+ def configbool(self, section, name, default=False, untrusted=True):
+ return self.repo.ui.configbool(section, name, default,
+ untrusted=untrusted)
+
+ def configlist(self, section, name, default=None, untrusted=True):
+ return self.repo.ui.configlist(section, name, default,
+ untrusted=untrusted)
+
+ def refresh(self):
+ mtime = get_mtime(self.repo.root)
+ if mtime != self.mtime:
+ self.mtime = mtime
+ self.repo = hg.repository(self.repo.ui, self.repo.root)
+ self.maxchanges = int(self.config("web", "maxchanges", 10))
+ self.stripecount = int(self.config("web", "stripes", 1))
+ self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
+ self.maxfiles = int(self.config("web", "maxfiles", 10))
+ self.allowpull = self.configbool("web", "allowpull", True)
+ encoding.encoding = self.config("web", "encoding",
+ encoding.encoding)
+
+ def run(self):
+ if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+ raise RuntimeError("This function is only intended to be "
+ "called while running as a CGI script.")
+ import mercurial.hgweb.wsgicgi as wsgicgi
+ wsgicgi.launch(self)
+
+ def __call__(self, env, respond):
+ req = wsgirequest(env, respond)
+ return self.run_wsgi(req)
+
+ def run_wsgi(self, req):
+
+ self.refresh()
+
+ # work with CGI variables to create coherent structure
+ # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
+
+ req.url = req.env['SCRIPT_NAME']
+ if not req.url.endswith('/'):
+ req.url += '/'
+ if 'REPO_NAME' in req.env:
+ req.url += req.env['REPO_NAME'] + '/'
+
+ if 'PATH_INFO' in req.env:
+ parts = req.env['PATH_INFO'].strip('/').split('/')
+ repo_parts = req.env.get('REPO_NAME', '').split('/')
+ if parts[:len(repo_parts)] == repo_parts:
+ parts = parts[len(repo_parts):]
+ query = '/'.join(parts)
+ else:
+ query = req.env['QUERY_STRING'].split('&', 1)[0]
+ query = query.split(';', 1)[0]
+
+ # process this if it's a protocol request
+ # protocol bits don't need to create any URLs
+ # and the clients always use the old URL structure
+
+ cmd = req.form.get('cmd', [''])[0]
+ if cmd and cmd in protocol.__all__:
+ if query:
+ raise ErrorResponse(HTTP_NOT_FOUND)
+ try:
+ if cmd in perms:
+ try:
+ self.check_perm(req, perms[cmd])
+ except ErrorResponse, inst:
+ if cmd == 'unbundle':
+ req.drain()
+ raise
+ method = getattr(protocol, cmd)
+ return method(self.repo, req)
+ except ErrorResponse, inst:
+ req.respond(inst, protocol.HGTYPE)
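+ # the trailing comma below wraps the reply in the one-element
+ # tuple WSGI iterates over; '0\n<msg>\n' is the generic failure
+ # reply protocol clients understand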
+ if not inst.message:
+ return []
+ return '0\n%s\n' % inst.message,
+
+ # translate user-visible url structure to internal structure
+
+ args = query.split('/', 2)
+ if 'cmd' not in req.form and args and args[0]:
+
+ cmd = args.pop(0)
+ style = cmd.rfind('-')
+ if style != -1:
+ req.form['style'] = [cmd[:style]]
+ cmd = cmd[style+1:]
+
+ # avoid accepting e.g. style parameter as command
+ if hasattr(webcommands, cmd):
+ req.form['cmd'] = [cmd]
+ else:
+ cmd = ''
+
+ if cmd == 'static':
+ req.form['file'] = ['/'.join(args)]
+ else:
+ if args and args[0]:
+ node = args.pop(0)
+ req.form['node'] = [node]
+ if args:
+ req.form['file'] = args
+
+ if cmd == 'archive':
+ fn = req.form['node'][0]
+ for type_, spec in self.archive_specs.iteritems():
+ ext = spec[2]
+ if fn.endswith(ext):
+ req.form['node'] = [fn[:-len(ext)]]
+ req.form['type'] = [type_]
+
+ # process the web interface request
+
+ try:
+ tmpl = self.templater(req)
+ ctype = tmpl('mimetype', encoding=encoding.encoding)
+ ctype = templater.stringify(ctype)
+
+ # check read permissions for non-static content
+ if cmd != 'static':
+ self.check_perm(req, None)
+
+ if cmd == '':
+ req.form['cmd'] = [tmpl.cache['default']]
+ cmd = req.form['cmd'][0]
+
+ if cmd not in webcommands.__all__:
+ msg = 'no such method: %s' % cmd
+ raise ErrorResponse(HTTP_BAD_REQUEST, msg)
+ elif cmd == 'file' and 'raw' in req.form.get('style', []):
+ self.ctype = ctype
+ content = webcommands.rawfile(self, req, tmpl)
+ else:
+ content = getattr(webcommands, cmd)(self, req, tmpl)
+ req.respond(HTTP_OK, ctype)
+
+ return content
+
+ except error.LookupError, err:
+ req.respond(HTTP_NOT_FOUND, ctype)
+ msg = str(err)
+ if 'manifest' not in msg:
+ msg = 'revision not found: %s' % err.name
+ return tmpl('error', error=msg)
+ except (error.RepoError, error.RevlogError), inst:
+ req.respond(HTTP_SERVER_ERROR, ctype)
+ return tmpl('error', error=str(inst))
+ except ErrorResponse, inst:
+ req.respond(inst, ctype)
+ return tmpl('error', error=inst.message)
+
+ def templater(self, req):
+
+ # determine scheme, port and server name
+ # this is needed to create absolute urls
+
+ proto = req.env.get('wsgi.url_scheme')
+ if proto == 'https':
+ default_port = "443"
+ else:
+ proto = 'http'
+ default_port = "80"
+
+ port = req.env["SERVER_PORT"]
+ port = port != default_port and (":" + port) or ""
+ urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
+ staticurl = self.config("web", "staticurl") or req.url + 'static/'
+ if not staticurl.endswith('/'):
+ staticurl += '/'
+
+ # some functions for the templater
+
+ def header(**map):
+ yield tmpl('header', encoding=encoding.encoding, **map)
+
+ def footer(**map):
+ yield tmpl("footer", **map)
+
+ def motd(**map):
+ yield self.config("web", "motd", "")
+
+ # figure out which style to use
+
+ vars = {}
+ style = self.config("web", "style", "paper")
+ if 'style' in req.form:
+ style = req.form['style'][0]
+ vars['style'] = style
+
+ start = req.url[-1] == '?' and '&' or '?'
+ sessionvars = webutil.sessionvars(vars, start)
+ mapfile = templater.stylemap(style, self.templatepath)
+
+ if not self.reponame:
+ self.reponame = (self.config("web", "name")
+ or req.env.get('REPO_NAME')
+ or req.url.strip('/') or self.repo.root)
+
+ # create the templater
+
+ tmpl = templater.templater(mapfile,
+ defaults={"url": req.url,
+ "staticurl": staticurl,
+ "urlbase": urlbase,
+ "repo": self.reponame,
+ "header": header,
+ "footer": footer,
+ "motd": motd,
+ "sessionvars": sessionvars
+ })
+ return tmpl
+
+ def archivelist(self, nodeid):
+ allowed = self.configlist("web", "allow_archive")
+ for i, spec in self.archive_specs.iteritems():
+ if i in allowed or self.configbool("web", "allow" + i):
+ yield {"type" : i, "extension" : spec[2], "node" : nodeid}
+
+ archive_specs = {
+ 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
+ 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
+ 'zip': ('application/zip', 'zip', '.zip', None),
+ }
+
+ def check_perm(self, req, op):
+ '''Check permission for operation based on request data (including
+ authentication info). Return if the operation is allowed; otherwise
+ raise an ErrorResponse exception.'''
+
+ user = req.env.get('REMOTE_USER')
+
+ deny_read = self.configlist('web', 'deny_read')
+ if deny_read and (not user or deny_read == ['*'] or user in deny_read):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+
+ allow_read = self.configlist('web', 'allow_read')
+ result = (not allow_read) or (allow_read == ['*'])
+ if not (result or user in allow_read):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+
+ if op == 'pull' and not self.allowpull:
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
+ elif op == 'pull' or op is None: # op is None for interface requests
+ return
+
+ # enforce that you can only push using POST requests
+ if req.env['REQUEST_METHOD'] != 'POST':
+ msg = 'push requires POST request'
+ raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
+
+ # require ssl by default for pushing, auth info cannot be sniffed
+ # and replayed
+ scheme = req.env.get('wsgi.url_scheme')
+ if self.configbool('web', 'push_ssl', True) and scheme != 'https':
+ raise ErrorResponse(HTTP_OK, 'ssl required')
+
+ deny = self.configlist('web', 'deny_push')
+ if deny and (not user or deny == ['*'] or user in deny):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
+
+ allow = self.configlist('web', 'allow_push')
+ result = allow and (allow == ['*'] or user in allow)
+ if not result:
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
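+
+# Note: the checks above apply in a fixed order: deny_read, allow_read,
+# pull authorization, then for pushes POST-only, push_ssl, deny_push and
+# allow_push; the first rule tripped rejects the request.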
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/hgwebdir_mod.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/hgwebdir_mod.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,333 @@
+# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005, 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, re, time
+from mercurial.i18n import _
+from mercurial import ui, hg, util, templater
+from mercurial import error, encoding
+from common import ErrorResponse, get_mtime, staticfile, paritygen,\
+ get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
+from hgweb_mod import hgweb
+from request import wsgirequest
+import webutil
+
+def cleannames(items):
+ return [(util.pconvert(name).strip('/'), path) for name, path in items]
+
+def findrepos(paths):
+ repos = {}
+ for prefix, root in cleannames(paths):
+ roothead, roottail = os.path.split(root)
+ # "foo = /bar/*" makes every subrepo of /bar/ to be
+ # mounted as foo/subrepo
+ # and "foo = /bar/**" also recurses into the subdirectories,
+ # remember to use it without working dir.
+ try:
+ recurse = {'*': False, '**': True}[roottail]
+ except KeyError:
+ repos[prefix] = root
+ continue
+ roothead = os.path.normpath(roothead)
+ for path in util.walkrepos(roothead, followsym=True, recurse=recurse):
+ path = os.path.normpath(path)
+ name = util.pconvert(path[len(roothead):]).strip('/')
+ if prefix:
+ name = prefix + '/' + name
+ repos[name] = path
+ return repos.items()
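+
+# Example (hypothetical config): given [('web', '/srv/hg/*')] with
+# repositories /srv/hg/alpha and /srv/hg/beta, findrepos() yields
+# ('web/alpha', '/srv/hg/alpha') and ('web/beta', '/srv/hg/beta');
+# the '**' form would also descend into nested directories.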
+
+class hgwebdir(object):
+ refreshinterval = 20
+
+ def __init__(self, conf, baseui=None):
+ self.conf = conf
+ self.baseui = baseui
+ self.lastrefresh = 0
+ self.refresh()
+
+ def refresh(self):
+ if self.lastrefresh + self.refreshinterval > time.time():
+ return
+
+ if self.baseui:
+ self.ui = self.baseui.copy()
+ else:
+ self.ui = ui.ui()
+ self.ui.setconfig('ui', 'report_untrusted', 'off')
+ self.ui.setconfig('ui', 'interactive', 'off')
+
+ if not isinstance(self.conf, (dict, list, tuple)):
+ map = {'paths': 'hgweb-paths'}
+ self.ui.readconfig(self.conf, remap=map, trust=True)
+ paths = self.ui.configitems('hgweb-paths')
+ elif isinstance(self.conf, (list, tuple)):
+ paths = self.conf
+ elif isinstance(self.conf, dict):
+ paths = self.conf.items()
+
+ encoding.encoding = self.ui.config('web', 'encoding',
+ encoding.encoding)
+ self.motd = self.ui.config('web', 'motd')
+ self.style = self.ui.config('web', 'style', 'paper')
+ self.stripecount = self.ui.config('web', 'stripes', 1)
+ if self.stripecount:
+ self.stripecount = int(self.stripecount)
+ self._baseurl = self.ui.config('web', 'baseurl')
+
+ self.repos = findrepos(paths)
+ for prefix, root in self.ui.configitems('collections'):
+ prefix = util.pconvert(prefix)
+ for path in util.walkrepos(root, followsym=True):
+ repo = os.path.normpath(path)
+ name = util.pconvert(repo)
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ self.repos.append((name.lstrip('/'), repo))
+
+ self.repos.sort()
+ self.lastrefresh = time.time()
+
+ def run(self):
+ if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+ raise RuntimeError("This function is only intended to be "
+ "called while running as a CGI script.")
+ import mercurial.hgweb.wsgicgi as wsgicgi
+ wsgicgi.launch(self)
+
+ def __call__(self, env, respond):
+ req = wsgirequest(env, respond)
+ return self.run_wsgi(req)
+
+ def read_allowed(self, ui, req):
+ """Check allow_read and deny_read config options of a repo's ui object
+ to determine user permissions. By default, with neither option set (or
+ both empty), allow all users to read the repo. There are two ways a
+ user can be denied read access: (1) deny_read is not empty, and the
+ user is unauthenticated or deny_read contains user (or *), and (2)
+ allow_read is not empty and the user is not in allow_read. Return True
+ if user is allowed to read the repo, else return False."""
+
+ user = req.env.get('REMOTE_USER')
+
+ deny_read = ui.configlist('web', 'deny_read', untrusted=True)
+ if deny_read and (not user or deny_read == ['*'] or user in deny_read):
+ return False
+
+ allow_read = ui.configlist('web', 'allow_read', untrusted=True)
+ # by default, allow reading if no allow_read option has been set
+ if (not allow_read) or (allow_read == ['*']) or (user in allow_read):
+ return True
+
+ return False
+
+ def run_wsgi(self, req):
+ try:
+ try:
+ self.refresh()
+
+ virtual = req.env.get("PATH_INFO", "").strip('/')
+ tmpl = self.templater(req)
+ ctype = tmpl('mimetype', encoding=encoding.encoding)
+ ctype = templater.stringify(ctype)
+
+ # a static file
+ if virtual.startswith('static/') or 'static' in req.form:
+ if virtual.startswith('static/'):
+ fname = virtual[7:]
+ else:
+ fname = req.form['static'][0]
+ static = templater.templatepath('static')
+ return (staticfile(static, fname, req),)
+
+ # top-level index
+ elif not virtual:
+ req.respond(HTTP_OK, ctype)
+ return self.makeindex(req, tmpl)
+
+ # nested indexes and hgwebs
+
+ repos = dict(self.repos)
+ while virtual:
+ real = repos.get(virtual)
+ if real:
+ req.env['REPO_NAME'] = virtual
+ try:
+ repo = hg.repository(self.ui, real)
+ return hgweb(repo).run_wsgi(req)
+ except IOError, inst:
+ msg = inst.strerror
+ raise ErrorResponse(HTTP_SERVER_ERROR, msg)
+ except error.RepoError, inst:
+ raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
+
+ # browse subdirectories
+ subdir = virtual + '/'
+ if [r for r in repos if r.startswith(subdir)]:
+ req.respond(HTTP_OK, ctype)
+ return self.makeindex(req, tmpl, subdir)
+
+ up = virtual.rfind('/')
+ if up < 0:
+ break
+ virtual = virtual[:up]
+
+ # prefixes not found
+ req.respond(HTTP_NOT_FOUND, ctype)
+ return tmpl("notfound", repo=virtual)
+
+ except ErrorResponse, err:
+ req.respond(err, ctype)
+ return tmpl('error', error=err.message or '')
+ finally:
+ tmpl = None
+
+ def makeindex(self, req, tmpl, subdir=""):
+
+ def archivelist(ui, nodeid, url):
+ allowed = ui.configlist("web", "allow_archive", untrusted=True)
+ for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
+ if i[0] in allowed or ui.configbool("web", "allow" + i[0],
+ untrusted=True):
+ yield {"type" : i[0], "extension": i[1],
+ "node": nodeid, "url": url}
+
+ sortdefault = 'name', False
+ def entries(sortcolumn="", descending=False, subdir="", **map):
+ rows = []
+ parity = paritygen(self.stripecount)
+ for name, path in self.repos:
+ if not name.startswith(subdir):
+ continue
+ name = name[len(subdir):]
+
+ u = self.ui.copy()
+ try:
+ u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+ except Exception, e:
+ u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
+ continue
+ def get(section, name, default=None):
+ return u.config(section, name, default, untrusted=True)
+
+ if u.configbool("web", "hidden", untrusted=True):
+ continue
+
+ if not self.read_allowed(u, req):
+ continue
+
+ parts = [name]
+ if 'PATH_INFO' in req.env:
+ parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
+ if req.env['SCRIPT_NAME']:
+ parts.insert(0, req.env['SCRIPT_NAME'])
+ m = re.match('((?:https?://)?)(.*)', '/'.join(parts))
+ # squish repeated slashes out of the path component
+ url = m.group(1) + re.sub('/+', '/', m.group(2)) + '/'
+
+ # update time with local timezone
+ try:
+ d = (get_mtime(path), util.makedate()[1])
+ except OSError:
+ continue
+
+ contact = get_contact(get)
+ description = get("web", "description", "")
+ name = get("web", "name", name)
+ row = dict(contact=contact or "unknown",
+ contact_sort=contact.upper() or "unknown",
+ name=name,
+ name_sort=name,
+ url=url,
+ description=description or "unknown",
+ description_sort=description.upper() or "unknown",
+ lastchange=d,
+ lastchange_sort=d[1]-d[0],
+ archives=archivelist(u, "tip", url))
+ if (not sortcolumn or (sortcolumn, descending) == sortdefault):
+ # fast path for unsorted output
+ row['parity'] = parity.next()
+ yield row
+ else:
+ rows.append((row["%s_sort" % sortcolumn], row))
+ if rows:
+ rows.sort()
+ if descending:
+ rows.reverse()
+ for key, row in rows:
+ row['parity'] = parity.next()
+ yield row
+
+ self.refresh()
+ sortable = ["name", "description", "contact", "lastchange"]
+ sortcolumn, descending = sortdefault
+ if 'sort' in req.form:
+ sortcolumn = req.form['sort'][0]
+ descending = sortcolumn.startswith('-')
+ if descending:
+ sortcolumn = sortcolumn[1:]
+ if sortcolumn not in sortable:
+ sortcolumn = ""
+
+ sort = [("sort_%s" % column,
+ "%s%s" % ((not descending and column == sortcolumn)
+ and "-" or "", column))
+ for column in sortable]
+
+ self.refresh()
+ if self._baseurl is not None:
+ req.env['SCRIPT_NAME'] = self._baseurl
+
+ return tmpl("index", entries=entries, subdir=subdir,
+ sortcolumn=sortcolumn, descending=descending,
+ **dict(sort))
+
+ def templater(self, req):
+
+ def header(**map):
+ yield tmpl('header', encoding=encoding.encoding, **map)
+
+ def footer(**map):
+ yield tmpl("footer", **map)
+
+ def motd(**map):
+ if self.motd is not None:
+ yield self.motd
+ else:
+ yield config('web', 'motd', '')
+
+ def config(section, name, default=None, untrusted=True):
+ return self.ui.config(section, name, default, untrusted)
+
+ if self._baseurl is not None:
+ req.env['SCRIPT_NAME'] = self._baseurl
+
+ url = req.env.get('SCRIPT_NAME', '')
+ if not url.endswith('/'):
+ url += '/'
+
+ vars = {}
+ style = self.style
+ if 'style' in req.form:
+ vars['style'] = style = req.form['style'][0]
+ start = url[-1] == '?' and '&' or '?'
+ sessionvars = webutil.sessionvars(vars, start)
+
+ staticurl = config('web', 'staticurl') or url + 'static/'
+ if not staticurl.endswith('/'):
+ staticurl += '/'
+
+ mapfile = templater.stylemap(style)
+ tmpl = templater.templater(mapfile,
+ defaults={"header": header,
+ "footer": footer,
+ "motd": motd,
+ "url": url,
+ "staticurl": staticurl,
+ "sessionvars": sessionvars})
+ return tmpl
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/protocol.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/protocol.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,206 @@
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import cStringIO, zlib, tempfile, errno, os, sys, urllib
+from mercurial import util, streamclone
+from mercurial.node import bin, hex
+from mercurial import changegroup as changegroupmod
+from common import ErrorResponse, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
+
+# __all__ is populated with the allowed commands. Be sure to add to it if
+# you're adding a new command, or the new command won't work.
+
+__all__ = [
+ 'lookup', 'heads', 'branches', 'between', 'changegroup',
+ 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out',
+ 'branchmap',
+]
+
+HGTYPE = 'application/mercurial-0.1'
+
+def lookup(repo, req):
+ try:
+ r = hex(repo.lookup(req.form['key'][0]))
+ success = 1
+ except Exception,inst:
+ r = str(inst)
+ success = 0
+ resp = "%s %s\n" % (success, r)
+ req.respond(HTTP_OK, HGTYPE, length=len(resp))
+ yield resp
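+
+# The reply is '1 <hex node>\n' on success or '0 <error text>\n' on
+# failure, the format the client-side lookup call parses.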
+
+def heads(repo, req):
+ resp = " ".join(map(hex, repo.heads())) + "\n"
+ req.respond(HTTP_OK, HGTYPE, length=len(resp))
+ yield resp
+
+def branchmap(repo, req):
+ branches = repo.branchmap()
+ heads = []
+ for branch, nodes in branches.iteritems():
+ branchname = urllib.quote(branch)
+ branchnodes = [hex(node) for node in nodes]
+ heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
+ resp = '\n'.join(heads)
+ req.respond(HTTP_OK, HGTYPE, length=len(resp))
+ yield resp
+
+def branches(repo, req):
+ nodes = []
+ if 'nodes' in req.form:
+ nodes = map(bin, req.form['nodes'][0].split(" "))
+ resp = cStringIO.StringIO()
+ for b in repo.branches(nodes):
+ resp.write(" ".join(map(hex, b)) + "\n")
+ resp = resp.getvalue()
+ req.respond(HTTP_OK, HGTYPE, length=len(resp))
+ yield resp
+
+def between(repo, req):
+ if 'pairs' in req.form:
+ pairs = [map(bin, p.split("-"))
+ for p in req.form['pairs'][0].split(" ")]
+ resp = cStringIO.StringIO()
+ for b in repo.between(pairs):
+ resp.write(" ".join(map(hex, b)) + "\n")
+ resp = resp.getvalue()
+ req.respond(HTTP_OK, HGTYPE, length=len(resp))
+ yield resp
+
+def changegroup(repo, req):
+ req.respond(HTTP_OK, HGTYPE)
+ nodes = []
+
+ if 'roots' in req.form:
+ nodes = map(bin, req.form['roots'][0].split(" "))
+
+ z = zlib.compressobj()
+ f = repo.changegroup(nodes, 'serve')
+ while 1:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ yield z.compress(chunk)
+
+ yield z.flush()
+
+def changegroupsubset(repo, req):
+ req.respond(HTTP_OK, HGTYPE)
+ bases = []
+ heads = []
+
+ if 'bases' in req.form:
+ bases = [bin(x) for x in req.form['bases'][0].split(' ')]
+ if 'heads' in req.form:
+ heads = [bin(x) for x in req.form['heads'][0].split(' ')]
+
+ z = zlib.compressobj()
+ f = repo.changegroupsubset(bases, heads, 'serve')
+ while 1:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ yield z.compress(chunk)
+
+ yield z.flush()
+
+def capabilities(repo, req):
+ caps = ['lookup', 'changegroupsubset', 'branchmap']
+ if repo.ui.configbool('server', 'uncompressed', untrusted=True):
+ caps.append('stream=%d' % repo.changelog.version)
+ if changegroupmod.bundlepriority:
+ caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
+ rsp = ' '.join(caps)
+ req.respond(HTTP_OK, HGTYPE, length=len(rsp))
+ yield rsp
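+
+# A typical capability string (illustrative) is
+# 'lookup changegroupsubset branchmap unbundle=HG10GZ,HG10BZ,HG10UN',
+# plus a 'stream=<revlog version>' token when server.uncompressed is set.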
+
+def unbundle(repo, req):
+
+ proto = req.env.get('wsgi.url_scheme') or 'http'
+ their_heads = req.form['heads'][0].split(' ')
+
+ def check_heads():
+ heads = map(hex, repo.heads())
+ return their_heads == [hex('force')] or their_heads == heads
+
+ # fail early if possible
+ if not check_heads():
+ req.drain()
+ raise ErrorResponse(HTTP_OK, 'unsynced changes')
+
+ # do not lock repo until all changegroup data is
+ # streamed. save to temporary file.
+
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, 'wb+')
+ try:
+ length = int(req.env['CONTENT_LENGTH'])
+ for s in util.filechunkiter(req, limit=length):
+ fp.write(s)
+
+ try:
+ lock = repo.lock()
+ try:
+ if not check_heads():
+ raise ErrorResponse(HTTP_OK, 'unsynced changes')
+
+ fp.seek(0)
+ header = fp.read(6)
+ if header.startswith('HG') and not header.startswith('HG10'):
+ raise ValueError('unknown bundle version')
+ elif header not in changegroupmod.bundletypes:
+ raise ValueError('unknown bundle compression type')
+ gen = changegroupmod.unbundle(header, fp)
+
+ # send addchangegroup output to client
+
+ oldio = sys.stdout, sys.stderr
+ sys.stderr = sys.stdout = cStringIO.StringIO()
+
+ try:
+ url = 'remote:%s:%s:%s' % (
+ proto,
+ urllib.quote(req.env.get('REMOTE_HOST', '')),
+ urllib.quote(req.env.get('REMOTE_USER', '')))
+ try:
+ ret = repo.addchangegroup(gen, 'serve', url)
+ except util.Abort, inst:
+ sys.stdout.write("abort: %s\n" % inst)
+ ret = 0
+ finally:
+ val = sys.stdout.getvalue()
+ sys.stdout, sys.stderr = oldio
+ req.respond(HTTP_OK, HGTYPE)
+ return '%d\n%s' % (ret, val),
+ finally:
+ lock.release()
+ except ValueError, inst:
+ raise ErrorResponse(HTTP_OK, inst)
+ except (OSError, IOError), inst:
+ filename = getattr(inst, 'filename', '')
+ # Don't send our filesystem layout to the client
+ if filename.startswith(repo.root):
+ filename = filename[len(repo.root)+1:]
+ else:
+ filename = ''
+ error = getattr(inst, 'strerror', 'Unknown error')
+ if inst.errno == errno.ENOENT:
+ code = HTTP_NOT_FOUND
+ else:
+ code = HTTP_SERVER_ERROR
+ raise ErrorResponse(code, '%s: %s' % (error, filename))
+ finally:
+ fp.close()
+ os.unlink(tempname)
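+
+# Design note: the incoming bundle is spooled to a temp file before
+# repo.lock() is taken, so a slow upload cannot hold the repository lock;
+# check_heads() runs again under the lock to catch concurrent pushes.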
+
+def stream_out(repo, req):
+ req.respond(HTTP_OK, HGTYPE)
+ try:
+ for chunk in streamclone.stream_out(repo, untrusted=True):
+ yield chunk
+ except streamclone.StreamException, inst:
+ yield str(inst)
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/request.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/request.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,134 @@
+# hgweb/request.py - An http request from either CGI or the standalone server.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005, 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import socket, cgi, errno
+from mercurial import util
+from common import ErrorResponse, statusmessage
+
+shortcuts = {
+ 'cl': [('cmd', ['changelog']), ('rev', None)],
+ 'sl': [('cmd', ['shortlog']), ('rev', None)],
+ 'cs': [('cmd', ['changeset']), ('node', None)],
+ 'f': [('cmd', ['file']), ('filenode', None)],
+ 'fl': [('cmd', ['filelog']), ('filenode', None)],
+ 'fd': [('cmd', ['filediff']), ('node', None)],
+ 'fa': [('cmd', ['annotate']), ('filenode', None)],
+ 'mf': [('cmd', ['manifest']), ('manifest', None)],
+ 'ca': [('cmd', ['archive']), ('node', None)],
+ 'tags': [('cmd', ['tags'])],
+ 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
+ 'static': [('cmd', ['static']), ('file', None)]
+}
+
+def expand(form):
+ for k in shortcuts.iterkeys():
+ if k in form:
+ for name, value in shortcuts[k]:
+ if value is None:
+ value = form[k]
+ form[name] = value
+ del form[k]
+ return form
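+
+# Example: a query of '?cs=abc123' expands to
+# {'cmd': ['changeset'], 'node': ['abc123']}; a None value in the
+# shortcut table is replaced by the shortcut's own argument list.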
+
+class wsgirequest(object):
+ def __init__(self, wsgienv, start_response):
+ version = wsgienv['wsgi.version']
+ if (version < (1, 0)) or (version >= (2, 0)):
+ raise RuntimeError("Unknown and unsupported WSGI version %d.%d"
+ % version)
+ self.inp = wsgienv['wsgi.input']
+ self.err = wsgienv['wsgi.errors']
+ self.threaded = wsgienv['wsgi.multithread']
+ self.multiprocess = wsgienv['wsgi.multiprocess']
+ self.run_once = wsgienv['wsgi.run_once']
+ self.env = wsgienv
+ self.form = expand(cgi.parse(self.inp, self.env, keep_blank_values=1))
+ self._start_response = start_response
+ self.server_write = None
+ self.headers = []
+
+ def __iter__(self):
+ return iter([])
+
+ def read(self, count=-1):
+ return self.inp.read(count)
+
+ def drain(self):
+ '''need to read all data from request, httplib is half-duplex'''
+ length = int(self.env.get('CONTENT_LENGTH', 0))
+ for s in util.filechunkiter(self.inp, limit=length):
+ pass
+
+ def respond(self, status, type=None, filename=None, length=0):
+ if self._start_response is not None:
+
+ self.httphdr(type, filename, length)
+ if not self.headers:
+ raise RuntimeError("request.write called before headers sent")
+
+ for k, v in self.headers:
+ if not isinstance(v, str):
+ raise TypeError('header value must be string: %r' % v)
+
+ if isinstance(status, ErrorResponse):
+ self.header(status.headers)
+ status = statusmessage(status.code)
+ elif status == 200:
+ status = '200 Script output follows'
+ elif isinstance(status, int):
+ status = statusmessage(status)
+
+ self.server_write = self._start_response(status, self.headers)
+ self._start_response = None
+ self.headers = []
+
+ def write(self, thing):
+ if hasattr(thing, "__iter__"):
+ for part in thing:
+ self.write(part)
+ else:
+ thing = str(thing)
+ try:
+ self.server_write(thing)
+ except socket.error, inst:
+ if inst[0] != errno.ECONNRESET:
+ raise
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ return None
+
+ def close(self):
+ return None
+
+ def header(self, headers=[('Content-Type','text/html')]):
+ self.headers.extend(headers)
+
+ def httphdr(self, type=None, filename=None, length=0, headers={}):
+ headers = headers.items()
+ if type is not None:
+ headers.append(('Content-Type', type))
+ if filename:
+ filename = (filename.split('/')[-1]
+ .replace('\\', '\\\\').replace('"', '\\"'))
+ headers.append(('Content-Disposition',
+ 'inline; filename="%s"' % filename))
+ if length:
+ headers.append(('Content-Length', str(length)))
+ self.header(headers)
+
+def wsgiapplication(app_maker):
+ '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
+ can and should now be used as a WSGI application.'''
+ application = app_maker()
+ def run_wsgi(env, respond):
+ return application(env, respond)
+ return run_wsgi
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/server.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/server.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,298 @@
+# hgweb/server.py - The standalone hg web server.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
+from mercurial import hg, util, error
+from hgweb_mod import hgweb
+from hgwebdir_mod import hgwebdir
+from mercurial.i18n import _
+
+def _splitURI(uri):
+ """ Return path and query splited from uri
+
+ Just like CGI environment, the path is unquoted, the query is
+ not.
+ """
+ if '?' in uri:
+ path, query = uri.split('?', 1)
+ else:
+ path, query = uri, ''
+ return urllib.unquote(path), query
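+
+# Example: _splitURI('/repo/some%20file?cmd=heads') returns
+# ('/repo/some file', 'cmd=heads'); the path is unquoted, the query not.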
+
+class _error_logger(object):
+ def __init__(self, handler):
+ self.handler = handler
+ def flush(self):
+ pass
+ def write(self, str):
+ self.writelines(str.split('\n'))
+ def writelines(self, seq):
+ for msg in seq:
+ self.handler.log_error("HG error: %s", msg)
+
+class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
+
+ url_scheme = 'http'
+
+ def __init__(self, *args, **kargs):
+ self.protocol_version = 'HTTP/1.1'
+ BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
+
+ def _log_any(self, fp, format, *args):
+ fp.write("%s - - [%s] %s\n" % (self.client_address[0],
+ self.log_date_time_string(),
+ format % args))
+ fp.flush()
+
+ def log_error(self, format, *args):
+ self._log_any(self.server.errorlog, format, *args)
+
+ def log_message(self, format, *args):
+ self._log_any(self.server.accesslog, format, *args)
+
+ def do_write(self):
+ try:
+ self.do_hgweb()
+ except socket.error, inst:
+ if inst[0] != errno.EPIPE:
+ raise
+
+ def do_POST(self):
+ try:
+ self.do_write()
+ except StandardError:
+ self._start_response("500 Internal Server Error", [])
+ self._write("Internal Server Error")
+ tb = "".join(traceback.format_exception(*sys.exc_info()))
+ self.log_error("Exception happened during processing "
+ "request '%s':\n%s", self.path, tb)
+
+ def do_GET(self):
+ self.do_POST()
+
+ def do_hgweb(self):
+ path, query = _splitURI(self.path)
+
+ env = {}
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+ env['REQUEST_METHOD'] = self.command
+ env['SERVER_NAME'] = self.server.server_name
+ env['SERVER_PORT'] = str(self.server.server_port)
+ env['REQUEST_URI'] = self.path
+ env['SCRIPT_NAME'] = self.server.prefix
+ env['PATH_INFO'] = path[len(self.server.prefix):]
+ env['REMOTE_HOST'] = self.client_address[0]
+ env['REMOTE_ADDR'] = self.client_address[0]
+ if query:
+ env['QUERY_STRING'] = query
+
+ if self.headers.typeheader is None:
+ env['CONTENT_TYPE'] = self.headers.type
+ else:
+ env['CONTENT_TYPE'] = self.headers.typeheader
+ length = self.headers.getheader('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ for header in [h for h in self.headers.keys()
+ if h not in ('content-type', 'content-length')]:
+ hkey = 'HTTP_' + header.replace('-', '_').upper()
+ hval = self.headers.getheader(header)
+ hval = hval.replace('\n', '').strip()
+ if hval:
+ env[hkey] = hval
+ env['SERVER_PROTOCOL'] = self.request_version
+ env['wsgi.version'] = (1, 0)
+ env['wsgi.url_scheme'] = self.url_scheme
+ env['wsgi.input'] = self.rfile
+ env['wsgi.errors'] = _error_logger(self)
+ env['wsgi.multithread'] = isinstance(self.server,
+ SocketServer.ThreadingMixIn)
+ env['wsgi.multiprocess'] = isinstance(self.server,
+ SocketServer.ForkingMixIn)
+ env['wsgi.run_once'] = 0
+
+ self.close_connection = True
+ self.saved_status = None
+ self.saved_headers = []
+ self.sent_headers = False
+ self.length = None
+ for chunk in self.server.application(env, self._start_response):
+ self._write(chunk)
+
+ def send_headers(self):
+ if not self.saved_status:
+ raise AssertionError("Sending headers before "
+ "start_response() called")
+ saved_status = self.saved_status.split(None, 1)
+ saved_status[0] = int(saved_status[0])
+ self.send_response(*saved_status)
+ should_close = True
+ for h in self.saved_headers:
+ self.send_header(*h)
+ if h[0].lower() == 'content-length':
+ should_close = False
+ self.length = int(h[1])
+ # The value of the Connection header is a list of case-insensitive
+ # tokens separated by commas and optional whitespace.
+ if 'close' in [token.strip().lower() for token in
+ self.headers.get('connection', '').split(',')]:
+ should_close = True
+ if should_close:
+ self.send_header('Connection', 'close')
+ self.close_connection = should_close
+ self.end_headers()
+ self.sent_headers = True
+
+ def _start_response(self, http_status, headers, exc_info=None):
+ code, msg = http_status.split(None, 1)
+ code = int(code)
+ self.saved_status = http_status
+ bad_headers = ('connection', 'transfer-encoding')
+ self.saved_headers = [h for h in headers
+ if h[0].lower() not in bad_headers]
+ return self._write
+
+ def _write(self, data):
+ if not self.saved_status:
+ raise AssertionError("data written before start_response() called")
+ elif not self.sent_headers:
+ self.send_headers()
+ if self.length is not None:
+ if len(data) > self.length:
+ raise AssertionError("Content-length header sent, but more "
+ "bytes than specified are being written.")
+ self.length = self.length - len(data)
+ self.wfile.write(data)
+ self.wfile.flush()
+
+class _shgwebhandler(_hgwebhandler):
+
+ url_scheme = 'https'
+
+ def setup(self):
+ self.connection = self.request
+ self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+
+ def do_write(self):
+ from OpenSSL.SSL import SysCallError
+ try:
+ super(_shgwebhandler, self).do_write()
+ except SysCallError, inst:
+ if inst.args[0] != errno.EPIPE:
+ raise
+
+ def handle_one_request(self):
+ from OpenSSL.SSL import SysCallError, ZeroReturnError
+ try:
+ super(_shgwebhandler, self).handle_one_request()
+ except (SysCallError, ZeroReturnError):
+ self.close_connection = True
+
+def create_server(ui, repo):
+ use_threads = True
+
+ def openlog(opt, default):
+ if opt and opt != '-':
+ return open(opt, 'a')
+ return default
+
+ if repo is None:
+ myui = ui
+ else:
+ myui = repo.ui
+ address = myui.config("web", "address", "")
+ port = int(myui.config("web", "port", 8000))
+ prefix = myui.config("web", "prefix", "")
+ if prefix:
+ prefix = "/" + prefix.strip("/")
+ use_ipv6 = myui.configbool("web", "ipv6")
+ webdir_conf = myui.config("web", "webdir_conf")
+ ssl_cert = myui.config("web", "certificate")
+ accesslog = openlog(myui.config("web", "accesslog", "-"), sys.stdout)
+ errorlog = openlog(myui.config("web", "errorlog", "-"), sys.stderr)
+
+ if use_threads:
+ try:
+ from threading import activeCount
+ except ImportError:
+ use_threads = False
+
+ if use_threads:
+ _mixin = SocketServer.ThreadingMixIn
+ else:
+ if hasattr(os, "fork"):
+ _mixin = SocketServer.ForkingMixIn
+ else:
+ class _mixin:
+ pass
+
+ class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
+
+ # SO_REUSEADDR has broken semantics on windows
+ if os.name == 'nt':
+ allow_reuse_address = 0
+
+ def __init__(self, *args, **kargs):
+ BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
+ self.accesslog = accesslog
+ self.errorlog = errorlog
+ self.daemon_threads = True
+ def make_handler():
+ if webdir_conf:
+ hgwebobj = hgwebdir(webdir_conf, ui)
+ elif repo is not None:
+ hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
+ else:
+ raise error.RepoError(_("There is no Mercurial repository"
+ " here (.hg not found)"))
+ return hgwebobj
+ self.application = make_handler()
+
+ if ssl_cert:
+ try:
+ from OpenSSL import SSL
+ ctx = SSL.Context(SSL.SSLv23_METHOD)
+ except ImportError:
+ raise util.Abort(_("SSL support is unavailable"))
+ ctx.use_privatekey_file(ssl_cert)
+ ctx.use_certificate_file(ssl_cert)
+ sock = socket.socket(self.address_family, self.socket_type)
+ self.socket = SSL.Connection(ctx, sock)
+ self.server_bind()
+ self.server_activate()
+
+ self.addr, self.port = self.socket.getsockname()[0:2]
+ self.prefix = prefix
+ self.fqaddr = socket.getfqdn(address)
+
+ class IPv6HTTPServer(MercurialHTTPServer):
+ address_family = getattr(socket, 'AF_INET6', None)
+
+ def __init__(self, *args, **kwargs):
+ if self.address_family is None:
+ raise error.RepoError(_('IPv6 is not available on this system'))
+ super(IPv6HTTPServer, self).__init__(*args, **kwargs)
+
+ if ssl_cert:
+ handler = _shgwebhandler
+ else:
+ handler = _hgwebhandler
+
+ # ugly hack due to python issue5853 (for threaded use)
+ import mimetypes; mimetypes.init()
+
+ try:
+ if use_ipv6:
+ return IPv6HTTPServer((address, port), handler)
+ else:
+ return MercurialHTTPServer((address, port), handler)
+ except socket.error, inst:
+ raise util.Abort(_("cannot start server at '%s:%d': %s")
+ % (address, port, inst.args[1]))
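+
+# Note: the concrete server class mixes in SocketServer.ThreadingMixIn
+# when the threading module is available, falls back to ForkingMixIn
+# where os.fork() exists, and otherwise serves requests sequentially.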
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/webcommands.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/webcommands.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,690 @@
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, mimetypes, re, cgi, copy
+import webutil
+from mercurial import error, archival, templater, templatefilters
+from mercurial.node import short, hex
+from mercurial.util import binary
+from common import paritygen, staticfile, get_contact, ErrorResponse
+from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from mercurial import graphmod
+
+# __all__ is populated with the allowed commands. Be sure to add to it if
+# you're adding a new command, or the new command won't work.
+
+__all__ = [
+ 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
+ 'manifest', 'tags', 'branches', 'summary', 'filediff', 'diff', 'annotate',
+ 'filelog', 'archive', 'static', 'graph',
+]
+
+def log(web, req, tmpl):
+ if 'file' in req.form and req.form['file'][0]:
+ return filelog(web, req, tmpl)
+ else:
+ return changelog(web, req, tmpl)
+
+def rawfile(web, req, tmpl):
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ if not path:
+ content = manifest(web, req, tmpl)
+ req.respond(HTTP_OK, web.ctype)
+ return content
+
+ try:
+ fctx = webutil.filectx(web.repo, req)
+ except error.LookupError, inst:
+ try:
+ content = manifest(web, req, tmpl)
+ req.respond(HTTP_OK, web.ctype)
+ return content
+ except ErrorResponse:
+ raise inst
+
+ path = fctx.path()
+ text = fctx.data()
+ mt = mimetypes.guess_type(path)[0]
+ if mt is None:
+ mt = binary(text) and 'application/octet-stream' or 'text/plain'
+
+ req.respond(HTTP_OK, mt, path, len(text))
+ return [text]
+
+def _filerevision(web, tmpl, fctx):
+ f = fctx.path()
+ text = fctx.data()
+ parity = paritygen(web.stripecount)
+
+ if binary(text):
+ mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
+ text = '(binary:%s)' % mt
+
+ def lines():
+ for lineno, t in enumerate(text.splitlines(1)):
+ yield {"line": t,
+ "lineid": "l%d" % (lineno + 1),
+ "linenumber": "% 6d" % (lineno + 1),
+ "parity": parity.next()}
+
+ return tmpl("filerevision",
+ file=f,
+ path=webutil.up(f),
+ text=lines(),
+ rev=fctx.rev(),
+ node=hex(fctx.node()),
+ author=fctx.user(),
+ date=fctx.date(),
+ desc=fctx.description(),
+ branch=webutil.nodebranchnodefault(fctx),
+ parent=webutil.parents(fctx),
+ child=webutil.children(fctx),
+ rename=webutil.renamelink(fctx),
+ permissions=fctx.manifest().flags(f))
+
+def file(web, req, tmpl):
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ if not path:
+ return manifest(web, req, tmpl)
+ try:
+ return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
+ except error.LookupError, inst:
+ try:
+ return manifest(web, req, tmpl)
+ except ErrorResponse:
+ raise inst
+
+def _search(web, tmpl, query):
+
+ def changelist(**map):
+ cl = web.repo.changelog
+ count = 0
+ qw = query.lower().split()
+
+ def revgen():
+ for i in xrange(len(cl) - 1, 0, -100):
+ l = []
+ for j in xrange(max(0, i - 100), i + 1):
+ ctx = web.repo[j]
+ l.append(ctx)
+ l.reverse()
+ for e in l:
+ yield e
+
+ for ctx in revgen():
+ miss = 0
+ for q in qw:
+ if not (q in ctx.user().lower() or
+ q in ctx.description().lower() or
+ q in " ".join(ctx.files()).lower()):
+ miss = 1
+ break
+ if miss:
+ continue
+
+ count += 1
+ n = ctx.node()
+ showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
+ files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+
+ yield tmpl('searchentry',
+ parity=parity.next(),
+ author=ctx.user(),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ changelogtag=showtags,
+ desc=ctx.description(),
+ date=ctx.date(),
+ files=files,
+ rev=ctx.rev(),
+ node=hex(n),
+ tags=webutil.nodetagsdict(web.repo, n),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
+
+ if count >= web.maxchanges:
+ break
+
+ cl = web.repo.changelog
+ parity = paritygen(web.stripecount)
+
+ return tmpl('search',
+ query=query,
+ node=hex(cl.tip()),
+ entries=changelist,
+ archives=web.archivelist("tip"))
+
+def changelog(web, req, tmpl, shortlog = False):
+ if 'node' in req.form:
+ ctx = webutil.changectx(web.repo, req)
+ else:
+ if 'rev' in req.form:
+ hi = req.form['rev'][0]
+ else:
+ hi = len(web.repo) - 1
+ try:
+ ctx = web.repo[hi]
+ except error.RepoError:
+ return _search(web, tmpl, hi) # XXX redirect to 404 page?
+
+ def changelist(limit=0, **map):
+ l = [] # build a list in forward order for efficiency
+ for i in xrange(start, end):
+ ctx = web.repo[i]
+ n = ctx.node()
+ showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
+ files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+
+ l.insert(0, {"parity": parity.next(),
+ "author": ctx.user(),
+ "parent": webutil.parents(ctx, i - 1),
+ "child": webutil.children(ctx, i + 1),
+ "changelogtag": showtags,
+ "desc": ctx.description(),
+ "date": ctx.date(),
+ "files": files,
+ "rev": i,
+ "node": hex(n),
+ "tags": webutil.nodetagsdict(web.repo, n),
+ "inbranch": webutil.nodeinbranch(web.repo, ctx),
+ "branches": webutil.nodebranchdict(web.repo, ctx)
+ })
+
+ if limit > 0:
+ l = l[:limit]
+
+ for e in l:
+ yield e
+
+ maxchanges = shortlog and web.maxshortchanges or web.maxchanges
+ cl = web.repo.changelog
+ count = len(cl)
+ pos = ctx.rev()
+ start = max(0, pos - maxchanges + 1)
+ end = min(count, start + maxchanges)
+ pos = end - 1
+ parity = paritygen(web.stripecount, offset=start-end)
+
+ changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx)
+
+ return tmpl(shortlog and 'shortlog' or 'changelog',
+ changenav=changenav,
+ node=hex(ctx.node()),
+ rev=pos, changesets=count,
+ entries=lambda **x: changelist(limit=0,**x),
+ latestentry=lambda **x: changelist(limit=1,**x),
+ archives=web.archivelist("tip"))
+
+def shortlog(web, req, tmpl):
+ return changelog(web, req, tmpl, shortlog = True)
+
+def changeset(web, req, tmpl):
+ ctx = webutil.changectx(web.repo, req)
+ showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
+ showbranch = webutil.nodebranchnodefault(ctx)
+
+ files = []
+ parity = paritygen(web.stripecount)
+ for f in ctx.files():
+ template = f in ctx and 'filenodelink' or 'filenolink'
+ files.append(tmpl(template,
+ node=ctx.hex(), file=f,
+ parity=parity.next()))
+
+ parity = paritygen(web.stripecount)
+ diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity)
+ return tmpl('changeset',
+ diff=diffs,
+ rev=ctx.rev(),
+ node=ctx.hex(),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ changesettag=showtags,
+ changesetbranch=showbranch,
+ author=ctx.user(),
+ desc=ctx.description(),
+ date=ctx.date(),
+ files=files,
+ archives=web.archivelist(ctx.hex()),
+ tags=webutil.nodetagsdict(web.repo, ctx.node()),
+ branch=webutil.nodebranchnodefault(ctx),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
+
+rev = changeset
+
+def manifest(web, req, tmpl):
+ ctx = webutil.changectx(web.repo, req)
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ mf = ctx.manifest()
+ node = ctx.node()
+
+ files = {}
+ dirs = {}
+ parity = paritygen(web.stripecount)
+
+ if path and path[-1] != "/":
+ path += "/"
+ l = len(path)
+ abspath = "/" + path
+
+ for f, n in mf.iteritems():
+ if f[:l] != path:
+ continue
+ remain = f[l:]
+ elements = remain.split('/')
+ if len(elements) == 1:
+ files[remain] = f
+ else:
+ h = dirs # need to retain ref to dirs (root)
+ for elem in elements[0:-1]:
+ if elem not in h:
+ h[elem] = {}
+ h = h[elem]
+ if len(h) > 1:
+ break
+ h[None] = None # denotes files present
+
+ if mf and not files and not dirs:
+ raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
+
+ def filelist(**map):
+ for f in sorted(files):
+ full = files[f]
+
+ fctx = ctx.filectx(full)
+ yield {"file": full,
+ "parity": parity.next(),
+ "basename": f,
+ "date": fctx.date(),
+ "size": fctx.size(),
+ "permissions": mf.flags(full)}
+
+ def dirlist(**map):
+ for d in sorted(dirs):
+
+ emptydirs = []
+ h = dirs[d]
+ while isinstance(h, dict) and len(h) == 1:
+ k,v = h.items()[0]
+ if v:
+ emptydirs.append(k)
+ h = v
+
+ path = "%s%s" % (abspath, d)
+ yield {"parity": parity.next(),
+ "path": path,
+ "emptydirs": "/".join(emptydirs),
+ "basename": d}
+
+ return tmpl("manifest",
+ rev=ctx.rev(),
+ node=hex(node),
+ path=abspath,
+ up=webutil.up(abspath),
+ upparity=parity.next(),
+ fentries=filelist,
+ dentries=dirlist,
+ archives=web.archivelist(hex(node)),
+ tags=webutil.nodetagsdict(web.repo, node),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
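+
+# Note: dirlist() collapses chains of single-entry directories, so a
+# manifest path a/b/c/f yields one row for 'a' with 'b/c' joined into
+# 'emptydirs'; the {None: None} marker flags levels that hold files.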
+
+def tags(web, req, tmpl):
+ i = web.repo.tagslist()
+ i.reverse()
+ parity = paritygen(web.stripecount)
+
+ def entries(notip=False,limit=0, **map):
+ count = 0
+ for k, n in i:
+ if notip and k == "tip":
+ continue
+ if limit > 0 and count >= limit:
+ continue
+ count = count + 1
+ yield {"parity": parity.next(),
+ "tag": k,
+ "date": web.repo[n].date(),
+ "node": hex(n)}
+
+ return tmpl("tags",
+ node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(False,0, **x),
+ entriesnotip=lambda **x: entries(True,0, **x),
+ latestentry=lambda **x: entries(True,1, **x))
+
+def branches(web, req, tmpl):
+ b = web.repo.branchtags()
+ tips = (web.repo[n] for t, n in b.iteritems())
+ heads = web.repo.heads()
+ parity = paritygen(web.stripecount)
+ sortkey = lambda ctx: ('close' not in ctx.extra(), ctx.rev())
+
+ def entries(limit, **map):
+ count = 0
+ for ctx in sorted(tips, key=sortkey, reverse=True):
+ if limit > 0 and count >= limit:
+ return
+ count += 1
+ if ctx.node() not in heads:
+ status = 'inactive'
+ elif not web.repo.branchheads(ctx.branch()):
+ status = 'closed'
+ else:
+ status = 'open'
+ yield {'parity': parity.next(),
+ 'branch': ctx.branch(),
+ 'status': status,
+ 'node': ctx.hex(),
+ 'date': ctx.date()}
+
+ return tmpl('branches', node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(0, **x),
+ latestentry=lambda **x: entries(1, **x))
+
+def summary(web, req, tmpl):
+ i = web.repo.tagslist()
+ i.reverse()
+
+ def tagentries(**map):
+ parity = paritygen(web.stripecount)
+ count = 0
+ for k, n in i:
+ if k == "tip": # skip tip
+ continue
+
+ count += 1
+ if count > 10: # limit to 10 tags
+ break
+
+ yield tmpl("tagentry",
+ parity=parity.next(),
+ tag=k,
+ node=hex(n),
+ date=web.repo[n].date())
+
+ def branches(**map):
+ parity = paritygen(web.stripecount)
+
+ b = web.repo.branchtags()
+ l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
+ for r,n,t in sorted(l):
+ yield {'parity': parity.next(),
+ 'branch': t,
+ 'node': hex(n),
+ 'date': web.repo[n].date()}
+
+ def changelist(**map):
+ parity = paritygen(web.stripecount, offset=start-end)
+ l = [] # build a list in forward order for efficiency
+ for i in xrange(start, end):
+ ctx = web.repo[i]
+ n = ctx.node()
+ hn = hex(n)
+
+ l.insert(0, tmpl(
+ 'shortlogentry',
+ parity=parity.next(),
+ author=ctx.user(),
+ desc=ctx.description(),
+ date=ctx.date(),
+ rev=i,
+ node=hn,
+ tags=webutil.nodetagsdict(web.repo, n),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx)))
+
+ yield l
+
+ cl = web.repo.changelog
+ count = len(cl)
+ start = max(0, count - web.maxchanges)
+ end = min(count, start + web.maxchanges)
+
+ return tmpl("summary",
+ desc=web.config("web", "description", "unknown"),
+ owner=get_contact(web.config) or "unknown",
+ lastchange=cl.read(cl.tip())[2],
+ tags=tagentries,
+ branches=branches,
+ shortlog=changelist,
+ node=hex(cl.tip()),
+ archives=web.archivelist("tip"))
+
+def filediff(web, req, tmpl):
+ fctx, ctx = None, None
+ try:
+ fctx = webutil.filectx(web.repo, req)
+ except LookupError:
+ ctx = webutil.changectx(web.repo, req)
+ path = webutil.cleanpath(web.repo, req.form['file'][0])
+ if path not in ctx.files():
+ raise
+
+ if fctx is not None:
+ n = fctx.node()
+ path = fctx.path()
+ else:
+ n = ctx.node()
+ # path already defined in except clause
+
+ parity = paritygen(web.stripecount)
+ diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity)
+ rename = fctx and webutil.renamelink(fctx) or []
+ ctx = fctx and fctx or ctx
+ return tmpl("filediff",
+ file=path,
+ node=hex(n),
+ rev=ctx.rev(),
+ date=ctx.date(),
+ desc=ctx.description(),
+ author=ctx.user(),
+ rename=rename,
+ branch=webutil.nodebranchnodefault(ctx),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ diff=diffs)
+
+diff = filediff
+
+def annotate(web, req, tmpl):
+ fctx = webutil.filectx(web.repo, req)
+ f = fctx.path()
+ parity = paritygen(web.stripecount)
+
+ def annotate(**map):
+ last = None
+ if binary(fctx.data()):
+ mt = (mimetypes.guess_type(fctx.path())[0]
+ or 'application/octet-stream')
+ lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
+ '(binary:%s)' % mt)])
+ else:
+ lines = enumerate(fctx.annotate(follow=True, linenumber=True))
+ for lineno, ((f, targetline), l) in lines:
+ fnode = f.filenode()
+
+ if last != fnode:
+ last = fnode
+
+ yield {"parity": parity.next(),
+ "node": hex(f.node()),
+ "rev": f.rev(),
+ "author": f.user(),
+ "desc": f.description(),
+ "file": f.path(),
+ "targetline": targetline,
+ "line": l,
+ "lineid": "l%d" % (lineno + 1),
+ "linenumber": "% 6d" % (lineno + 1)}
+
+ return tmpl("fileannotate",
+ file=f,
+ annotate=annotate,
+ path=webutil.up(f),
+ rev=fctx.rev(),
+ node=hex(fctx.node()),
+ author=fctx.user(),
+ date=fctx.date(),
+ desc=fctx.description(),
+ rename=webutil.renamelink(fctx),
+ branch=webutil.nodebranchnodefault(fctx),
+ parent=webutil.parents(fctx),
+ child=webutil.children(fctx),
+ permissions=fctx.manifest().flags(f))
+
+def filelog(web, req, tmpl):
+
+ try:
+ fctx = webutil.filectx(web.repo, req)
+ f = fctx.path()
+ fl = fctx.filelog()
+ except error.LookupError:
+ f = webutil.cleanpath(web.repo, req.form['file'][0])
+ fl = web.repo.file(f)
+ numrevs = len(fl)
+ if not numrevs: # file doesn't exist at all
+ raise
+ rev = webutil.changectx(web.repo, req).rev()
+ first = fl.linkrev(0)
+ if rev < first: # current rev is from before file existed
+ raise
+ frev = numrevs - 1
+ while fl.linkrev(frev) > rev:
+ frev -= 1
+ fctx = web.repo.filectx(f, fl.linkrev(frev))
+
+ count = fctx.filerev() + 1
+ pagelen = web.maxshortchanges
+ start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
+ end = min(count, start + pagelen) # last rev on this page
+ parity = paritygen(web.stripecount, offset=start-end)
+
+ def entries(limit=0, **map):
+ l = []
+
+ repo = web.repo
+ for i in xrange(start, end):
+ iterfctx = fctx.filectx(i)
+
+ l.insert(0, {"parity": parity.next(),
+ "filerev": i,
+ "file": f,
+ "node": hex(iterfctx.node()),
+ "author": iterfctx.user(),
+ "date": iterfctx.date(),
+ "rename": webutil.renamelink(iterfctx),
+ "parent": webutil.parents(iterfctx),
+ "child": webutil.children(iterfctx),
+ "desc": iterfctx.description(),
+ "tags": webutil.nodetagsdict(repo, iterfctx.node()),
+ "branch": webutil.nodebranchnodefault(iterfctx),
+ "inbranch": webutil.nodeinbranch(repo, iterfctx),
+ "branches": webutil.nodebranchdict(repo, iterfctx)})
+
+ if limit > 0:
+ l = l[:limit]
+
+ for e in l:
+ yield e
+
+ nodefunc = lambda x: fctx.filectx(fileid=x)
+ nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
+ return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
+ entries=lambda **x: entries(limit=0, **x),
+ latestentry=lambda **x: entries(limit=1, **x))
+
+
+def archive(web, req, tmpl):
+ type_ = req.form.get('type', [None])[0]
+ allowed = web.configlist("web", "allow_archive")
+ key = req.form['node'][0]
+
+ if type_ not in web.archives:
+ msg = 'Unsupported archive type: %s' % type_
+ raise ErrorResponse(HTTP_NOT_FOUND, msg)
+
+ if not ((type_ in allowed or
+ web.configbool("web", "allow" + type_, False))):
+ msg = 'Archive type not allowed: %s' % type_
+ raise ErrorResponse(HTTP_FORBIDDEN, msg)
+
+ reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
+ cnode = web.repo.lookup(key)
+ arch_version = key
+ if cnode == key or key == 'tip':
+ arch_version = short(cnode)
+ name = "%s-%s" % (reponame, arch_version)
+ mimetype, artype, extension, encoding = web.archive_specs[type_]
+ headers = [
+ ('Content-Type', mimetype),
+ ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
+ ]
+ if encoding:
+ headers.append(('Content-Encoding', encoding))
+ req.header(headers)
+ req.respond(HTTP_OK)
+ archival.archive(web.repo, req, cnode, artype, prefix=name)
+ return []
+
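+ # A minimal hgrc sketch matching the checks above; the type keys
+ # ('gz', 'zip', 'bz2') are assumed to be the entries of
+ # web.archive_specs, and either form below satisfies the test:
+ #
+ #   [web]
+ #   allow_archive = gz, zip, bz2
+ #   # or per type:
+ #   allowzip = True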
+
+def static(web, req, tmpl):
+ fname = req.form['file'][0]
+ # a repo owner may set web.static in .hg/hgrc to get any file
+ # readable by the user running the CGI script
+ static = web.config("web", "static", None, untrusted=False)
+ if not static:
+ tp = web.templatepath or templater.templatepath()
+ if isinstance(tp, str):
+ tp = [tp]
+ static = [os.path.join(p, 'static') for p in tp]
+ return [staticfile(static, fname, req)]
+
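+ # Sketch of the lookup above: without a web.static override, files are
+ # served from a 'static' directory under each template path. A repo
+ # owner may point it elsewhere in .hg/hgrc (path below is illustrative):
+ #
+ #   [web]
+ #   static = /srv/hg/static
+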
+def graph(web, req, tmpl):
+ rev = webutil.changectx(web.repo, req).rev()
+ bg_height = 39
+
+ revcount = 25
+ if 'revcount' in req.form:
+ revcount = int(req.form.get('revcount', [revcount])[0])
+ tmpl.defaults['sessionvars']['revcount'] = revcount
+
+ lessvars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = revcount / 2
+ morevars = copy.copy(tmpl.defaults['sessionvars'])
+ morevars['revcount'] = revcount * 2
+
+ max_rev = len(web.repo) - 1
+ revcount = min(max_rev, revcount)
+ revnode = web.repo.changelog.node(rev)
+ revnode_hex = hex(revnode)
+ uprev = min(max_rev, rev + revcount)
+ downrev = max(0, rev - revcount)
+ count = len(web.repo)
+ changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)
+
+ dag = graphmod.revisions(web.repo, rev, downrev)
+ tree = list(graphmod.colored(dag))
+ canvasheight = (len(tree) + 1) * bg_height - 27
+ data = []
+ for (id, type, ctx, vtx, edges) in tree:
+ if type != graphmod.CHANGESET:
+ continue
+ node = short(ctx.node())
+ age = templatefilters.age(ctx.date())
+ desc = templatefilters.firstline(ctx.description())
+ desc = cgi.escape(templatefilters.nonempty(desc))
+ user = cgi.escape(templatefilters.person(ctx.user()))
+ branch = ctx.branch()
+ branch = branch, web.repo.branchtags().get(branch) == ctx.node()
+ data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))
+
+ return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
+ lessvars=lessvars, morevars=morevars, downrev=downrev,
+ canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
+ node=revnode_hex, changenav=changenav)
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/webutil.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/webutil.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,218 @@
+# hgweb/webutil.py - utility library for the web interface.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import os, copy
+from mercurial import match, patch, util, error
+from mercurial.node import hex, nullid
+
+def up(p):
+ if p[0] != "/":
+ p = "/" + p
+ if p[-1] == "/":
+ p = p[:-1]
+ up = os.path.dirname(p)
+ if up == "/":
+ return "/"
+ return up + "/"
+
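+ # Example (sketch) of what up() above should produce, normalizing
+ # leading and trailing slashes before taking the parent directory:
+ #
+ #   up("foo/bar")   -> "/foo/"
+ #   up("/foo/bar/") -> "/foo/"
+ #   up("/foo")      -> "/"
+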
+def revnavgen(pos, pagelen, limit, nodefunc):
+ def seq(factor, limit=None):
+ if limit:
+ yield limit
+ if limit >= 20 and limit <= 40:
+ yield 50
+ else:
+ yield 1 * factor
+ yield 3 * factor
+ for f in seq(factor * 10):
+ yield f
+
+ def nav(**map):
+ l = []
+ last = 0
+ for f in seq(1, pagelen):
+ if f < pagelen or f <= last:
+ continue
+ if f > limit:
+ break
+ last = f
+ if pos + f < limit:
+ l.append(("+%d" % f, hex(nodefunc(pos + f).node())))
+ if pos - f >= 0:
+ l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))
+
+ try:
+ yield {"label": "(0)", "node": hex(nodefunc('0').node())}
+
+ for label, node in l:
+ yield {"label": label, "node": node}
+
+ yield {"label": "tip", "node": "tip"}
+ except error.RepoError:
+ pass
+
+ return nav
+
+def _siblings(siblings=[], hiderev=None):
+ siblings = [s for s in siblings if s.node() != nullid]
+ if len(siblings) == 1 and siblings[0].rev() == hiderev:
+ return
+ for s in siblings:
+ d = {'node': hex(s.node()), 'rev': s.rev()}
+ d['user'] = s.user()
+ d['date'] = s.date()
+ d['description'] = s.description()
+ d['branch'] = s.branch()
+ if hasattr(s, 'path'):
+ d['file'] = s.path()
+ yield d
+
+def parents(ctx, hide=None):
+ return _siblings(ctx.parents(), hide)
+
+def children(ctx, hide=None):
+ return _siblings(ctx.children(), hide)
+
+def renamelink(fctx):
+ r = fctx.renamed()
+ if r:
+ return [dict(file=r[0], node=hex(r[1]))]
+ return []
+
+def nodetagsdict(repo, node):
+ return [{"name": i} for i in repo.nodetags(node)]
+
+def nodebranchdict(repo, ctx):
+ branches = []
+ branch = ctx.branch()
+ # If this is an empty repo, ctx.node() == nullid,
+ # ctx.branch() == 'default', but branchtags() is
+ # an empty dict. Using dict.get avoids a traceback.
+ if repo.branchtags().get(branch) == ctx.node():
+ branches.append({"name": branch})
+ return branches
+
+def nodeinbranch(repo, ctx):
+ branches = []
+ branch = ctx.branch()
+ if branch != 'default' and repo.branchtags().get(branch) != ctx.node():
+ branches.append({"name": branch})
+ return branches
+
+def nodebranchnodefault(ctx):
+ branches = []
+ branch = ctx.branch()
+ if branch != 'default':
+ branches.append({"name": branch})
+ return branches
+
+def showtag(repo, tmpl, t1, node=nullid, **args):
+ for t in repo.nodetags(node):
+ yield tmpl(t1, tag=t, **args)
+
+def cleanpath(repo, path):
+ path = path.lstrip('/')
+ return util.canonpath(repo.root, '', path)
+
+def changectx(repo, req):
+ changeid = "tip"
+ if 'node' in req.form:
+ changeid = req.form['node'][0]
+ elif 'manifest' in req.form:
+ changeid = req.form['manifest'][0]
+
+ try:
+ ctx = repo[changeid]
+ except error.RepoError:
+ man = repo.manifest
+ ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
+
+ return ctx
+
+def filectx(repo, req):
+ path = cleanpath(repo, req.form['file'][0])
+ if 'node' in req.form:
+ changeid = req.form['node'][0]
+ else:
+ changeid = req.form['filenode'][0]
+ try:
+ fctx = repo[changeid][path]
+ except error.RepoError:
+ fctx = repo.filectx(path, fileid=changeid)
+
+ return fctx
+
+def listfilediffs(tmpl, files, node, max):
+ for f in files[:max]:
+ yield tmpl('filedifflink', node=hex(node), file=f)
+ if len(files) > max:
+ yield tmpl('fileellipses')
+
+def diffs(repo, tmpl, ctx, files, parity):
+
+ def countgen():
+ start = 1
+ while True:
+ yield start
+ start += 1
+
+ blockcount = countgen()
+ def prettyprintlines(diff):
+ blockno = blockcount.next()
+ for lineno, l in enumerate(diff.splitlines(True)):
+ lineno = "%d.%d" % (blockno, lineno + 1)
+ if l.startswith('+'):
+ ltype = "difflineplus"
+ elif l.startswith('-'):
+ ltype = "difflineminus"
+ elif l.startswith('@'):
+ ltype = "difflineat"
+ else:
+ ltype = "diffline"
+ yield tmpl(ltype,
+ line=l,
+ lineid="l%s" % lineno,
+ linenumber="% 8s" % lineno)
+
+ if files:
+ m = match.exact(repo.root, repo.getcwd(), files)
+ else:
+ m = match.always(repo.root, repo.getcwd())
+
+ diffopts = patch.diffopts(repo.ui, untrusted=True)
+ parents = ctx.parents()
+ node1 = parents and parents[0].node() or nullid
+ node2 = ctx.node()
+
+ block = []
+ for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
+ if chunk.startswith('diff') and block:
+ yield tmpl('diffblock', parity=parity.next(),
+ lines=prettyprintlines(''.join(block)))
+ block = []
+ if chunk.startswith('diff'):
+ chunk = ''.join(chunk.splitlines(True)[1:])
+ block.append(chunk)
+ yield tmpl('diffblock', parity=parity.next(),
+ lines=prettyprintlines(''.join(block)))
+
+class sessionvars(object):
+ def __init__(self, vars, start='?'):
+ self.start = start
+ self.vars = vars
+ def __getitem__(self, key):
+ return self.vars[key]
+ def __setitem__(self, key, value):
+ self.vars[key] = value
+ def __copy__(self):
+ return sessionvars(copy.copy(self.vars), self.start)
+ def __iter__(self):
+ separator = self.start
+ for key, value in self.vars.iteritems():
+ yield {'name': key, 'value': str(value), 'separator': separator}
+ separator = '&'
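+
+ # Usage sketch (values illustrative): iterating a sessionvars instance
+ # yields dicts a template joins into a query string, the first item
+ # carrying the start separator '?' and later items '&':
+ #
+ #   v = sessionvars({'revcount': 25, 'style': 'paper'})
+ #   parts = ['%(separator)s%(name)s=%(value)s' % d for d in v]
+ #   # ''.join(parts) -> '?revcount=25&style=paper' (dict order varies)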
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hgweb/wsgicgi.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hgweb/wsgicgi.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,70 @@
+# hgweb/wsgicgi.py - CGI->WSGI translator
+#
+# Copyright 2006 Eric Hopper
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+#
+# This was originally copied from the public domain code at
+# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
+
+import os, sys
+from mercurial import util
+
+def launch(application):
+ util.set_binary(sys.stdin)
+ util.set_binary(sys.stdout)
+
+ environ = dict(os.environ.iteritems())
+ environ.setdefault('PATH_INFO', '')
+ if '.cgi' in environ['PATH_INFO']:
+ environ['PATH_INFO'] = environ['PATH_INFO'].split('.cgi', 1)[1]
+
+ environ['wsgi.input'] = sys.stdin
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.version'] = (1, 0)
+ environ['wsgi.multithread'] = False
+ environ['wsgi.multiprocess'] = True
+ environ['wsgi.run_once'] = True
+
+ if environ.get('HTTPS','off').lower() in ('on','1','yes'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ headers_set = []
+ headers_sent = []
+ out = sys.stdout
+
+ def write(data):
+ if not headers_set:
+ raise AssertionError("write() before start_response()")
+
+ elif not headers_sent:
+ # Before the first output, send the stored headers
+ status, response_headers = headers_sent[:] = headers_set
+ out.write('Status: %s\r\n' % status)
+ for header in response_headers:
+ out.write('%s: %s\r\n' % header)
+ out.write('\r\n')
+
+ out.write(data)
+ out.flush()
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[0](exc_info[1], exc_info[2])
+ finally:
+ exc_info = None # avoid dangling circular ref
+ elif headers_set:
+ raise AssertionError("Headers already set!")
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ content = application(environ, start_response)
+ for chunk in content:
+ write(chunk)
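+
+ # A minimal CGI script sketch driving this translator; the hgweb
+ # import and its arguments are assumptions based on the surrounding
+ # tree, not part of this file:
+ #
+ #   #!/usr/bin/env python
+ #   from upmana.mercurial.hgweb import hgweb, wsgicgi
+ #   application = hgweb('/path/to/repo', 'repository name')
+ #   wsgicgi.launch(application)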
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/hook.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/hook.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,127 @@
+# hook.py - hook support for mercurial
+#
+# Copyright 2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import os, sys
+import extensions, util
+
+def _pythonhook(ui, repo, name, hname, funcname, args, throw):
+ '''call python hook. hook is callable object, looked up as
+ name in python module. if callable returns "true", hook
+ fails, else passes. if hook raises exception, treated as
+ hook failure. exception propagates if throw is "true".
+
+ reason for "true" meaning "hook failed" is so that
+ unmodified commands (e.g. mercurial.commands.update) can
+ be run as hooks without wrappers to convert return values.'''
+
+ ui.note(_("calling hook %s: %s\n") % (hname, funcname))
+ obj = funcname
+ if not hasattr(obj, '__call__'):
+ d = funcname.rfind('.')
+ if d == -1:
+ raise util.Abort(_('%s hook is invalid ("%s" not in '
+ 'a module)') % (hname, funcname))
+ modname = funcname[:d]
+ try:
+ obj = __import__(modname)
+ except ImportError:
+ try:
+ # extensions are loaded with hgext_ prefix
+ obj = __import__("hgext_%s" % modname)
+ except ImportError:
+ raise util.Abort(_('%s hook is invalid '
+ '(import of "%s" failed)') %
+ (hname, modname))
+ try:
+ for p in funcname.split('.')[1:]:
+ obj = getattr(obj, p)
+ except AttributeError:
+ raise util.Abort(_('%s hook is invalid '
+ '("%s" is not defined)') %
+ (hname, funcname))
+ if not hasattr(obj, '__call__'):
+ raise util.Abort(_('%s hook is invalid '
+ '("%s" is not callable)') %
+ (hname, funcname))
+ try:
+ r = obj(ui=ui, repo=repo, hooktype=name, **args)
+ except KeyboardInterrupt:
+ raise
+ except Exception, exc:
+ if isinstance(exc, util.Abort):
+ ui.warn(_('error: %s hook failed: %s\n') %
+ (hname, exc.args[0]))
+ else:
+ ui.warn(_('error: %s hook raised an exception: '
+ '%s\n') % (hname, exc))
+ if throw:
+ raise
+ ui.traceback()
+ return True
+ if r:
+ if throw:
+ raise util.Abort(_('%s hook failed') % hname)
+ ui.warn(_('warning: %s hook failed\n') % hname)
+ return r
+
+def _exthook(ui, repo, name, cmd, args, throw):
+ ui.note(_("running hook %s: %s\n") % (name, cmd))
+
+ env = {}
+ for k, v in args.iteritems():
+ if hasattr(v, '__call__'):
+ v = v()
+ env['HG_' + k.upper()] = v
+
+ if repo:
+ cwd = repo.root
+ else:
+ cwd = os.getcwd()
+ r = util.system(cmd, environ=env, cwd=cwd)
+ if r:
+ desc, r = util.explain_exit(r)
+ if throw:
+ raise util.Abort(_('%s hook %s') % (name, desc))
+ ui.warn(_('warning: %s hook %s\n') % (name, desc))
+ return r
+
+_redirect = False
+def redirect(state):
+ global _redirect
+ _redirect = state
+
+def hook(ui, repo, name, throw=False, **args):
+ r = False
+
+ if _redirect:
+ # temporarily redirect stdout to stderr
+ oldstdout = os.dup(sys.__stdout__.fileno())
+ os.dup2(sys.__stderr__.fileno(), sys.__stdout__.fileno())
+
+ try:
+ for hname, cmd in ui.configitems('hooks'):
+ if hname.split('.')[0] != name or not cmd:
+ continue
+ if hasattr(cmd, '__call__'):
+ r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
+ elif cmd.startswith('python:'):
+ if cmd.count(':') == 2:
+ path, cmd = cmd[7:].split(':')
+ mod = extensions.loadpath(path, 'hghook.%s' % hname)
+ hookfn = getattr(mod, cmd)
+ else:
+ hookfn = cmd[7:].strip()
+ r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
+ else:
+ r = _exthook(ui, repo, hname, cmd, args, throw) or r
+ finally:
+ if _redirect:
+ os.dup2(oldstdout, sys.__stdout__.fileno())
+ os.close(oldstdout)
+
+ return r
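+
+ # Sketch of the three hook forms dispatched above, as they would look
+ # in an hgrc (names and paths illustrative):
+ #
+ #   [hooks]
+ #   commit = echo committed $HG_NODE            # external -> _exthook
+ #   commit.notify = python:mymod.notifyhook     # in-process -> _pythonhook
+ #   commit.audit = python:/some/mod.py:audit    # path form (two ':')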
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/httprepo.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/httprepo.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,258 @@
+# httprepo.py - HTTP repository proxy classes for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall
+# Copyright 2006 Vadim Gelfer
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import bin, hex, nullid
+from i18n import _
+import repo, changegroup, statichttprepo, error, url, util
+import os, urllib, urllib2, urlparse, zlib, httplib
+import errno, socket
+
+def zgenerator(f):
+ zd = zlib.decompressobj()
+ try:
+ for chunk in util.filechunkiter(f):
+ yield zd.decompress(chunk)
+ except httplib.HTTPException:
+ raise IOError(None, _('connection ended unexpectedly'))
+ yield zd.flush()
+
+class httprepository(repo.repository):
+ def __init__(self, ui, path):
+ self.path = path
+ self.caps = None
+ self.handler = None
+ scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
+ if query or frag:
+ raise util.Abort(_('unsupported URL component: "%s"') %
+ (query or frag))
+
+ # urllib cannot handle URLs with embedded user or passwd
+ self._url, authinfo = url.getauthinfo(path)
+
+ self.ui = ui
+ self.ui.debug(_('using %s\n') % self._url)
+
+ self.urlopener = url.opener(ui, authinfo)
+
+ def __del__(self):
+ for h in self.urlopener.handlers:
+ h.close()
+ if hasattr(h, "close_all"):
+ h.close_all()
+
+ def url(self):
+ return self.path
+
+ # look up capabilities only when needed
+
+ def get_caps(self):
+ if self.caps is None:
+ try:
+ self.caps = set(self.do_read('capabilities').split())
+ except error.RepoError:
+ self.caps = set()
+ self.ui.debug(_('capabilities: %s\n') %
+ (' '.join(self.caps or ['none'])))
+ return self.caps
+
+ capabilities = property(get_caps)
+
+ def lock(self):
+ raise util.Abort(_('operation not supported over http'))
+
+ def do_cmd(self, cmd, **args):
+ data = args.pop('data', None)
+ headers = args.pop('headers', {})
+ self.ui.debug(_("sending %s command\n") % cmd)
+ q = {"cmd": cmd}
+ q.update(args)
+ qs = '?%s' % urllib.urlencode(q)
+ cu = "%s%s" % (self._url, qs)
+ try:
+ if data:
+ self.ui.debug(_("sending %s bytes\n") % len(data))
+ resp = self.urlopener.open(urllib2.Request(cu, data, headers))
+ except urllib2.HTTPError, inst:
+ if inst.code == 401:
+ raise util.Abort(_('authorization failed'))
+ raise
+ except httplib.HTTPException, inst:
+ self.ui.debug(_('http error while sending %s command\n') % cmd)
+ self.ui.traceback()
+ raise IOError(None, inst)
+ except IndexError:
+ # this only happens with Python 2.3, later versions raise URLError
+ raise util.Abort(_('http error, possibly caused by proxy setting'))
+ # record the url we got redirected to
+ resp_url = resp.geturl()
+ if resp_url.endswith(qs):
+ resp_url = resp_url[:-len(qs)]
+ if self._url != resp_url:
+ self.ui.status(_('real URL is %s\n') % resp_url)
+ self._url = resp_url
+ try:
+ proto = resp.getheader('content-type')
+ except AttributeError:
+ proto = resp.headers['content-type']
+
+ safeurl = url.hidepassword(self._url)
+ # accept old "text/plain" and "application/hg-changegroup" for now
+ if not (proto.startswith('application/mercurial-') or
+ proto.startswith('text/plain') or
+ proto.startswith('application/hg-changegroup')):
+ self.ui.debug(_("requested URL: '%s'\n") % url.hidepassword(cu))
+ raise error.RepoError(_("'%s' does not appear to be an hg repository")
+ % safeurl)
+
+ if proto.startswith('application/mercurial-'):
+ try:
+ version = proto.split('-', 1)[1]
+ version_info = tuple([int(n) for n in version.split('.')])
+ except ValueError:
+ raise error.RepoError(_("'%s' sent a broken Content-Type "
+ "header (%s)") % (safeurl, proto))
+ if version_info > (0, 1):
+ raise error.RepoError(_("'%s' uses newer protocol %s") %
+ (safeurl, version))
+
+ return resp
+
+ def do_read(self, cmd, **args):
+ fp = self.do_cmd(cmd, **args)
+ try:
+ return fp.read()
+ finally:
+ # if using keepalive, allow connection to be reused
+ fp.close()
+
+ def lookup(self, key):
+ self.requirecap('lookup', _('look up remote revision'))
+ d = self.do_cmd("lookup", key = key).read()
+ success, data = d[:-1].split(' ', 1)
+ if int(success):
+ return bin(data)
+ raise error.RepoError(data)
+
+ def heads(self):
+ d = self.do_read("heads")
+ try:
+ return map(bin, d[:-1].split(" "))
+ except:
+ raise error.ResponseError(_("unexpected response:"), d)
+
+ def branchmap(self):
+ d = self.do_read("branchmap")
+ try:
+ branchmap = {}
+ for branchpart in d.splitlines():
+ branchheads = branchpart.split(' ')
+ branchname = urllib.unquote(branchheads[0])
+ branchheads = [bin(x) for x in branchheads[1:]]
+ branchmap[branchname] = branchheads
+ return branchmap
+ except:
+ raise error.ResponseError(_("unexpected response:"), d)
+
+ def branches(self, nodes):
+ n = " ".join(map(hex, nodes))
+ d = self.do_read("branches", nodes=n)
+ try:
+ br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
+ return br
+ except:
+ raise error.ResponseError(_("unexpected response:"), d)
+
+ def between(self, pairs):
+ batch = 8 # avoid giant requests
+ r = []
+ for i in xrange(0, len(pairs), batch):
+ n = " ".join(["-".join(map(hex, p)) for p in pairs[i:i + batch]])
+ d = self.do_read("between", pairs=n)
+ try:
+ r += [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
+ except:
+ raise error.ResponseError(_("unexpected response:"), d)
+ return r
+
+ def changegroup(self, nodes, kind):
+ n = " ".join(map(hex, nodes))
+ f = self.do_cmd("changegroup", roots=n)
+ return util.chunkbuffer(zgenerator(f))
+
+ def changegroupsubset(self, bases, heads, source):
+ self.requirecap('changegroupsubset', _('look up remote changes'))
+ baselst = " ".join([hex(n) for n in bases])
+ headlst = " ".join([hex(n) for n in heads])
+ f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
+ return util.chunkbuffer(zgenerator(f))
+
+ def unbundle(self, cg, heads, source):
+ # have to stream bundle to a temp file because we do not have
+ # http 1.1 chunked transfer.
+
+ type = ""
+ types = self.capable('unbundle')
+ # servers older than d1b16a746db6 will send 'unbundle' as a
+ # boolean capability
+ try:
+ types = types.split(',')
+ except AttributeError:
+ types = [""]
+ if types:
+ for x in types:
+ if x in changegroup.bundletypes:
+ type = x
+ break
+
+ tempname = changegroup.writebundle(cg, None, type)
+ fp = url.httpsendfile(tempname, "rb")
+ try:
+ try:
+ resp = self.do_read(
+ 'unbundle', data=fp,
+ headers={'Content-Type': 'application/octet-stream'},
+ heads=' '.join(map(hex, heads)))
+ resp_code, output = resp.split('\n', 1)
+ try:
+ ret = int(resp_code)
+ except ValueError, err:
+ raise error.ResponseError(
+ _('push failed (unexpected response):'), resp)
+ self.ui.write(output)
+ return ret
+ except socket.error, err:
+ if err[0] in (errno.ECONNRESET, errno.EPIPE):
+ raise util.Abort(_('push failed: %s') % err[1])
+ raise util.Abort(err[1])
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+ def stream_out(self):
+ return self.do_cmd('stream_out')
+
+class httpsrepository(httprepository):
+ def __init__(self, ui, path):
+ if not url.has_https:
+ raise util.Abort(_('Python support for SSL and HTTPS '
+ 'is not installed'))
+ httprepository.__init__(self, ui, path)
+
+def instance(ui, path, create):
+ if create:
+ raise util.Abort(_('cannot create new http repository'))
+ try:
+ if path.startswith('https:'):
+ inst = httpsrepository(ui, path)
+ else:
+ inst = httprepository(ui, path)
+ inst.between([(nullid, nullid)])
+ return inst
+ except error.RepoError:
+ ui.note('(falling back to static-http)\n')
+ return statichttprepo.instance(ui, "static-" + path, create)
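+
+ # Usage sketch (URL illustrative): instance() is the entry point for
+ # http(s) repo paths; the proxy is read-only here, since lock() above
+ # aborts any operation that needs a local lock:
+ #
+ #   remote = instance(ui, 'http://example.com/hg/repo', create=False)
+ #   heads = remote.heads()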
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/i18n.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/i18n.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,48 @@
+# i18n.py - internationalization support for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import encoding
+import gettext, sys, os
+
+# modelled after templater.templatepath:
+if hasattr(sys, 'frozen'):
+ module = sys.executable
+else:
+ module = __file__
+
+base = os.path.dirname(module)
+for dir in ('.', '..'):
+ localedir = os.path.normpath(os.path.join(base, dir, 'locale'))
+ if os.path.isdir(localedir):
+ break
+
+t = gettext.translation('hg', localedir, fallback=True)
+
+def gettext(message):
+ """Translate message.
+
+ The message is looked up in the catalog to get a Unicode string,
+ which is encoded in the local encoding before being returned.
+
+ Important: message is restricted to characters in the encoding
+ given by sys.getdefaultencoding() which is most likely 'ascii'.
+ """
+ # If message is None, t.ugettext will return u'None' as the
+ # translation whereas our callers expect us to return None.
+ if message is None:
+ return message
+
+ # We cannot just run the text through encoding.tolocal since that
+ # leads to infinite recursion when encoding._encoding is invalid.
+ try:
+ u = t.ugettext(message)
+ return u.encode(encoding.encoding, "replace")
+ except LookupError:
+ return message
+
+_ = gettext
+
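+ # Usage sketch: callers do "from i18n import _" and wrap user-visible
+ # strings, getting back a byte string in the local encoding:
+ #
+ #   from i18n import _
+ #   msg = _('repository %s not found') % path   # path illustrative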
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/ignore.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/ignore.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,92 @@
+# ignore.py - ignored file handling for mercurial
+#
+# Copyright 2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from i18n import _
+import util, match
+import re
+
+_commentre = None
+
+def _parselines(fp):
+ for line in fp:
+ if "#" in line:
+ global _commentre
+ if not _commentre:
+ _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
+ # remove comments prefixed by an even number of escapes
+ line = _commentre.sub(r'\1', line)
+ # fixup properly escaped comments that survived the above
+ line = line.replace("\\#", "#")
+ line = line.rstrip()
+ if line:
+ yield line
+
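+ # Sketch of the comment handling above ('#' escaped by a backslash is
+ # kept; an unescaped '#' starts a comment):
+ #
+ #   'foo  # comment'  -> yields 'foo'
+ #   'foo\#bar'        -> yields 'foo#bar'
+ #   '# only comment'  -> yields nothing
+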
+def ignore(root, files, warn):
+ '''return the contents of .hgignore files as a list of patterns.
+
+ the files parsed for patterns include:
+ .hgignore in the repository root
+ any additional files specified in the [ui] section of ~/.hgrc
+
+ trailing white space is dropped.
+ the escape character is backslash.
+ comments start with #.
+ empty lines are skipped.
+
+ lines can be of the following formats:
+
+ syntax: regexp # defaults following lines to non-rooted regexps
+ syntax: glob # defaults following lines to non-rooted globs
+ re:pattern # non-rooted regular expression
+ glob:pattern # non-rooted glob
+ pattern # pattern of the current default type'''
+
+ syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
+ pats = {}
+ for f in files:
+ try:
+ pats[f] = []
+ fp = open(f)
+ syntax = 'relre:'
+ for line in _parselines(fp):
+ if line.startswith('syntax:'):
+ s = line[7:].strip()
+ try:
+ syntax = syntaxes[s]
+ except KeyError:
+ warn(_("%s: ignoring invalid syntax '%s'\n") % (f, s))
+ continue
+ pat = syntax + line
+ for s, rels in syntaxes.iteritems():
+ if line.startswith(rels):
+ pat = line
+ break
+ elif line.startswith(s+':'):
+ pat = rels + line[len(s)+1:]
+ break
+ pats[f].append(pat)
+ except IOError, inst:
+ if f != files[0]:
+ warn(_("skipping unreadable ignore file '%s': %s\n") %
+ (f, inst.strerror))
+
+ allpats = []
+ for patlist in pats.values():
+ allpats.extend(patlist)
+ if not allpats:
+ return util.never
+
+ try:
+ ignorefunc = match.match(root, '', [], allpats)
+ except util.Abort:
+ # Re-raise an exception where the src is the right file
+ for f, patlist in pats.iteritems():
+ try:
+ match.match(root, '', [], patlist)
+ except util.Abort, inst:
+ raise util.Abort('%s: %s' % (f, inst[0]))
+
+ return ignorefunc
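+
+ # Usage sketch: the result is a plain predicate over repo-relative
+ # paths (root, file list and warn callback here are illustrative):
+ #
+ #   def warn(msg): sys.stderr.write(msg)
+ #   ignored = ignore('/repo/root', ['/repo/root/.hgignore'], warn)
+ #   ignored('build/output.o')  # -> True if some pattern matches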
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/keepalive.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/keepalive.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,671 @@
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA
+
+# This file is part of urlgrabber, a high-level cross-protocol url-grabber
+# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
+
+# Modified by Benoit Boissinot:
+# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
+# Modified by Dirkjan Ochtman:
+# - import md5 function from a local util module
+# Modified by Martin Geisler:
+# - moved md5 function from local util module to this module
+
+"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
+
+>>> import urllib2
+>>> from keepalive import HTTPHandler
+>>> keepalive_handler = HTTPHandler()
+>>> opener = urllib2.build_opener(keepalive_handler)
+>>> urllib2.install_opener(opener)
+>>>
+>>> fo = urllib2.urlopen('http://www.python.org')
+
+If a connection to a given host is requested, and all of the existing
+connections are still in use, another connection will be opened. If
+the handler tries to use an existing connection but it fails in some
+way, it will be closed and removed from the pool.
+
+To remove the handler, simply re-run build_opener with no arguments, and
+install that opener.
+
+You can explicitly close connections by using the close_connection()
+method of the returned file-like object (described below) or you can
+use the handler methods:
+
+ close_connection(host)
+ close_all()
+ open_connections()
+
+NOTE: using the close_connection and close_all methods of the handler
+should be done with care when using multiple threads.
+ * there is nothing that prevents another thread from creating new
+ connections immediately after connections are closed
+ * no checks are done to prevent in-use connections from being closed
+
+>>> keepalive_handler.close_all()
+
+EXTRA ATTRIBUTES AND METHODS
+
+ Upon a status of 200, the object returned has a few additional
+ attributes and methods, which should not be used if you want to
+ remain consistent with the normal urllib2-returned objects:
+
+ close_connection() - close the connection to the host
+ readlines() - you know, readlines()
+ status - the return status (i.e. 404)
+ reason - English translation of status (i.e. 'File not found')
+
+ If you want the best of both worlds, use this inside an
+ AttributeError-catching try:
+
+ >>> try: status = fo.status
+ >>> except AttributeError: status = None
+
+ Unfortunately, these are ONLY there if status == 200, so it's not
+ easy to distinguish between non-200 responses. The reason is that
+ urllib2 tries to do clever things with error codes 301, 302, 401,
+ and 407, and it wraps the object upon return.
+
+ For python versions earlier than 2.4, you can avoid this fancy error
+ handling by setting the module-level global HANDLE_ERRORS to zero.
+ You see, prior to 2.4, it's the HTTP Handler's job to determine what
+ to handle specially, and what to just pass up. HANDLE_ERRORS == 0
+ means "pass everything up". In python 2.4, however, this job no
+ longer belongs to the HTTP Handler and is now done by a NEW handler,
+ HTTPErrorProcessor. Here's the bottom line:
+
+ python version < 2.4
+ HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
+ errors
+ HANDLE_ERRORS == 0 pass everything up, error processing is
+ left to the calling code
+ python version >= 2.4
+ HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
+ HANDLE_ERRORS == 0 (default) pass everything up, let the
+ other handlers (specifically,
+ HTTPErrorProcessor) decide what to do
+
+ In practice, setting the variable either way makes little difference
+ in python 2.4, so for the most consistent behavior across versions,
+ you probably just want to use the defaults, which will give you
+ exceptions on errors.
+
+"""
+
+# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
+
+import urllib2
+import httplib
+import socket
+import thread
+
+DEBUG = None
+
+import sys
+if sys.version_info < (2, 4): HANDLE_ERRORS = 1
+else: HANDLE_ERRORS = 0
+
+class ConnectionManager:
+ """
+ The connection manager must be able to:
+ * keep track of all existing connections
+ """
+ def __init__(self):
+ self._lock = thread.allocate_lock()
+ self._hostmap = {} # map hosts to a list of connections
+ self._connmap = {} # map connections to host
+ self._readymap = {} # map connection to ready state
+
+ def add(self, host, connection, ready):
+ self._lock.acquire()
+ try:
+ if not host in self._hostmap: self._hostmap[host] = []
+ self._hostmap[host].append(connection)
+ self._connmap[connection] = host
+ self._readymap[connection] = ready
+ finally:
+ self._lock.release()
+
+ def remove(self, connection):
+ self._lock.acquire()
+ try:
+ try:
+ host = self._connmap[connection]
+ except KeyError:
+ pass
+ else:
+ del self._connmap[connection]
+ del self._readymap[connection]
+ self._hostmap[host].remove(connection)
+ if not self._hostmap[host]: del self._hostmap[host]
+ finally:
+ self._lock.release()
+
+ def set_ready(self, connection, ready):
+ try: self._readymap[connection] = ready
+ except KeyError: pass
+
+ def get_ready_conn(self, host):
+ conn = None
+ self._lock.acquire()
+ try:
+ if host in self._hostmap:
+ for c in self._hostmap[host]:
+ if self._readymap[c]:
+ self._readymap[c] = 0
+ conn = c
+ break
+ finally:
+ self._lock.release()
+ return conn
+
+ def get_all(self, host=None):
+ if host:
+ return list(self._hostmap.get(host, []))
+ else:
+ return dict(self._hostmap)
+
+class KeepAliveHandler:
+ def __init__(self):
+ self._cm = ConnectionManager()
+
+ #### Connection Management
+ def open_connections(self):
+ """return a list of connected hosts and the number of connections
+ to each. [('foo.com:80', 2), ('bar.org', 1)]"""
+ return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
+
+ def close_connection(self, host):
+ """close connection(s) to
+ host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
+ no error occurs if there is no connection to that host."""
+ for h in self._cm.get_all(host):
+ self._cm.remove(h)
+ h.close()
+
+ def close_all(self):
+ """close all open connections"""
+ for host, conns in self._cm.get_all().iteritems():
+ for h in conns:
+ self._cm.remove(h)
+ h.close()
+
+ def _request_closed(self, request, host, connection):
+ """tells us that this request is now closed and the the
+ connection is ready for another request"""
+ self._cm.set_ready(connection, 1)
+
+ def _remove_connection(self, host, connection, close=0):
+ if close: connection.close()
+ self._cm.remove(connection)
+
+ #### Transaction Execution
+ def http_open(self, req):
+ return self.do_open(HTTPConnection, req)
+
+ def do_open(self, http_class, req):
+ host = req.get_host()
+ if not host:
+ raise urllib2.URLError('no host given')
+
+ try:
+ h = self._cm.get_ready_conn(host)
+ while h:
+ r = self._reuse_connection(h, req, host)
+
+ # if this response is non-None, then it worked and we're
+ # done. Break out, skipping the else block.
+ if r: break
+
+ # connection is bad - possibly closed by server
+ # discard it and ask for the next free connection
+ h.close()
+ self._cm.remove(h)
+ h = self._cm.get_ready_conn(host)
+ else:
+ # no (working) free connections were found. Create a new one.
+ h = http_class(host)
+ if DEBUG: DEBUG.info("creating new connection to %s (%d)",
+ host, id(h))
+ self._cm.add(host, h, 0)
+ self._start_transaction(h, req)
+ r = h.getresponse()
+ except (socket.error, httplib.HTTPException), err:
+ raise urllib2.URLError(err)
+
+ # if not a persistent connection, don't try to reuse it
+ if r.will_close: self._cm.remove(h)
+
+ if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
+ r._handler = self
+ r._host = host
+ r._url = req.get_full_url()
+ r._connection = h
+ r.code = r.status
+ r.headers = r.msg
+ r.msg = r.reason
+
+ if r.status == 200 or not HANDLE_ERRORS:
+ return r
+ else:
+ return self.parent.error('http', req, r,
+ r.status, r.msg, r.headers)
+
+ def _reuse_connection(self, h, req, host):
+ """start the transaction with a re-used connection
+ return a response object (r) upon success or None on failure.
+ This DOES not close or remove bad connections in cases where
+ it returns. However, if an unexpected exception occurs, it
+ will close and remove the connection before re-raising.
+ """
+ try:
+ self._start_transaction(h, req)
+ r = h.getresponse()
+ # note: just because we got something back doesn't mean it
+ # worked. We'll check the version below, too.
+ except (socket.error, httplib.HTTPException):
+ r = None
+ except:
+ # adding this block just in case we've missed
+ # something we will still raise the exception, but
+ # lets try and close the connection and remove it
+ # first. We previously got into a nasty loop
+ # where an exception was uncaught, and so the
+ # connection stayed open. On the next try, the
+ # same exception was raised, etc. The tradeoff is
+ # that it's now possible this call will raise
+ # a DIFFERENT exception
+ if DEBUG: DEBUG.error("unexpected exception - closing " + \
+ "connection to %s (%d)", host, id(h))
+ self._cm.remove(h)
+ h.close()
+ raise
+
+ if r is None or r.version == 9:
+ # httplib falls back to assuming HTTP 0.9 if it gets a
+ # bad header back. This is most likely to happen if
+ # the socket has been closed by the server since we
+ # last used the connection.
+ if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
+ host, id(h))
+ r = None
+ else:
+ if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
+
+ return r
+
+ def _start_transaction(self, h, req):
+ # What follows mostly reimplements HTTPConnection.request()
+ # except it adds self.parent.addheaders in the mix.
+ headers = req.headers.copy()
+ if sys.version_info >= (2, 4):
+ headers.update(req.unredirected_hdrs)
+ headers.update(self.parent.addheaders)
+ headers = dict((n.lower(), v) for n,v in headers.items())
+ skipheaders = {}
+ for n in ('host', 'accept-encoding'):
+ if n in headers:
+ skipheaders['skip_' + n.replace('-', '_')] = 1
+ try:
+ if req.has_data():
+ data = req.get_data()
+ h.putrequest('POST', req.get_selector(), **skipheaders)
+ if 'content-type' not in headers:
+ h.putheader('Content-type',
+ 'application/x-www-form-urlencoded')
+ if 'content-length' not in headers:
+ h.putheader('Content-length', '%d' % len(data))
+ else:
+ h.putrequest('GET', req.get_selector(), **skipheaders)
+ except (socket.error), err:
+ raise urllib2.URLError(err)
+ for k, v in headers.items():
+ h.putheader(k, v)
+ h.endheaders()
+ if req.has_data():
+ h.send(data)
+
+class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
+ pass
+
+class HTTPResponse(httplib.HTTPResponse):
+ # we need to subclass HTTPResponse in order to
+ # 1) add readline() and readlines() methods
+ # 2) add close_connection() methods
+ # 3) add info() and geturl() methods
+
+ # in order to add readline(), read must be modified to deal with a
+ # buffer. example: readline must read a buffer and then spit back
+ # one line at a time. The only real alternative is to read one
+ # BYTE at a time (ick). Once something has been read, it can't be
+ # put back (ok, maybe it can, but that's even uglier than this),
+ # so if you THEN do a normal read, you must first take stuff from
+ # the buffer.
+
+ # the read method wraps the original to accommodate buffering,
+ # although read() never adds to the buffer.
+ # Both readline and readlines have been stolen with almost no
+ # modification from socket.py
+
+
+ def __init__(self, sock, debuglevel=0, strict=0, method=None):
+ if method: # the httplib in python 2.3 uses the method arg
+ httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
+ else: # 2.2 doesn't
+ httplib.HTTPResponse.__init__(self, sock, debuglevel)
+ self.fileno = sock.fileno
+ self.code = None
+ self._rbuf = ''
+ self._rbufsize = 8096
+ self._handler = None # inserted by the handler later
+ self._host = None # (same)
+ self._url = None # (same)
+ self._connection = None # (same)
+
+ _raw_read = httplib.HTTPResponse.read
+
+ def close(self):
+ if self.fp:
+ self.fp.close()
+ self.fp = None
+ if self._handler:
+ self._handler._request_closed(self, self._host,
+ self._connection)
+
+ def close_connection(self):
+ self._handler._remove_connection(self._host, self._connection, close=1)
+ self.close()
+
+ def info(self):
+ return self.headers
+
+ def geturl(self):
+ return self._url
+
+ def read(self, amt=None):
+ # the _rbuf test is only in this first if for speed. It's not
+ # logically necessary
+ if self._rbuf and amt is not None:
+ L = len(self._rbuf)
+ if amt > L:
+ amt -= L
+ else:
+ s = self._rbuf[:amt]
+ self._rbuf = self._rbuf[amt:]
+ return s
+
+ s = self._rbuf + self._raw_read(amt)
+ self._rbuf = ''
+ return s
+
+ # stolen from Python SVN #68532 to fix issue1088
+ def _read_chunked(self, amt):
+ chunk_left = self.chunk_left
+ value = ''
+
+ # XXX This accumulates chunks by repeated string concatenation,
+ # which is not efficient as the number or size of chunks gets big.
+ while True:
+ if chunk_left is None:
+ line = self.fp.readline()
+ i = line.find(';')
+ if i >= 0:
+ line = line[:i] # strip chunk-extensions
+ try:
+ chunk_left = int(line, 16)
+ except ValueError:
+ # close the connection as protocol synchronisation is
+ # probably lost
+ self.close()
+ raise httplib.IncompleteRead(value)
+ if chunk_left == 0:
+ break
+ if amt is None:
+ value += self._safe_read(chunk_left)
+ elif amt < chunk_left:
+ value += self._safe_read(amt)
+ self.chunk_left = chunk_left - amt
+ return value
+ elif amt == chunk_left:
+ value += self._safe_read(amt)
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ self.chunk_left = None
+ return value
+ else:
+ value += self._safe_read(chunk_left)
+ amt -= chunk_left
+
+ # we read the whole chunk, get another
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ chunk_left = None
+
+ # read and discard trailer up to the CRLF terminator
+ ### note: we shouldn't have any trailers!
+ while True:
+ line = self.fp.readline()
+ if not line:
+ # a vanishingly small number of sites EOF without
+ # sending the trailer
+ break
+ if line == '\r\n':
+ break
+
+ # we read everything; close the "file"
+ self.close()
+
+ return value
+
+ def readline(self, limit=-1):
+ i = self._rbuf.find('\n')
+ while i < 0 and not (0 < limit <= len(self._rbuf)):
+ new = self._raw_read(self._rbufsize)
+ if not new: break
+ i = new.find('\n')
+ if i >= 0: i = i + len(self._rbuf)
+ self._rbuf = self._rbuf + new
+ if i < 0: i = len(self._rbuf)
+ else: i = i+1
+ if 0 <= limit < len(self._rbuf): i = limit
+ data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
+ return data
+
+ def readlines(self, sizehint = 0):
+ total = 0
+ list = []
+ while 1:
+ line = self.readline()
+ if not line: break
+ list.append(line)
+ total += len(line)
+ if sizehint and total >= sizehint:
+ break
+ return list
+
+
+class HTTPConnection(httplib.HTTPConnection):
+ # use the modified response class
+ response_class = HTTPResponse
+
+#########################################################################
+##### TEST FUNCTIONS
+#########################################################################
+
+def error_handler(url):
+ global HANDLE_ERRORS
+ orig = HANDLE_ERRORS
+ keepalive_handler = HTTPHandler()
+ opener = urllib2.build_opener(keepalive_handler)
+ urllib2.install_opener(opener)
+ pos = {0: 'off', 1: 'on'}
+ for i in (0, 1):
+ print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
+ HANDLE_ERRORS = i
+ try:
+ fo = urllib2.urlopen(url)
+ fo.read()
+ fo.close()
+ try: status, reason = fo.status, fo.reason
+ except AttributeError: status, reason = None, None
+ except IOError, e:
+ print " EXCEPTION: %s" % e
+ raise
+ else:
+ print " status = %s, reason = %s" % (status, reason)
+ HANDLE_ERRORS = orig
+ hosts = keepalive_handler.open_connections()
+ print "open connections:", hosts
+ keepalive_handler.close_all()
+
+def md5(s):
+ try:
+ from hashlib import md5 as _md5
+ except ImportError:
+ from md5 import md5 as _md5
+ global md5
+ md5 = _md5
+ return _md5(s)
+
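+ # The helper above replaces itself with the real md5 constructor on
+ # first use (a lazy-import trick), so subsequent calls skip the
+ # ImportError check entirely:
+ #
+ #   digest = md5('some data').hexdigest()
+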
+def continuity(url):
+ format = '%25s: %s'
+
+ # first fetch the file with the normal http handler
+ opener = urllib2.build_opener()
+ urllib2.install_opener(opener)
+ fo = urllib2.urlopen(url)
+ foo = fo.read()
+ fo.close()
+ m = md5(foo)
+ print format % ('normal urllib', m.hexdigest())
+
+ # now install the keepalive handler and try again
+ opener = urllib2.build_opener(HTTPHandler())
+ urllib2.install_opener(opener)
+
+ fo = urllib2.urlopen(url)
+ foo = fo.read()
+ fo.close()
+ m = md5(foo)
+ print format % ('keepalive read', m.hexdigest())
+
+ fo = urllib2.urlopen(url)
+ foo = ''
+ while 1:
+ f = fo.readline()
+ if f: foo = foo + f
+ else: break
+ fo.close()
+ m = md5(foo)
+ print format % ('keepalive readline', m.hexdigest())
+
+def comp(N, url):
+ print ' making %i connections to:\n %s' % (N, url)
+
+ sys.stdout.write(' first using the normal urllib handlers')
+ # first use normal opener
+ opener = urllib2.build_opener()
+ urllib2.install_opener(opener)
+ t1 = fetch(N, url)
+ print ' TIME: %.3f s' % t1
+
+ sys.stdout.write(' now using the keepalive handler ')
+ # now install the keepalive handler and try again
+ opener = urllib2.build_opener(HTTPHandler())
+ urllib2.install_opener(opener)
+ t2 = fetch(N, url)
+ print ' TIME: %.3f s' % t2
+ print ' improvement factor: %.2f' % (t1/t2, )
+
+def fetch(N, url, delay=0):
+ import time
+ lens = []
+ starttime = time.time()
+ for i in range(N):
+ if delay and i > 0: time.sleep(delay)
+ fo = urllib2.urlopen(url)
+ foo = fo.read()
+ fo.close()
+ lens.append(len(foo))
+ diff = time.time() - starttime
+
+ j = 0
+ for i in lens[1:]:
+ j = j + 1
+ if not i == lens[0]:
+ print "WARNING: inconsistent length on read %i: %i" % (j, i)
+
+ return diff
+
+def test_timeout(url):
+ global DEBUG
+ dbbackup = DEBUG
+ class FakeLogger:
+ def debug(self, msg, *args): print msg % args
+ info = warning = error = debug
+ DEBUG = FakeLogger()
+ print " fetching the file to establish a connection"
+ fo = urllib2.urlopen(url)
+ data1 = fo.read()
+ fo.close()
+
+ i = 20
+ print " waiting %i seconds for the server to close the connection" % i
+ while i > 0:
+ sys.stdout.write('\r %2i' % i)
+ sys.stdout.flush()
+ time.sleep(1)
+ i -= 1
+ sys.stderr.write('\r')
+
+ print " fetching the file a second time"
+ fo = urllib2.urlopen(url)
+ data2 = fo.read()
+ fo.close()
+
+ if data1 == data2:
+ print ' data are identical'
+ else:
+ print ' ERROR: DATA DIFFER'
+
+ DEBUG = dbbackup
+
+
+def test(url, N=10):
+ print "checking error hander (do this on a non-200)"
+ try: error_handler(url)
+ except IOError:
+ print "exiting - exception will prevent further tests"
+ sys.exit()
+ print
+ print "performing continuity test (making sure stuff isn't corrupted)"
+ continuity(url)
+ print
+ print "performing speed comparison"
+ comp(N, url)
+ print
+ print "performing dropped-connection check"
+ test_timeout(url)
+
+if __name__ == '__main__':
+ import time
+ import sys
+ try:
+ N = int(sys.argv[1])
+ url = sys.argv[2]
+ except:
+ print "%s " % sys.argv[0]
+ else:
+ test(url, N)
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/localrepo.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/localrepo.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,2191 @@
+# localrepo.py - read/write repository class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+from node import bin, hex, nullid, nullrev, short
+from i18n import _
+import repo, changegroup, subrepo
+import changelog, dirstate, filelog, manifest, context
+import lock, transaction, store, encoding
+import util, extensions, hook, error
+import match as match_
+import merge as merge_
+from lock import release
+import weakref, stat, errno, os, time, inspect
+propertycache = util.propertycache
+
+class localrepository(repo.repository):
+ capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
+ supported = set('revlogv1 store fncache shared'.split())
+
+ def __init__(self, baseui, path=None, create=0):
+ repo.repository.__init__(self)
+ self.root = os.path.realpath(path)
+ self.path = os.path.join(self.root, ".hg")
+ self.origroot = path
+ self.opener = util.opener(self.path)
+ self.wopener = util.opener(self.root)
+ self.baseui = baseui
+ self.ui = baseui.copy()
+
+ try:
+ self.ui.readconfig(self.join("hgrc"), self.root)
+ extensions.loadall(self.ui)
+ except IOError:
+ pass
+
+ if not os.path.isdir(self.path):
+ if create:
+ if not os.path.exists(path):
+ os.mkdir(path)
+ os.mkdir(self.path)
+ requirements = ["revlogv1"]
+ if self.ui.configbool('format', 'usestore', True):
+ os.mkdir(os.path.join(self.path, "store"))
+ requirements.append("store")
+ if self.ui.configbool('format', 'usefncache', True):
+ requirements.append("fncache")
+ # create an invalid changelog
+ self.opener("00changelog.i", "a").write(
+ '\0\0\0\2' # represents revlogv2
+ ' dummy changelog to prevent using the old repo layout'
+ )
+ reqfile = self.opener("requires", "w")
+ for r in requirements:
+ reqfile.write("%s\n" % r)
+ reqfile.close()
+ else:
+ raise error.RepoError(_("repository %s not found") % path)
+ elif create:
+ raise error.RepoError(_("repository %s already exists") % path)
+ else:
+ # find requirements
+ requirements = set()
+ try:
+ requirements = set(self.opener("requires").read().splitlines())
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ for r in requirements - self.supported:
+ raise error.RepoError(_("requirement '%s' not supported") % r)
+
+ self.sharedpath = self.path
+ try:
+ s = os.path.realpath(self.opener("sharedpath").read())
+ if not os.path.exists(s):
+ raise error.RepoError(
+ _('.hg/sharedpath points to nonexistent directory %s') % s)
+ self.sharedpath = s
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ self.store = store.store(requirements, self.sharedpath, util.opener)
+ self.spath = self.store.path
+ self.sopener = self.store.opener
+ self.sjoin = self.store.join
+ self.opener.createmode = self.store.createmode
+
+ self.tagscache = None
+ self._tagstypecache = None
+ self.branchcache = None
+ self._ubranchcache = None # UTF-8 version of branchcache
+ self._branchcachetip = None
+ self.nodetagscache = None
+ self.filterpats = {}
+ self._datafilters = {}
+ self._transref = self._lockref = self._wlockref = None
+
+ @propertycache
+ def changelog(self):
+ c = changelog.changelog(self.sopener)
+ if 'HG_PENDING' in os.environ:
+ p = os.environ['HG_PENDING']
+ if p.startswith(self.root):
+ c.readpending('00changelog.i.a')
+ self.sopener.defversion = c.version
+ return c
+
+ @propertycache
+ def manifest(self):
+ return manifest.manifest(self.sopener)
+
+ @propertycache
+ def dirstate(self):
+ return dirstate.dirstate(self.opener, self.ui, self.root)
+
+ def __getitem__(self, changeid):
+ if changeid is None:
+ return context.workingctx(self)
+ return context.changectx(self, changeid)
+
+ def __nonzero__(self):
+ return True
+
+ def __len__(self):
+ return len(self.changelog)
+
+ def __iter__(self):
+ for i in xrange(len(self)):
+ yield i
+
+ def url(self):
+ return 'file:' + self.root
+
+ def hook(self, name, throw=False, **args):
+ return hook.hook(self.ui, self, name, throw, **args)
+
+ tag_disallowed = ':\r\n'
+
+ def _tag(self, names, node, message, local, user, date, extra={}):
+ if isinstance(names, str):
+ allchars = names
+ names = (names,)
+ else:
+ allchars = ''.join(names)
+ for c in self.tag_disallowed:
+ if c in allchars:
+ raise util.Abort(_('%r cannot be used in a tag name') % c)
+
+ for name in names:
+ self.hook('pretag', throw=True, node=hex(node), tag=name,
+ local=local)
+
+ def writetags(fp, names, munge, prevtags):
+ fp.seek(0, 2)
+ if prevtags and prevtags[-1] != '\n':
+ fp.write('\n')
+ for name in names:
+ m = munge and munge(name) or name
+ if self._tagstypecache and name in self._tagstypecache:
+ old = self.tagscache.get(name, nullid)
+ fp.write('%s %s\n' % (hex(old), m))
+ fp.write('%s %s\n' % (hex(node), m))
+ fp.close()
+
+ prevtags = ''
+ if local:
+ try:
+ fp = self.opener('localtags', 'r+')
+ except IOError:
+ fp = self.opener('localtags', 'a')
+ else:
+ prevtags = fp.read()
+
+ # local tags are stored in the current charset
+ writetags(fp, names, None, prevtags)
+ for name in names:
+ self.hook('tag', node=hex(node), tag=name, local=local)
+ return
+
+ try:
+ fp = self.wfile('.hgtags', 'rb+')
+ except IOError:
+ fp = self.wfile('.hgtags', 'ab')
+ else:
+ prevtags = fp.read()
+
+ # committed tags are stored in UTF-8
+ writetags(fp, names, encoding.fromlocal, prevtags)
+
+ if '.hgtags' not in self.dirstate:
+ self.add(['.hgtags'])
+
+ m = match_.exact(self.root, '', ['.hgtags'])
+ tagnode = self.commit(message, user, date, extra=extra, match=m)
+
+ for name in names:
+ self.hook('tag', node=hex(node), tag=name, local=local)
+
+ return tagnode
+
+ def tag(self, names, node, message, local, user, date):
+ '''tag a revision with one or more symbolic names.
+
+ names is a list of strings or, when adding a single tag, names may be a
+ string.
+
+ if local is True, the tags are stored in a per-repository file.
+ otherwise, they are stored in the .hgtags file, and a new
+ changeset is committed with the change.
+
+ keyword arguments:
+
+ local: whether to store tags in non-version-controlled file
+ (default False)
+
+ message: commit message to use if committing
+
+ user: name of user to use if committing
+
+ date: date tuple to use if committing'''
+
+ for x in self.status()[:5]:
+ if '.hgtags' in x:
+ raise util.Abort(_('working copy of .hgtags is changed '
+ '(please commit .hgtags manually)'))
+
+ self.tags() # instantiate the cache
+ self._tag(names, node, message, local, user, date)
+
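+ # Usage sketch for the method above (values illustrative); a single
+ # name may be passed as a plain string instead of a list:
+ #
+ #   repo.tag('v1.0', ctx.node(), 'Added tag v1.0', local=False,
+ #            user='user@example.com', date=None)
+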
+ def tags(self):
+ '''return a mapping of tag to node'''
+ if self.tagscache:
+ return self.tagscache
+
+ globaltags = {}
+ tagtypes = {}
+
+ def readtags(lines, fn, tagtype):
+ filetags = {}
+ count = 0
+
+ def warn(msg):
+ self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
+
+ for l in lines:
+ count += 1
+ if not l:
+ continue
+ s = l.split(" ", 1)
+ if len(s) != 2:
+ warn(_("cannot parse entry"))
+ continue
+ node, key = s
+ key = encoding.tolocal(key.strip()) # stored in UTF-8
+ try:
+ bin_n = bin(node)
+ except TypeError:
+ warn(_("node '%s' is not well formed") % node)
+ continue
+ if bin_n not in self.changelog.nodemap:
+ # silently ignore as pull -r might cause this
+ continue
+
+ h = []
+ if key in filetags:
+ n, h = filetags[key]
+ h.append(n)
+ filetags[key] = (bin_n, h)
+
+ for k, nh in filetags.iteritems():
+ if k not in globaltags:
+ globaltags[k] = nh
+ tagtypes[k] = tagtype
+ continue
+
+ # we prefer the global tag if:
+ # it supersedes us OR
+ # mutual supersedes and it has a higher rank
+ # otherwise we win because we're tip-most
+ an, ah = nh
+ bn, bh = globaltags[k]
+ if (bn != an and an in bh and
+ (bn not in ah or len(bh) > len(ah))):
+ an = bn
+ ah.extend([n for n in bh if n not in ah])
+ globaltags[k] = an, ah
+ tagtypes[k] = tagtype
+
+ seen = set()
+ f = None
+ ctxs = []
+ for node in self.heads():
+ try:
+ fnode = self[node].filenode('.hgtags')
+ except error.LookupError:
+ continue
+ if fnode not in seen:
+ seen.add(fnode)
+ if not f:
+ f = self.filectx('.hgtags', fileid=fnode)
+ else:
+ f = f.filectx(fnode)
+ ctxs.append(f)
+
+ # read the tags file from each head, ending with the tip
+ for f in reversed(ctxs):
+ readtags(f.data().splitlines(), f, "global")
+
+ try:
+ data = encoding.fromlocal(self.opener("localtags").read())
+ # localtags are stored in the local character set
+ # while the internal tag table is stored in UTF-8
+ readtags(data.splitlines(), "localtags", "local")
+ except IOError:
+ pass
+
+ self.tagscache = {}
+ self._tagstypecache = {}
+ for k, nh in globaltags.iteritems():
+ n = nh[0]
+ if n != nullid:
+ self.tagscache[k] = n
+ self._tagstypecache[k] = tagtypes[k]
+ self.tagscache['tip'] = self.changelog.tip()
+ return self.tagscache
+
+ def tagtype(self, tagname):
+ '''
+ return the type of the given tag. result can be:
+
+ 'local' : a local tag
+ 'global' : a global tag
+ None : tag does not exist
+ '''
+
+ self.tags()
+
+ return self._tagstypecache.get(tagname)
+
+ def tagslist(self):
+ '''return a list of tags ordered by revision'''
+ l = []
+ for t, n in self.tags().iteritems():
+ try:
+ r = self.changelog.rev(n)
+ except:
+ r = -2 # sort to the beginning of the list if unknown
+ l.append((r, t, n))
+ return [(t, n) for r, t, n in sorted(l)]
+
+ def nodetags(self, node):
+ '''return the tags associated with a node'''
+ if not self.nodetagscache:
+ self.nodetagscache = {}
+ for t, n in self.tags().iteritems():
+ self.nodetagscache.setdefault(n, []).append(t)
+ return self.nodetagscache.get(node, [])
+
+ def _branchtags(self, partial, lrev):
+ # TODO: rename this function?
+ tiprev = len(self) - 1
+ if lrev != tiprev:
+ self._updatebranchcache(partial, lrev+1, tiprev+1)
+ self._writebranchcache(partial, self.changelog.tip(), tiprev)
+
+ return partial
+
+ def branchmap(self):
+ tip = self.changelog.tip()
+ if self.branchcache is not None and self._branchcachetip == tip:
+ return self.branchcache
+
+ oldtip = self._branchcachetip
+ self._branchcachetip = tip
+ if self.branchcache is None:
+ self.branchcache = {} # avoid recursion in changectx
+ else:
+ self.branchcache.clear() # keep using the same dict
+ if oldtip is None or oldtip not in self.changelog.nodemap:
+ partial, last, lrev = self._readbranchcache()
+ else:
+ lrev = self.changelog.rev(oldtip)
+ partial = self._ubranchcache
+
+ self._branchtags(partial, lrev)
+ # this private cache holds all heads (not just tips)
+ self._ubranchcache = partial
+
+ # the branch cache is stored on disk as UTF-8, but in the local
+ # charset internally
+ for k, v in partial.iteritems():
+ self.branchcache[encoding.tolocal(k)] = v
+ return self.branchcache
+
+
+ def branchtags(self):
+ '''return a dict where branch names map to the tipmost head of
+ the branch, open heads come before closed'''
+ bt = {}
+ for bn, heads in self.branchmap().iteritems():
+ head = None
+ for i in range(len(heads)-1, -1, -1):
+ h = heads[i]
+ if 'close' not in self.changelog.read(h)[5]:
+ head = h
+ break
+ # no open heads were found
+ if head is None:
+ head = heads[-1]
+ bt[bn] = head
+ return bt
+
+
+ def _readbranchcache(self):
+ partial = {}
+ try:
+ f = self.opener("branchheads.cache")
+ lines = f.read().split('\n')
+ f.close()
+ except (IOError, OSError):
+ return {}, nullid, nullrev
+
+ try:
+ last, lrev = lines.pop(0).split(" ", 1)
+ last, lrev = bin(last), int(lrev)
+ if lrev >= len(self) or self[lrev].node() != last:
+ # invalidate the cache
+ raise ValueError('invalidating branch cache (tip differs)')
+ for l in lines:
+ if not l: continue
+ node, label = l.split(" ", 1)
+ partial.setdefault(label.strip(), []).append(bin(node))
+ except KeyboardInterrupt:
+ raise
+ except Exception, inst:
+ if self.ui.debugflag:
+ self.ui.warn(str(inst), '\n')
+ partial, last, lrev = {}, nullid, nullrev
+ return partial, last, lrev
+
+ def _writebranchcache(self, branches, tip, tiprev):
+ try:
+ f = self.opener("branchheads.cache", "w", atomictemp=True)
+ f.write("%s %s\n" % (hex(tip), tiprev))
+ for label, nodes in branches.iteritems():
+ for node in nodes:
+ f.write("%s %s\n" % (hex(node), label))
+ f.rename()
+ except (IOError, OSError):
+ pass
+
+ def _updatebranchcache(self, partial, start, end):
+ # collect new branch entries
+ newbranches = {}
+ for r in xrange(start, end):
+ c = self[r]
+ newbranches.setdefault(c.branch(), []).append(c.node())
+ # if older branchheads are reachable from new ones, they aren't
+ # really branchheads. Note checking parents is insufficient:
+ # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
+ for branch, newnodes in newbranches.iteritems():
+ bheads = partial.setdefault(branch, [])
+ bheads.extend(newnodes)
+ if len(bheads) < 2:
+ continue
+ newbheads = []
+ # starting from tip means fewer passes over reachable
+ while newnodes:
+ latest = newnodes.pop()
+ if latest not in bheads:
+ continue
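+                # drop every branch head reachable from 'latest', searching
+                # no further back than the lowest-revision current head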
+ minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
+ reachable = self.changelog.reachable(latest, minbhrev)
+ bheads = [b for b in bheads if b not in reachable]
+ newbheads.insert(0, latest)
+ bheads.extend(newbheads)
+ partial[branch] = bheads
+
+ def lookup(self, key):
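+        # try, in order: integer revision, the special names '.', 'null'
+        # and 'tip', a full node, a tag, a branch name, and finally an
+        # unambiguous node prefix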
+ if isinstance(key, int):
+ return self.changelog.node(key)
+ elif key == '.':
+ return self.dirstate.parents()[0]
+ elif key == 'null':
+ return nullid
+ elif key == 'tip':
+ return self.changelog.tip()
+ n = self.changelog._match(key)
+ if n:
+ return n
+ if key in self.tags():
+ return self.tags()[key]
+ if key in self.branchtags():
+ return self.branchtags()[key]
+ n = self.changelog._partialmatch(key)
+ if n:
+ return n
+
+ # can't find key, check if it might have come from damaged dirstate
+ if key in self.dirstate.parents():
+ raise error.Abort(_("working directory has unknown parent '%s'!")
+ % short(key))
+ try:
+ if len(key) == 20:
+ key = hex(key)
+ except:
+ pass
+ raise error.RepoError(_("unknown revision '%s'") % key)
+
+ def local(self):
+ return True
+
+ def join(self, f):
+ return os.path.join(self.path, f)
+
+ def wjoin(self, f):
+ return os.path.join(self.root, f)
+
+ def rjoin(self, f):
+ return os.path.join(self.root, util.pconvert(f))
+
+ def file(self, f):
+ if f[0] == '/':
+ f = f[1:]
+ return filelog.filelog(self.sopener, f)
+
+ def changectx(self, changeid):
+ return self[changeid]
+
+ def parents(self, changeid=None):
+ '''get list of changectxs for parents of changeid'''
+ return self[changeid].parents()
+
+ def filectx(self, path, changeid=None, fileid=None):
+ """changeid can be a changeset revision, node, or tag.
+ fileid can be a file revision or node."""
+ return context.filectx(self, path, changeid, fileid)
+
+ def getcwd(self):
+ return self.dirstate.getcwd()
+
+ def pathto(self, f, cwd=None):
+ return self.dirstate.pathto(f, cwd)
+
+ def wfile(self, f, mode='r'):
+ return self.wopener(f, mode)
+
+ def _link(self, f):
+ return os.path.islink(self.wjoin(f))
+
+ def _filter(self, filter, filename, data):
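+        # build the (matcher, filterfn, params) list for this filter once
+        # and cache it in self.filterpats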
+ if filter not in self.filterpats:
+ l = []
+ for pat, cmd in self.ui.configitems(filter):
+ if cmd == '!':
+ continue
+ mf = match_.match(self.root, '', [pat])
+ fn = None
+ params = cmd
+ for name, filterfn in self._datafilters.iteritems():
+ if cmd.startswith(name):
+ fn = filterfn
+ params = cmd[len(name):].lstrip()
+ break
+ if not fn:
+ fn = lambda s, c, **kwargs: util.filter(s, c)
+ # Wrap old filters not supporting keyword arguments
+ if not inspect.getargspec(fn)[2]:
+ oldfn = fn
+ fn = lambda s, c, **kwargs: oldfn(s, c)
+ l.append((mf, fn, params))
+ self.filterpats[filter] = l
+
+ for mf, fn, cmd in self.filterpats[filter]:
+ if mf(filename):
+ self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
+ data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
+ break
+
+ return data
+
+ def adddatafilter(self, name, filter):
+ self._datafilters[name] = filter
+
+ def wread(self, filename):
+ if self._link(filename):
+ data = os.readlink(self.wjoin(filename))
+ else:
+ data = self.wopener(filename, 'r').read()
+ return self._filter("encode", filename, data)
+
+ def wwrite(self, filename, data, flags):
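+        # run the 'decode' filters and write the result to the working
+        # directory, honoring symlink ('l') and executable ('x') flags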
+ data = self._filter("decode", filename, data)
+ try:
+ os.unlink(self.wjoin(filename))
+ except OSError:
+ pass
+ if 'l' in flags:
+ self.wopener.symlink(data, filename)
+ else:
+ self.wopener(filename, 'w').write(data)
+ if 'x' in flags:
+ util.set_flags(self.wjoin(filename), False, True)
+
+ def wwritedata(self, filename, data):
+ return self._filter("decode", filename, data)
+
+ def transaction(self):
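+        # reuse a transaction that is already running, if any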
+ tr = self._transref and self._transref() or None
+ if tr and tr.running():
+ return tr.nest()
+
+ # abort here if the journal already exists
+ if os.path.exists(self.sjoin("journal")):
+ raise error.RepoError(_("journal already exists - run hg recover"))
+
+ # save dirstate for rollback
+ try:
+ ds = self.opener("dirstate").read()
+ except IOError:
+ ds = ""
+ self.opener("journal.dirstate", "w").write(ds)
+ self.opener("journal.branch", "w").write(self.dirstate.branch())
+
+ renames = [(self.sjoin("journal"), self.sjoin("undo")),
+ (self.join("journal.dirstate"), self.join("undo.dirstate")),
+ (self.join("journal.branch"), self.join("undo.branch"))]
+ tr = transaction.transaction(self.ui.warn, self.sopener,
+ self.sjoin("journal"),
+ aftertrans(renames),
+ self.store.createmode)
+ self._transref = weakref.ref(tr)
+ return tr
+
+ def recover(self):
+ lock = self.lock()
+ try:
+ if os.path.exists(self.sjoin("journal")):
+ self.ui.status(_("rolling back interrupted transaction\n"))
+ transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
+ self.invalidate()
+ return True
+ else:
+ self.ui.warn(_("no interrupted transaction available\n"))
+ return False
+ finally:
+ lock.release()
+
+ def rollback(self):
+ wlock = lock = None
+ try:
+ wlock = self.wlock()
+ lock = self.lock()
+ if os.path.exists(self.sjoin("undo")):
+ self.ui.status(_("rolling back last transaction\n"))
+ transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
+ util.rename(self.join("undo.dirstate"), self.join("dirstate"))
+ try:
+ branch = self.opener("undo.branch").read()
+ self.dirstate.setbranch(branch)
+ except IOError:
+ self.ui.warn(_("Named branch could not be reset, "
+ "current branch still is: %s\n")
+ % encoding.tolocal(self.dirstate.branch()))
+ self.invalidate()
+ self.dirstate.invalidate()
+ else:
+ self.ui.warn(_("no rollback information available\n"))
+ finally:
+ release(lock, wlock)
+
+ def invalidate(self):
+ for a in "changelog manifest".split():
+ if a in self.__dict__:
+ delattr(self, a)
+ self.tagscache = None
+ self._tagstypecache = None
+ self.nodetagscache = None
+ self.branchcache = None
+ self._ubranchcache = None
+ self._branchcachetip = None
+
+ def _lock(self, lockname, wait, releasefn, acquirefn, desc):
+ try:
+ l = lock.lock(lockname, 0, releasefn, desc=desc)
+ except error.LockHeld, inst:
+ if not wait:
+ raise
+ self.ui.warn(_("waiting for lock on %s held by %r\n") %
+ (desc, inst.locker))
+ # default to 600 seconds timeout
+ l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
+ releasefn, desc=desc)
+ if acquirefn:
+ acquirefn()
+ return l
+
+ def lock(self, wait=True):
+ l = self._lockref and self._lockref()
+ if l is not None and l.held:
+ l.lock()
+ return l
+
+ l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
+ _('repository %s') % self.origroot)
+ self._lockref = weakref.ref(l)
+ return l
+
+ def wlock(self, wait=True):
+ l = self._wlockref and self._wlockref()
+ if l is not None and l.held:
+ l.lock()
+ return l
+
+ l = self._lock(self.join("wlock"), wait, self.dirstate.write,
+ self.dirstate.invalidate, _('working directory of %s') %
+ self.origroot)
+ self._wlockref = weakref.ref(l)
+ return l
+
+ def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
+ """
+ commit an individual file as part of a larger transaction
+ """
+
+ fname = fctx.path()
+ text = fctx.data()
+ flog = self.file(fname)
+ fparent1 = manifest1.get(fname, nullid)
+ fparent2 = fparent2o = manifest2.get(fname, nullid)
+
+ meta = {}
+ copy = fctx.renamed()
+ if copy and copy[0] != fname:
+ # Mark the new revision of this file as a copy of another
+ # file. This copy data will effectively act as a parent
+ # of this new revision. If this is a merge, the first
+ # parent will be the nullid (meaning "look up the copy data")
+ # and the second one will be the other parent. For example:
+ #
+ # 0 --- 1 --- 3 rev1 changes file foo
+ # \ / rev2 renames foo to bar and changes it
+ # \- 2 -/ rev3 should have bar with all changes and
+ # should record that bar descends from
+ # bar in rev2 and foo in rev1
+ #
+ # this allows this merge to succeed:
+ #
+ # 0 --- 1 --- 3 rev4 reverts the content change from rev2
+ # \ / merging rev3 and rev4 should use bar@rev2
+ # \- 2 --- 4 as the merge base
+ #
+
+ cfname = copy[0]
+ crev = manifest1.get(cfname)
+ newfparent = fparent2
+
+ if manifest2: # branch merge
+ if fparent2 == nullid or crev is None: # copied on remote side
+ if cfname in manifest2:
+ crev = manifest2[cfname]
+ newfparent = fparent1
+
+ # find source in nearest ancestor if we've lost track
+ if not crev:
+ self.ui.debug(_(" %s: searching for copy revision for %s\n") %
+ (fname, cfname))
+ for ancestor in self['.'].ancestors():
+ if cfname in ancestor:
+ crev = ancestor[cfname].filenode()
+ break
+
+ self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
+ meta["copy"] = cfname
+ meta["copyrev"] = hex(crev)
+ fparent1, fparent2 = nullid, newfparent
+ elif fparent2 != nullid:
+ # is one parent an ancestor of the other?
+ fparentancestor = flog.ancestor(fparent1, fparent2)
+ if fparentancestor == fparent1:
+ fparent1, fparent2 = fparent2, nullid
+ elif fparentancestor == fparent2:
+ fparent2 = nullid
+
+ # is the file changed?
+ if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
+ changelist.append(fname)
+ return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
+
+ # are just the flags changed during merge?
+ if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
+ changelist.append(fname)
+
+ return fparent1
+
+ def commit(self, text="", user=None, date=None, match=None, force=False,
+ editor=False, extra={}):
+ """Add a new revision to current repository.
+
+ Revision information is gathered from the working directory,
+ match can be used to filter the committed files. If editor is
+ supplied, it is called to get a commit message.
+ """
+
+ def fail(f, msg):
+ raise util.Abort('%s: %s' % (f, msg))
+
+ if not match:
+ match = match_.always(self.root, '')
+
+ if not force:
+ vdirs = []
+ match.dir = vdirs.append
+ match.bad = fail
+
+ wlock = self.wlock()
+ try:
+ p1, p2 = self.dirstate.parents()
+ wctx = self[None]
+
+ if (not force and p2 != nullid and match and
+ (match.files() or match.anypats())):
+ raise util.Abort(_('cannot partially commit a merge '
+ '(do not specify files or patterns)'))
+
+ changes = self.status(match=match, clean=force)
+ if force:
+ changes[0].extend(changes[6]) # mq may commit unchanged files
+
+ # check subrepos
+ subs = []
+ for s in wctx.substate:
+ if match(s) and wctx.sub(s).dirty():
+ subs.append(s)
+ if subs and '.hgsubstate' not in changes[0]:
+ changes[0].insert(0, '.hgsubstate')
+
+ # make sure all explicit patterns are matched
+ if not force and match.files():
+ matched = set(changes[0] + changes[1] + changes[2])
+
+ for f in match.files():
+ if f == '.' or f in matched or f in wctx.substate:
+ continue
+ if f in changes[3]: # missing
+ fail(f, _('file not found!'))
+ if f in vdirs: # visited directory
+ d = f + '/'
+ for mf in matched:
+ if mf.startswith(d):
+ break
+ else:
+ fail(f, _("no match under directory!"))
+ elif f not in self.dirstate:
+ fail(f, _("file not tracked!"))
+
+ if (not force and not extra.get("close") and p2 == nullid
+ and not (changes[0] or changes[1] or changes[2])
+ and self[None].branch() == self['.'].branch()):
+ return None
+
+ ms = merge_.mergestate(self)
+ for f in changes[0]:
+ if f in ms and ms[f] == 'u':
+ raise util.Abort(_("unresolved merge conflicts "
+ "(see hg resolve)"))
+
+ cctx = context.workingctx(self, (p1, p2), text, user, date,
+ extra, changes)
+ if editor:
+ cctx._text = editor(self, cctx, subs)
+
+ # commit subs
+ if subs:
+ state = wctx.substate.copy()
+ for s in subs:
+ self.ui.status(_('committing subrepository %s\n') % s)
+ sr = wctx.sub(s).commit(cctx._text, user, date)
+ state[s] = (state[s][0], sr)
+ subrepo.writestate(self, state)
+
+ ret = self.commitctx(cctx, True)
+
+ # update dirstate and mergestate
+ for f in changes[0] + changes[1]:
+ self.dirstate.normal(f)
+ for f in changes[2]:
+ self.dirstate.forget(f)
+ self.dirstate.setparents(ret)
+ ms.reset()
+
+ return ret
+
+ finally:
+ wlock.release()
+
+ def commitctx(self, ctx, error=False):
+ """Add a new revision to current repository.
+
+ Revision information is passed via the context argument.
+ """
+
+ tr = lock = None
+ removed = ctx.removed()
+ p1, p2 = ctx.p1(), ctx.p2()
+ m1 = p1.manifest().copy()
+ m2 = p2.manifest()
+ user = ctx.user()
+
+ xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
+ self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
+
+ lock = self.lock()
+ try:
+ tr = self.transaction()
+ trp = weakref.proxy(tr)
+
+ # check in files
+ new = {}
+ changed = []
+ linkrev = len(self)
+ for f in sorted(ctx.modified() + ctx.added()):
+ self.ui.note(f + "\n")
+ try:
+ fctx = ctx[f]
+ new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
+ changed)
+ m1.set(f, fctx.flags())
+ except (OSError, IOError):
+ if error:
+ self.ui.warn(_("trouble committing %s!\n") % f)
+ raise
+ else:
+ removed.append(f)
+
+ # update manifest
+ m1.update(new)
+ removed = [f for f in sorted(removed) if f in m1 or f in m2]
+ drop = [f for f in removed if f in m1]
+ for f in drop:
+ del m1[f]
+ mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
+ p2.manifestnode(), (new, drop))
+
+ # update changelog
+ self.changelog.delayupdate()
+ n = self.changelog.add(mn, changed + removed, ctx.description(),
+ trp, p1.node(), p2.node(),
+ user, ctx.date(), ctx.extra().copy())
+ p = lambda: self.changelog.writepending() and self.root or ""
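+            # the pending callback lets the pretxncommit hook see the new
+            # changeset before the changelog is finalized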
+ self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
+ parent2=xp2, pending=p)
+ self.changelog.finalize(trp)
+ tr.close()
+
+ if self.branchcache:
+ self.branchtags()
+
+ self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
+ return n
+ finally:
+ del tr
+ lock.release()
+
+ def walk(self, match, node=None):
+ '''
+ walk recursively through the directory tree or a given
+ changeset, finding all files matched by the match
+ function
+ '''
+ return self[node].walk(match)
+
+ def status(self, node1='.', node2=None, match=None,
+ ignored=False, clean=False, unknown=False):
+ """return status of files between two nodes or node and working directory
+
+ If node1 is None, use the first dirstate parent instead.
+ If node2 is None, compare node1 with working directory.
+ """
+
+ def mfmatches(ctx):
+ mf = ctx.manifest().copy()
+ for fn in mf.keys():
+ if not match(fn):
+ del mf[fn]
+ return mf
+
+ if isinstance(node1, context.changectx):
+ ctx1 = node1
+ else:
+ ctx1 = self[node1]
+ if isinstance(node2, context.changectx):
+ ctx2 = node2
+ else:
+ ctx2 = self[node2]
+
+ working = ctx2.rev() is None
+ parentworking = working and ctx1 == self['.']
+ match = match or match_.always(self.root, self.getcwd())
+ listignored, listclean, listunknown = ignored, clean, unknown
+
+ # load earliest manifest first for caching reasons
+ if not working and ctx2.rev() < ctx1.rev():
+ ctx2.manifest()
+
+ if not parentworking:
+ def bad(f, msg):
+ if f not in ctx1:
+ self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
+ match.bad = bad
+
+ if working: # we need to scan the working dir
+ s = self.dirstate.status(match, listignored, listclean, listunknown)
+ cmp, modified, added, removed, deleted, unknown, ignored, clean = s
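+            # 'cmp' holds files the dirstate could not classify from stat
+            # data alone; compare their contents below to decide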
+
+ # check for any possibly clean files
+ if parentworking and cmp:
+ fixup = []
+ # do a full compare of any files that might have changed
+ for f in sorted(cmp):
+ if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
+ or ctx1[f].cmp(ctx2[f].data())):
+ modified.append(f)
+ else:
+ fixup.append(f)
+
+ if listclean:
+ clean += fixup
+
+ # update dirstate for files that are actually clean
+ if fixup:
+ try:
+ # updating the dirstate is optional
+ # so we don't wait on the lock
+ wlock = self.wlock(False)
+ try:
+ for f in fixup:
+ self.dirstate.normal(f)
+ finally:
+ wlock.release()
+ except error.LockError:
+ pass
+
+ if not parentworking:
+ mf1 = mfmatches(ctx1)
+ if working:
+ # we are comparing working dir against non-parent
+ # generate a pseudo-manifest for the working dir
+ mf2 = mfmatches(self['.'])
+ for f in cmp + modified + added:
+ mf2[f] = None
+ mf2.set(f, ctx2.flags(f))
+ for f in removed:
+ if f in mf2:
+ del mf2[f]
+ else:
+ # we are comparing two revisions
+ deleted, unknown, ignored = [], [], []
+ mf2 = mfmatches(ctx2)
+
+ modified, added, clean = [], [], []
+ for fn in mf2:
+ if fn in mf1:
+ if (mf1.flags(fn) != mf2.flags(fn) or
+ (mf1[fn] != mf2[fn] and
+ (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
+ modified.append(fn)
+ elif listclean:
+ clean.append(fn)
+ del mf1[fn]
+ else:
+ added.append(fn)
+ removed = mf1.keys()
+
+ r = modified, added, removed, deleted, unknown, ignored, clean
+        for l in r:
+            l.sort()
+ return r
+
+ def add(self, list):
+ wlock = self.wlock()
+ try:
+ rejected = []
+ for f in list:
+ p = self.wjoin(f)
+ try:
+ st = os.lstat(p)
+ except:
+ self.ui.warn(_("%s does not exist!\n") % f)
+ rejected.append(f)
+ continue
+ if st.st_size > 10000000:
+ self.ui.warn(_("%s: files over 10MB may cause memory and"
+ " performance problems\n"
+ "(use 'hg revert %s' to unadd the file)\n")
+ % (f, f))
+ if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
+ self.ui.warn(_("%s not added: only files and symlinks "
+ "supported currently\n") % f)
+ rejected.append(p)
+ elif self.dirstate[f] in 'amn':
+ self.ui.warn(_("%s already tracked!\n") % f)
+ elif self.dirstate[f] == 'r':
+ self.dirstate.normallookup(f)
+ else:
+ self.dirstate.add(f)
+ return rejected
+ finally:
+ wlock.release()
+
+ def forget(self, list):
+ wlock = self.wlock()
+ try:
+ for f in list:
+ if self.dirstate[f] != 'a':
+ self.ui.warn(_("%s not added!\n") % f)
+ else:
+ self.dirstate.forget(f)
+ finally:
+ wlock.release()
+
+ def remove(self, list, unlink=False):
+ if unlink:
+ for f in list:
+ try:
+ util.unlink(self.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ wlock = self.wlock()
+ try:
+ for f in list:
+ if unlink and os.path.exists(self.wjoin(f)):
+ self.ui.warn(_("%s still exists!\n") % f)
+ elif self.dirstate[f] == 'a':
+ self.dirstate.forget(f)
+ elif f not in self.dirstate:
+ self.ui.warn(_("%s not tracked!\n") % f)
+ else:
+ self.dirstate.remove(f)
+ finally:
+ wlock.release()
+
+ def undelete(self, list):
+ manifests = [self.manifest.read(self.changelog.read(p)[0])
+ for p in self.dirstate.parents() if p != nullid]
+ wlock = self.wlock()
+ try:
+ for f in list:
+ if self.dirstate[f] != 'r':
+ self.ui.warn(_("%s not removed!\n") % f)
+ else:
+ m = f in manifests[0] and manifests[0] or manifests[1]
+ t = self.file(f).read(m[f])
+ self.wwrite(f, t, m.flags(f))
+ self.dirstate.normal(f)
+ finally:
+ wlock.release()
+
+ def copy(self, source, dest):
+ p = self.wjoin(dest)
+ if not (os.path.exists(p) or os.path.islink(p)):
+ self.ui.warn(_("%s does not exist!\n") % dest)
+ elif not (os.path.isfile(p) or os.path.islink(p)):
+ self.ui.warn(_("copy failed: %s is not a file or a "
+ "symbolic link\n") % dest)
+ else:
+ wlock = self.wlock()
+ try:
+ if self.dirstate[dest] in '?r':
+ self.dirstate.add(dest)
+ self.dirstate.copy(source, dest)
+ finally:
+ wlock.release()
+
+ def heads(self, start=None):
+ heads = self.changelog.heads(start)
+ # sort the output in rev descending order
+ heads = [(-self.changelog.rev(h), h) for h in heads]
+ return [n for (r, n) in sorted(heads)]
+
+ def branchheads(self, branch=None, start=None, closed=False):
+ if branch is None:
+ branch = self[None].branch()
+ branches = self.branchmap()
+ if branch not in branches:
+ return []
+ bheads = branches[branch]
+ # the cache returns heads ordered lowest to highest
+ bheads.reverse()
+ if start is not None:
+ # filter out the heads that cannot be reached from startrev
+ bheads = self.changelog.nodesbetween([start], bheads)[2]
+ if not closed:
+ bheads = [h for h in bheads if
+ ('close' not in self.changelog.read(h)[5])]
+ return bheads
+
+ def branches(self, nodes):
+ if not nodes:
+ nodes = [self.changelog.tip()]
+ b = []
+ for n in nodes:
+ t = n
+ while 1:
+ p = self.changelog.parents(n)
+ if p[1] != nullid or p[0] == nullid:
+ b.append((t, n, p[0], p[1]))
+ break
+ n = p[0]
+ return b
+
+ def between(self, pairs):
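+        '''for each (top, bottom) pair, walk the first-parent chain from
+        top towards bottom and return the nodes seen at exponentially
+        growing distances (1, 2, 4, ...)'''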
+ r = []
+
+ for top, bottom in pairs:
+ n, l, i = top, [], 0
+ f = 1
+
+ while n != bottom and n != nullid:
+ p = self.changelog.parents(n)[0]
+ if i == f:
+ l.append(n)
+ f = f * 2
+ n = p
+ i += 1
+
+ r.append(l)
+
+ return r
+
+ def findincoming(self, remote, base=None, heads=None, force=False):
+ """Return list of roots of the subsets of missing nodes from remote
+
+ If base dict is specified, assume that these nodes and their parents
+ exist on the remote side and that no child of a node of base exists
+ in both remote and self.
+        Furthermore, base will be updated to include the nodes that exist
+        in both self and remote but none of whose children exist in both.
+ If a list of heads is specified, return only nodes which are heads
+ or ancestors of these heads.
+
+ All the ancestors of base are in self and in remote.
+ All the descendants of the list returned are missing in self.
+ (and so we know that the rest of the nodes are missing in remote, see
+ outgoing)
+ """
+ return self.findcommonincoming(remote, base, heads, force)[1]
+
+ def findcommonincoming(self, remote, base=None, heads=None, force=False):
+ """Return a tuple (common, missing roots, heads) used to identify
+ missing nodes from remote.
+
+ If base dict is specified, assume that these nodes and their parents
+ exist on the remote side and that no child of a node of base exists
+ in both remote and self.
+        Furthermore, base will be updated to include the nodes that exist
+        in both self and remote but none of whose children exist in both.
+ If a list of heads is specified, return only nodes which are heads
+ or ancestors of these heads.
+
+ All the ancestors of base are in self and in remote.
+ """
+ m = self.changelog.nodemap
+ search = []
+ fetch = set()
+ seen = set()
+ seenbranch = set()
+ if base is None:
+ base = {}
+
+ if not heads:
+ heads = remote.heads()
+
+ if self.changelog.tip() == nullid:
+ base[nullid] = 1
+ if heads != [nullid]:
+ return [nullid], [nullid], list(heads)
+ return [nullid], [], []
+
+ # assume we're closer to the tip than the root
+ # and start by examining the heads
+ self.ui.status(_("searching for changes\n"))
+
+ unknown = []
+ for h in heads:
+ if h not in m:
+ unknown.append(h)
+ else:
+ base[h] = 1
+
+ heads = unknown
+ if not unknown:
+ return base.keys(), [], []
+
+ req = set(unknown)
+ reqcnt = 0
+
+ # search through remote branches
+ # a 'branch' here is a linear segment of history, with four parts:
+ # head, root, first parent, second parent
+ # (a branch always has two parents (or none) by definition)
+ unknown = remote.branches(unknown)
+ while unknown:
+ r = []
+ while unknown:
+ n = unknown.pop(0)
+ if n[0] in seen:
+ continue
+
+ self.ui.debug(_("examining %s:%s\n")
+ % (short(n[0]), short(n[1])))
+ if n[0] == nullid: # found the end of the branch
+ pass
+ elif n in seenbranch:
+ self.ui.debug(_("branch already found\n"))
+ continue
+ elif n[1] and n[1] in m: # do we know the base?
+ self.ui.debug(_("found incomplete branch %s:%s\n")
+ % (short(n[0]), short(n[1])))
+ search.append(n[0:2]) # schedule branch range for scanning
+ seenbranch.add(n)
+ else:
+ if n[1] not in seen and n[1] not in fetch:
+ if n[2] in m and n[3] in m:
+ self.ui.debug(_("found new changeset %s\n") %
+ short(n[1]))
+ fetch.add(n[1]) # earliest unknown
+ for p in n[2:4]:
+ if p in m:
+ base[p] = 1 # latest known
+
+ for p in n[2:4]:
+ if p not in req and p not in m:
+ r.append(p)
+ req.add(p)
+ seen.add(n[0])
+
+ if r:
+ reqcnt += 1
+ self.ui.debug(_("request %d: %s\n") %
+ (reqcnt, " ".join(map(short, r))))
+ for p in xrange(0, len(r), 10):
+ for b in remote.branches(r[p:p+10]):
+ self.ui.debug(_("received %s:%s\n") %
+ (short(b[0]), short(b[1])))
+ unknown.append(b)
+
+ # do binary search on the branches we found
+ while search:
+ newsearch = []
+ reqcnt += 1
+ for n, l in zip(search, remote.between(search)):
+ l.append(n[1])
+ p = n[0]
+ f = 1
+ for i in l:
+ self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
+ if i in m:
+ if f <= 2:
+ self.ui.debug(_("found new branch changeset %s\n") %
+ short(p))
+ fetch.add(p)
+ base[i] = 1
+ else:
+ self.ui.debug(_("narrowed branch search to %s:%s\n")
+ % (short(p), short(i)))
+ newsearch.append((p, i))
+ break
+ p, f = i, f * 2
+ search = newsearch
+
+ # sanity check our fetch list
+ for f in fetch:
+ if f in m:
+ raise error.RepoError(_("already have changeset ")
+ + short(f[:4]))
+
+ if base.keys() == [nullid]:
+ if force:
+ self.ui.warn(_("warning: repository is unrelated\n"))
+ else:
+ raise util.Abort(_("repository is unrelated"))
+
+ self.ui.debug(_("found new changesets starting at ") +
+ " ".join([short(f) for f in fetch]) + "\n")
+
+ self.ui.debug(_("%d total queries\n") % reqcnt)
+
+ return base.keys(), list(fetch), heads
+
+ def findoutgoing(self, remote, base=None, heads=None, force=False):
+ """Return list of nodes that are roots of subsets not in remote
+
+ If base dict is specified, assume that these nodes and their parents
+ exist on the remote side.
+ If a list of heads is specified, return only nodes which are heads
+ or ancestors of these heads, and return a second element which
+ contains all remote heads which get new children.
+ """
+ if base is None:
+ base = {}
+ self.findincoming(remote, base, heads, force=force)
+
+ self.ui.debug(_("common changesets up to ")
+ + " ".join(map(short, base.keys())) + "\n")
+
+ remain = set(self.changelog.nodemap)
+
+ # prune everything remote has from the tree
+ remain.remove(nullid)
+ remove = base.keys()
+ while remove:
+ n = remove.pop(0)
+ if n in remain:
+ remain.remove(n)
+ for p in self.changelog.parents(n):
+ remove.append(p)
+
+ # find every node whose parents have been pruned
+ subset = []
+ # find every remote head that will get new children
+ updated_heads = set()
+ for n in remain:
+ p1, p2 = self.changelog.parents(n)
+ if p1 not in remain and p2 not in remain:
+ subset.append(n)
+ if heads:
+ if p1 in heads:
+ updated_heads.add(p1)
+ if p2 in heads:
+ updated_heads.add(p2)
+
+ # this is the set of all roots we have to push
+ if heads:
+ return subset, list(updated_heads)
+ else:
+ return subset
+
+ def pull(self, remote, heads=None, force=False):
+ lock = self.lock()
+ try:
+ common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
+ force=force)
+ if fetch == [nullid]:
+ self.ui.status(_("requesting all changes\n"))
+
+ if not fetch:
+ self.ui.status(_("no changes found\n"))
+ return 0
+
+ if heads is None and remote.capable('changegroupsubset'):
+ heads = rheads
+
+ if heads is None:
+ cg = remote.changegroup(fetch, 'pull')
+ else:
+ if not remote.capable('changegroupsubset'):
+ raise util.Abort(_("Partial pull cannot be done because "
+ "other repository doesn't support "
+ "changegroupsubset."))
+ cg = remote.changegroupsubset(fetch, heads, 'pull')
+ return self.addchangegroup(cg, 'pull', remote.url())
+ finally:
+ lock.release()
+
+ def push(self, remote, force=False, revs=None):
+ # there are two ways to push to remote repo:
+ #
+ # addchangegroup assumes local user can lock remote
+ # repo (local filesystem, old ssh servers).
+ #
+ # unbundle assumes local user cannot lock remote repo (new ssh
+ # servers, http servers).
+
+ if remote.capable('unbundle'):
+ return self.push_unbundle(remote, force, revs)
+ return self.push_addchangegroup(remote, force, revs)
+
+ def prepush(self, remote, force, revs):
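+        # returns (changegroup, remote heads) when there is something to
+        # push, or (None, status) when the push is empty or must abort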
+ common = {}
+ remote_heads = remote.heads()
+ inc = self.findincoming(remote, common, remote_heads, force=force)
+
+ update, updated_heads = self.findoutgoing(remote, common, remote_heads)
+ if revs is not None:
+ msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
+ else:
+ bases, heads = update, self.changelog.heads()
+
+ def checkbranch(lheads, rheads, updatelh):
+ '''
+ check whether there are more local heads than remote heads on
+ a specific branch.
+
+ lheads: local branch heads
+ rheads: remote branch heads
+ updatelh: outgoing local branch heads
+ '''
+
+ warn = 0
+
+ if not revs and len(lheads) > len(rheads):
+ warn = 1
+ else:
+ updatelheads = [self.changelog.heads(x, lheads)
+ for x in updatelh]
+ newheads = set(sum(updatelheads, [])) & set(lheads)
+
+ if not newheads:
+ return True
+
+ for r in rheads:
+ if r in self.changelog.nodemap:
+ desc = self.changelog.heads(r, heads)
+ l = [h for h in heads if h in desc]
+ if not l:
+ newheads.add(r)
+ else:
+ newheads.add(r)
+ if len(newheads) > len(rheads):
+ warn = 1
+
+ if warn:
+ if not rheads: # new branch requires --force
+ self.ui.warn(_("abort: push creates new"
+ " remote branch '%s'!\n") %
+ self[updatelh[0]].branch())
+ else:
+ self.ui.warn(_("abort: push creates new remote heads!\n"))
+
+ self.ui.status(_("(did you forget to merge?"
+ " use push -f to force)\n"))
+ return False
+ return True
+
+ if not bases:
+ self.ui.status(_("no changes found\n"))
+ return None, 1
+ elif not force:
+ # Check for each named branch if we're creating new remote heads.
+ # To be a remote head after push, node must be either:
+ # - unknown locally
+ # - a local outgoing head descended from update
+ # - a remote head that's known locally and not
+ # ancestral to an outgoing head
+ #
+ # New named branches cannot be created without --force.
+
+ if remote_heads != [nullid]:
+ if remote.capable('branchmap'):
+ localhds = {}
+ if not revs:
+ localhds = self.branchmap()
+ else:
+ for n in heads:
+ branch = self[n].branch()
+ if branch in localhds:
+ localhds[branch].append(n)
+ else:
+ localhds[branch] = [n]
+
+ remotehds = remote.branchmap()
+
+ for lh in localhds:
+ if lh in remotehds:
+ rheads = remotehds[lh]
+ else:
+ rheads = []
+ lheads = localhds[lh]
+ updatelh = [upd for upd in update
+ if self[upd].branch() == lh]
+ if not updatelh:
+ continue
+ if not checkbranch(lheads, rheads, updatelh):
+ return None, 0
+ else:
+ if not checkbranch(heads, remote_heads, update):
+ return None, 0
+
+ if inc:
+ self.ui.warn(_("note: unsynced remote changes!\n"))
+
+
+ if revs is None:
+ # use the fast path, no race possible on push
+ cg = self._changegroup(common.keys(), 'push')
+ else:
+ cg = self.changegroupsubset(update, revs, 'push')
+ return cg, remote_heads
+
+ def push_addchangegroup(self, remote, force, revs):
+ lock = remote.lock()
+ try:
+ ret = self.prepush(remote, force, revs)
+ if ret[0] is not None:
+ cg, remote_heads = ret
+ return remote.addchangegroup(cg, 'push', self.url())
+ return ret[1]
+ finally:
+ lock.release()
+
+ def push_unbundle(self, remote, force, revs):
+ # local repo finds heads on server, finds out what revs it
+ # must push. once revs transferred, if server finds it has
+ # different heads (someone else won commit/push race), server
+ # aborts.
+
+ ret = self.prepush(remote, force, revs)
+ if ret[0] is not None:
+ cg, remote_heads = ret
+ if force: remote_heads = ['force']
+ return remote.unbundle(cg, remote_heads, 'push')
+ return ret[1]
+
+ def changegroupinfo(self, nodes, source):
+ if self.ui.verbose or source == 'bundle':
+ self.ui.status(_("%d changesets found\n") % len(nodes))
+ if self.ui.debugflag:
+ self.ui.debug(_("list of changesets:\n"))
+ for node in nodes:
+ self.ui.debug("%s\n" % hex(node))
+
+ def changegroupsubset(self, bases, heads, source, extranodes=None):
+ """This function generates a changegroup consisting of all the nodes
+        that are descendants of any of the bases, and ancestors of any of
+ the heads.
+
+ It is fairly complex as determining which filenodes and which
+ manifest nodes need to be included for the changeset to be complete
+ is non-trivial.
+
+ Another wrinkle is doing the reverse, figuring out which changeset in
+ the changegroup a particular filenode or manifestnode belongs to.
+
+ The caller can specify some nodes that must be included in the
+ changegroup using the extranodes argument. It should be a dict
+ where the keys are the filenames (or 1 for the manifest), and the
+ values are lists of (node, linknode) tuples, where node is a wanted
+ node and linknode is the changelog node that should be transmitted as
+ the linkrev.
+ """
+
+ if extranodes is None:
+ # can we go through the fast path ?
+ heads.sort()
+ allheads = self.heads()
+ allheads.sort()
+ if heads == allheads:
+ common = []
+ # parents of bases are known from both sides
+ for n in bases:
+ for p in self.changelog.parents(n):
+ if p != nullid:
+ common.append(p)
+ return self._changegroup(common, source)
+
+ self.hook('preoutgoing', throw=True, source=source)
+
+ # Set up some initial variables
+ # Make it easy to refer to self.changelog
+ cl = self.changelog
+ # msng is short for missing - compute the list of changesets in this
+ # changegroup.
+ msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
+ self.changegroupinfo(msng_cl_lst, source)
+ # Some bases may turn out to be superfluous, and some heads may be
+ # too. nodesbetween will return the minimal set of bases and heads
+ # necessary to re-create the changegroup.
+
+ # Known heads are the list of heads that it is assumed the recipient
+ # of this changegroup will know about.
+ knownheads = set()
+ # We assume that all parents of bases are known heads.
+ for n in bases:
+ knownheads.update(cl.parents(n))
+ knownheads.discard(nullid)
+ knownheads = list(knownheads)
+ if knownheads:
+ # Now that we know what heads are known, we can compute which
+ # changesets are known. The recipient must know about all
+ # changesets required to reach the known heads from the null
+ # changeset.
+ has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
+ junk = None
+ # Transform the list into a set.
+ has_cl_set = set(has_cl_set)
+ else:
+ # If there were no known heads, the recipient cannot be assumed to
+ # know about any changesets.
+ has_cl_set = set()
+
+ # Make it easy to refer to self.manifest
+ mnfst = self.manifest
+ # We don't know which manifests are missing yet
+ msng_mnfst_set = {}
+ # Nor do we know which filenodes are missing.
+ msng_filenode_set = {}
+
+ junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
+ junk = None
+
+ # A changeset always belongs to itself, so the changenode lookup
+ # function for a changenode is identity.
+ def identity(x):
+ return x
+
+ # A function generating function. Sets up an environment for the
+ # inner function.
+ def cmp_by_rev_func(revlog):
+ # Compare two nodes by their revision number in the environment's
+ # revision history. Since the revision number both represents the
+ # most efficient order to read the nodes in, and represents a
+ # topological sorting of the nodes, this function is often useful.
+ def cmp_by_rev(a, b):
+ return cmp(revlog.rev(a), revlog.rev(b))
+ return cmp_by_rev
+
+ # If we determine that a particular file or manifest node must be a
+ # node that the recipient of the changegroup will already have, we can
+ # also assume the recipient will have all the parents. This function
+ # prunes them from the set of missing nodes.
+ def prune_parents(revlog, hasset, msngset):
+ haslst = list(hasset)
+ haslst.sort(cmp_by_rev_func(revlog))
+ for node in haslst:
+ parentlst = [p for p in revlog.parents(node) if p != nullid]
+ while parentlst:
+ n = parentlst.pop()
+ if n not in hasset:
+ hasset.add(n)
+ p = [p for p in revlog.parents(n) if p != nullid]
+ parentlst.extend(p)
+ for n in hasset:
+ msngset.pop(n, None)
+
+ # This is a function generating function used to set up an environment
+ # for the inner function to execute in.
+ def manifest_and_file_collector(changedfileset):
+ # This is an information gathering function that gathers
+ # information from each changeset node that goes out as part of
+ # the changegroup. The information gathered is a list of which
+ # manifest nodes are potentially required (the recipient may
+ # already have them) and total list of all files which were
+ # changed in any changeset in the changegroup.
+ #
+ # We also remember the first changenode we saw any manifest
+ # referenced by so we can later determine which changenode 'owns'
+ # the manifest.
+ def collect_manifests_and_files(clnode):
+ c = cl.read(clnode)
+ for f in c[3]:
+ # This is to make sure we only have one instance of each
+ # filename string for each filename.
+ changedfileset.setdefault(f, f)
+ msng_mnfst_set.setdefault(c[0], clnode)
+ return collect_manifests_and_files
+
+ # Figure out which manifest nodes (of the ones we think might be part
+ # of the changegroup) the recipient must know about and remove them
+ # from the changegroup.
+ def prune_manifests():
+ has_mnfst_set = set()
+ for n in msng_mnfst_set:
+ # If a 'missing' manifest thinks it belongs to a changenode
+ # the recipient is assumed to have, obviously the recipient
+ # must have that manifest.
+ linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
+ if linknode in has_cl_set:
+ has_mnfst_set.add(n)
+ prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
+
+ # Use the information collected in collect_manifests_and_files to say
+ # which changenode any manifestnode belongs to.
+ def lookup_manifest_link(mnfstnode):
+ return msng_mnfst_set[mnfstnode]
+
+        # A function-generating function that sets up the initial environment
+        # for the inner function.
+ def filenode_collector(changedfiles):
+ next_rev = [0]
+ # This gathers information from each manifestnode included in the
+ # changegroup about which filenodes the manifest node references
+ # so we can include those in the changegroup too.
+ #
+ # It also remembers which changenode each filenode belongs to. It
+        # does this by assuming that a filenode belongs to the changenode
+        # that the first manifest referencing it belongs to.
+ def collect_msng_filenodes(mnfstnode):
+ r = mnfst.rev(mnfstnode)
+ if r == next_rev[0]:
+ # If the last rev we looked at was the one just previous,
+ # we only need to see a diff.
+ deltamf = mnfst.readdelta(mnfstnode)
+ # For each line in the delta
+ for f, fnode in deltamf.iteritems():
+ f = changedfiles.get(f, None)
+ # And if the file is in the list of files we care
+ # about.
+ if f is not None:
+ # Get the changenode this manifest belongs to
+ clnode = msng_mnfst_set[mnfstnode]
+ # Create the set of filenodes for the file if
+ # there isn't one already.
+ ndset = msng_filenode_set.setdefault(f, {})
+ # And set the filenode's changelog node to the
+ # manifest's if it hasn't been set already.
+ ndset.setdefault(fnode, clnode)
+ else:
+ # Otherwise we need a full manifest.
+ m = mnfst.read(mnfstnode)
+                # For every file we care about.
+ for f in changedfiles:
+ fnode = m.get(f, None)
+ # If it's in the manifest
+ if fnode is not None:
+ # See comments above.
+ clnode = msng_mnfst_set[mnfstnode]
+ ndset = msng_filenode_set.setdefault(f, {})
+ ndset.setdefault(fnode, clnode)
+ # Remember the revision we hope to see next.
+ next_rev[0] = r + 1
+ return collect_msng_filenodes
+
+        # We have a list of filenodes we think we need for a file; let's remove
+ # all those we know the recipient must have.
+ def prune_filenodes(f, filerevlog):
+ msngset = msng_filenode_set[f]
+ hasset = set()
+ # If a 'missing' filenode thinks it belongs to a changenode we
+ # assume the recipient must have, then the recipient must have
+ # that filenode.
+ for n in msngset:
+ clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
+ if clnode in has_cl_set:
+ hasset.add(n)
+ prune_parents(filerevlog, hasset, msngset)
+
+        # A function-generating function that sets up a context for the
+ # inner function.
+ def lookup_filenode_link_func(fname):
+ msngset = msng_filenode_set[fname]
+ # Lookup the changenode the filenode belongs to.
+ def lookup_filenode_link(fnode):
+ return msngset[fnode]
+ return lookup_filenode_link
+
+ # Add the nodes that were explicitly requested.
+ def add_extra_nodes(name, nodes):
+ if not extranodes or name not in extranodes:
+ return
+
+ for node, linknode in extranodes[name]:
+ if node not in nodes:
+ nodes[node] = linknode
+
+        # Now that we have all these utility functions to help out and
+ # logically divide up the task, generate the group.
+ def gengroup():
+ # The set of changed files starts empty.
+ changedfiles = {}
+ # Create a changenode group generator that will call our functions
+ # back to lookup the owning changenode and collect information.
+ group = cl.group(msng_cl_lst, identity,
+ manifest_and_file_collector(changedfiles))
+ for chnk in group:
+ yield chnk
+
+ # The list of manifests has been collected by the generator
+ # calling our functions back.
+ prune_manifests()
+ add_extra_nodes(1, msng_mnfst_set)
+ msng_mnfst_lst = msng_mnfst_set.keys()
+ # Sort the manifestnodes by revision number.
+ msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
+ # Create a generator for the manifestnodes that calls our lookup
+ # and data collection functions back.
+ group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
+ filenode_collector(changedfiles))
+ for chnk in group:
+ yield chnk
+
+ # These are no longer needed, dereference and toss the memory for
+ # them.
+ msng_mnfst_lst = None
+ msng_mnfst_set.clear()
+
+ if extranodes:
+ for fname in extranodes:
+ if isinstance(fname, int):
+ continue
+ msng_filenode_set.setdefault(fname, {})
+ changedfiles[fname] = 1
+ # Go through all our files in order sorted by name.
+ for fname in sorted(changedfiles):
+ filerevlog = self.file(fname)
+ if not len(filerevlog):
+ raise util.Abort(_("empty or missing revlog for %s") % fname)
+ # Toss out the filenodes that the recipient isn't really
+ # missing.
+ if fname in msng_filenode_set:
+ prune_filenodes(fname, filerevlog)
+ add_extra_nodes(fname, msng_filenode_set[fname])
+ msng_filenode_lst = msng_filenode_set[fname].keys()
+ else:
+ msng_filenode_lst = []
+ # If any filenodes are left, generate the group for them,
+ # otherwise don't bother.
+ if len(msng_filenode_lst) > 0:
+ yield changegroup.chunkheader(len(fname))
+ yield fname
+ # Sort the filenodes by their revision #
+ msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
+ # Create a group generator and only pass in a changenode
+ # lookup function as we need to collect no information
+ # from filenodes.
+ group = filerevlog.group(msng_filenode_lst,
+ lookup_filenode_link_func(fname))
+ for chnk in group:
+ yield chnk
+ if fname in msng_filenode_set:
+ # Don't need this anymore, toss it to free memory.
+ del msng_filenode_set[fname]
+ # Signal that no more groups are left.
+ yield changegroup.closechunk()
+
+ if msng_cl_lst:
+ self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
+
+ return util.chunkbuffer(gengroup())
+
+ def changegroup(self, basenodes, source):
+ # to avoid a race we use changegroupsubset() (issue1320)
+ return self.changegroupsubset(basenodes, self.heads(), source)
+
+ def _changegroup(self, common, source):
+ """Generate a changegroup of all nodes that we have that a recipient
+ doesn't.
+
+ This is much easier than the previous function as we can assume that
+ the recipient has any changenode we aren't sending them.
+
+ common is the set of common nodes between remote and self"""
+
+ self.hook('preoutgoing', throw=True, source=source)
+
+ cl = self.changelog
+ nodes = cl.findmissing(common)
+ revset = set([cl.rev(n) for n in nodes])
+ self.changegroupinfo(nodes, source)
+
+ def identity(x):
+ return x
+
+ def gennodelst(log):
+ for r in log:
+ if log.linkrev(r) in revset:
+ yield log.node(r)
+
+ def changed_file_collector(changedfileset):
+ def collect_changed_files(clnode):
+ c = cl.read(clnode)
+ changedfileset.update(c[3])
+ return collect_changed_files
+
+ def lookuprevlink_func(revlog):
+ def lookuprevlink(n):
+ return cl.node(revlog.linkrev(revlog.rev(n)))
+ return lookuprevlink
+
+ def gengroup():
+ # construct a list of all changed files
+ changedfiles = set()
+
+ for chnk in cl.group(nodes, identity,
+ changed_file_collector(changedfiles)):
+ yield chnk
+
+ mnfst = self.manifest
+ nodeiter = gennodelst(mnfst)
+ for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
+ yield chnk
+
+ for fname in sorted(changedfiles):
+ filerevlog = self.file(fname)
+ if not len(filerevlog):
+ raise util.Abort(_("empty or missing revlog for %s") % fname)
+ nodeiter = gennodelst(filerevlog)
+ nodeiter = list(nodeiter)
+ if nodeiter:
+ yield changegroup.chunkheader(len(fname))
+ yield fname
+ lookup = lookuprevlink_func(filerevlog)
+ for chnk in filerevlog.group(nodeiter, lookup):
+ yield chnk
+
+ yield changegroup.closechunk()
+
+ if nodes:
+ self.hook('outgoing', node=hex(nodes[0]), source=source)
+
+ return util.chunkbuffer(gengroup())
+
+ def addchangegroup(self, source, srctype, url, emptyok=False):
+ """add changegroup to repo.
+
+ return values:
+ - nothing changed or no source: 0
+ - more heads than before: 1+added heads (2..n)
+        - fewer heads than before: -1-removed heads (-2..-n)
+ - number of heads stays the same: 1
+ """
+ def csmap(x):
+ self.ui.debug(_("add changeset %s\n") % short(x))
+ return len(cl)
+
+ def revmap(x):
+ return cl.rev(x)
+
+ if not source:
+ return 0
+
+ self.hook('prechangegroup', throw=True, source=srctype, url=url)
+
+ changesets = files = revisions = 0
+
+ # write changelog data to temp files so concurrent readers will not see
+ # inconsistent view
+ cl = self.changelog
+ cl.delayupdate()
+ oldheads = len(cl.heads())
+
+ tr = self.transaction()
+ try:
+ trp = weakref.proxy(tr)
+ # pull off the changeset group
+ self.ui.status(_("adding changesets\n"))
+ clstart = len(cl)
+ chunkiter = changegroup.chunkiter(source)
+ if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
+ raise util.Abort(_("received changelog group is empty"))
+ clend = len(cl)
+ changesets = clend - clstart
+
+ # pull off the manifest group
+ self.ui.status(_("adding manifests\n"))
+ chunkiter = changegroup.chunkiter(source)
+ # no need to check for empty manifest group here:
+ # if the result of the merge of 1 and 2 is the same in 3 and 4,
+ # no new manifest will be created and the manifest group will
+ # be empty during the pull
+ self.manifest.addgroup(chunkiter, revmap, trp)
+
+ # process the files
+ self.ui.status(_("adding file changes\n"))
+ while 1:
+ f = changegroup.getchunk(source)
+ if not f:
+ break
+ self.ui.debug(_("adding %s revisions\n") % f)
+ fl = self.file(f)
+ o = len(fl)
+ chunkiter = changegroup.chunkiter(source)
+ if fl.addgroup(chunkiter, revmap, trp) is None:
+ raise util.Abort(_("received file revlog group is empty"))
+ revisions += len(fl) - o
+ files += 1
+
+ newheads = len(cl.heads())
+ heads = ""
+ if oldheads and newheads != oldheads:
+ heads = _(" (%+d heads)") % (newheads - oldheads)
+
+ self.ui.status(_("added %d changesets"
+ " with %d changes to %d files%s\n")
+ % (changesets, revisions, files, heads))
+
+ if changesets > 0:
+ p = lambda: cl.writepending() and self.root or ""
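+                # as in commitctx, the pending callback exposes the new
+                # changesets to the pretxnchangegroup hook before finalize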
+ self.hook('pretxnchangegroup', throw=True,
+ node=hex(cl.node(clstart)), source=srctype,
+ url=url, pending=p)
+
+ # make changelog see real files again
+ cl.finalize(trp)
+
+ tr.close()
+ finally:
+ del tr
+
+ if changesets > 0:
+ # forcefully update the on-disk branch cache
+ self.ui.debug(_("updating the branch cache\n"))
+ self.branchtags()
+ self.hook("changegroup", node=hex(cl.node(clstart)),
+ source=srctype, url=url)
+
+ for i in xrange(clstart, clend):
+ self.hook("incoming", node=hex(cl.node(i)),
+ source=srctype, url=url)
+
+ # never return 0 here:
+ if newheads < oldheads:
+ return newheads - oldheads - 1
+ else:
+ return newheads - oldheads + 1
+
+
+ def stream_in(self, remote):
+ fp = remote.stream_out()
+ l = fp.readline()
+ try:
+ resp = int(l)
+ except ValueError:
+ raise error.ResponseError(
+ _('Unexpected response from remote server:'), l)
+ if resp == 1:
+ raise util.Abort(_('operation forbidden by server'))
+ elif resp == 2:
+ raise util.Abort(_('locking the remote repository failed'))
+ elif resp != 0:
+ raise util.Abort(_('the server sent an unknown error code'))
+ self.ui.status(_('streaming all changes\n'))
+ l = fp.readline()
+ try:
+ total_files, total_bytes = map(int, l.split(' ', 1))
+ except (ValueError, TypeError):
+ raise error.ResponseError(
+ _('Unexpected response from remote server:'), l)
+ self.ui.status(_('%d files to transfer, %s of data\n') %
+ (total_files, util.bytecount(total_bytes)))
+ start = time.time()
+ for i in xrange(total_files):
+ # XXX doesn't support '\n' or '\r' in filenames
+ l = fp.readline()
+ try:
+ name, size = l.split('\0', 1)
+ size = int(size)
+ except (ValueError, TypeError):
+ raise error.ResponseError(
+ _('Unexpected response from remote server:'), l)
+ self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
+ # for backwards compat, name was partially encoded
+ ofp = self.sopener(store.decodedir(name), 'w')
+ for chunk in util.filechunkiter(fp, limit=size):
+ ofp.write(chunk)
+ ofp.close()
+ elapsed = time.time() - start
+ if elapsed <= 0:
+ elapsed = 0.001
+ self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
+ (util.bytecount(total_bytes), elapsed,
+ util.bytecount(total_bytes / elapsed)))
+ self.invalidate()
+ return len(self.heads()) + 1
+
+ def clone(self, remote, heads=[], stream=False):
+ '''clone remote repository.
+
+ keyword arguments:
+ heads: list of revs to clone (forces use of pull)
+ stream: use streaming clone if possible'''
+
+ # now, all clients that can request uncompressed clones can
+ # read repo formats supported by all servers that can serve
+ # them.
+
+ # if revlog format changes, client will have to check version
+ # and format flags on "stream" capability, and use
+ # uncompressed only if compatible.
+
+ if stream and not heads and remote.capable('stream'):
+ return self.stream_in(remote)
+ return self.pull(remote, heads)
+
+# used to avoid circular references so destructors work
+def aftertrans(files):
+ renamefiles = [tuple(t) for t in files]
+ def a():
+ for src, dest in renamefiles:
+ util.rename(src, dest)
+ return a
+
+def instance(ui, path, create):
+ return localrepository(ui, util.drop_scheme('file', path), create)
+
+def islocal(path):
+ return True
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/lock.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/lock.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,128 @@
+# lock.py - simple locking scheme for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2, incorporated herein by reference.
+
+import util, error
+import errno, os, socket, time
+import warnings
+
+class lock(object):
+ # lock is symlink on platforms that support it, file on others.
+
+ # symlink is used because create of directory entry and contents
+ # are atomic even over nfs.
+
+ # old-style lock: symlink to pid
+ # new-style lock: symlink to hostname:pid
+
+ _host = None
+
+ def __init__(self, file, timeout=-1, releasefn=None, desc=None):
+ self.f = file
+ self.held = 0
+ self.timeout = timeout
+ self.releasefn = releasefn
+ self.desc = desc
+ self.lock()
+
+ def __del__(self):
+ if self.held:
+ warnings.warn("use lock.release instead of del lock",
+ category=DeprecationWarning,
+ stacklevel=2)
+
+ # ensure the lock will be removed
+ # even if recursive locking did occur
+ self.held = 1
+
+ self.release()
+
+ def lock(self):
+ timeout = self.timeout
+ while 1:
+ try:
+ self.trylock()
+ return 1
+ except error.LockHeld, inst:
+ if timeout != 0:
+ time.sleep(1)
+ if timeout > 0:
+ timeout -= 1
+ continue
+ raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
+ inst.locker)
+
+ def trylock(self):
+ if self.held:
+ self.held += 1
+ return
+ if lock._host is None:
+ lock._host = socket.gethostname()
+ lockname = '%s:%s' % (lock._host, os.getpid())
+ while not self.held:
+ try:
+ util.makelock(lockname, self.f)
+ self.held = 1
+ except (OSError, IOError), why:
+ if why.errno == errno.EEXIST:
+ locker = self.testlock()
+ if locker is not None:
+ raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
+ locker)
+ else:
+ raise error.LockUnavailable(why.errno, why.strerror,
+ why.filename, self.desc)
+
+ def testlock(self):
+ """return id of locker if lock is valid, else None.
+
+ If old-style lock, we cannot tell what machine locker is on.
+        With new-style lock, if locker is on this machine, we can
+ see if locker is alive. If locker is on this machine but
+ not alive, we can safely break lock.
+
+ The lock file is only deleted when None is returned.
+
+ """
+ locker = util.readlock(self.f)
+ try:
+ host, pid = locker.split(":", 1)
+ except ValueError:
+ return locker
+ if host != lock._host:
+ return locker
+ try:
+ pid = int(pid)
+ except:
+ return locker
+ if util.testpid(pid):
+ return locker
+ # if locker dead, break lock. must do this with another lock
+ # held, or can race and break valid lock.
+ try:
+ l = lock(self.f + '.break')
+ l.trylock()
+ os.unlink(self.f)
+ l.release()
+ except error.LockError:
+ return locker
+
+ def release(self):
+ if self.held > 1:
+ self.held -= 1
+        elif self.held == 1:
+ self.held = 0
+ if self.releasefn:
+ self.releasefn()
+ try:
+ os.unlink(self.f)
+ except: pass
+
+def release(*locks):
+ for lock in locks:
+ if lock is not None:
+ lock.release()
+
diff -r d86e762a994f -r 496dbf12a6cb upmana/mercurial/lsprof.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/upmana/mercurial/lsprof.py Fri Oct 30 22:21:40 2009 -0500
@@ -0,0 +1,115 @@
+#! /usr/bin/env python
+
+import sys
+from _lsprof import Profiler, profiler_entry
+
+__all__ = ['profile', 'Stats']
+
+def profile(f, *args, **kwds):
+ """XXX docstring"""
+ p = Profiler()
+ p.enable(subcalls=True, builtins=True)
+ try:
+ f(*args, **kwds)
+ finally:
+ p.disable()
+ return Stats(p.getstats())
+
+
+class Stats(object):
+ """XXX docstring"""
+
+ def __init__(self, data):
+ self.data = data
+
+ def sort(self, crit="inlinetime"):
+ """XXX docstring"""
+ if crit not in profiler_entry.__dict__:
+ raise ValueError("Can't sort by %s" % crit)
+ self.data.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+ for e in self.data:
+ if e.calls:
+ e.calls.sort(lambda b, a: cmp(getattr(a, crit),
+ getattr(b, crit)))
+
+ def pprint(self, top=None, file=None, limit=None, climit=None):
+ """XXX docstring"""
+ if file is None:
+ file = sys.stdout
+ d = self.data
+ if top is not None:
+ d = d[:top]
+ cols = "% 12s %12s %11.4f %11.4f %s\n"
+ hcols = "% 12s %12s %12s %12s %s\n"
+ file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
+ "Inline(ms)", "module:lineno(function)"))
+ count = 0
+ for e in d:
+ file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
+ e.inlinetime, label(e.code)))
+ count += 1
+ if limit is not None and count == limit:
+ return
+ ccount = 0
+ if e.calls:
+ for se in e.calls:
+ file.write(cols % ("+%s" % se.callcount, se.reccallcount,
+ se.totaltime, se.inlinetime,
+ "+%s" % label(se.code)))
+ count += 1
+ ccount += 1
+ if limit is not None and count == limit:
+ return
+ if climit is not None and ccount == climit:
+ break
+
+ def freeze(self):
+ """Replace all references to code objects with string
+ descriptions; this makes it possible to pickle the instance."""
+
+ # this code is probably rather ickier than it needs to be!
+ for i in range(len(self.data)):
+ e = self.data[i]
+ if not isinstance(e.code, str):
+ self.data[i] = type(e)((label(e.code),) + e[1:])
+ if e.calls:
+ for j in range(len(e.calls)):
+ se = e.calls[j]
+ if not isinstance(se.code, str):
+ e.calls[j] = type(se)((label(se.code),) + se[1:])
+
+_fn2mod = {}
+
+def label(code):
+ if isinstance(code, str):
+ return code
+ try:
+ mname = _fn2mod[code.co_filename]
+ except KeyError:
+ for k, v in sys.modules.iteritems():
+ if v is None:
+ continue
+ if not hasattr(v, '__file__'):
+ continue
+ if not isinstance(v.__file__, str):
+ continue
+ if v.__file__.startswith(code.co_filename):
+ mname = _fn2mod[code.co_filename] = k
+ break
+ else:
+ mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename
+
+ return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+
+
+if __name__ == '__main__':
+ import os
+ sys.argv = sys.argv[1:]
+ if not sys.argv:
+ print >> sys.stderr, "usage: lsprof.py