changeset 0:549d0d17895d tip

init checkin of an almost cleaned up codebase
author Victor Ng <victor@monkeybean.ca>
date Sun, 25 Jan 2009 00:02:22 -0500
parents
children
files .hgignore README README.TXT jtds/__init__.py jtds/mssql2k/__init__.py jtds/mssql2k/base.py jtds/mssql2k/call_sp.py jtds/mssql2k/compare_mutually_referential.sql jtds/mssql2k/creation.py jtds/mssql2k/custom_queries.py jtds/mssql2k/custom_triggers.sql jtds/mssql2k/deferred_inserts.sql jtds/mssql2k/introspection.py jtds/mssql2k/operations.py jtds/mssql2k/pagination.sql jtds/mssql2k/pool.py jtds/mssql2k/query.py jtds/mssql2kext/__init__.py jtds/mssql2kext/management/__init__.py jtds/mssql2kext/management/commands/__init__.py jtds/mssql2kext/management/commands/sql2kext.py jtds/mssql2kext/models.py jtds/mssql2kext/views.py setup.py
diffstat 18 files changed, 2049 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.hgignore	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,8 @@
+syntax: glob
+
+*.pyc
+*.swp
+*egg-info
+*.class
+build
+dist
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,5 @@
+This is a Jython package to provide a jTDS backend to Django that is
+specific to Microsoft SQL Server 2000.
+
+No attempt is made to maintain compatibility with any version other than SQL
+Server 2000.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README.TXT	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,8 @@
+
+
+SQL 2k query
+
+SELECT (SELECT SUM(1)
+        FROM   foo_simple
+        WHERE  auto <= reg.auto) AS row_number, *
+FROM   foo_simple reg
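+
+This correlated subquery emulates ROW_NUMBER(), which SQL Server 2000 lacks:
+each row's row_number is the count of rows whose auto value is less than or
+equal to its own.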
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/base.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,233 @@
+"""
+jTDS/MSSQL database backend for Django.
+
+This backend runs under Jython and targets Microsoft SQL Server 2000 through
+the jTDS JDBC driver via zxJDBC.  If the driver stack cannot be loaded,
+ImproperlyConfigured is raised at import time.
+"""
+
+try:
+    # Force the database driver to load 
+    from java.lang import Class
+    cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
+    from pool import ManualPoolingDriver
+    from com.ziclix.python.sql import zxJDBC as Database
+    from com.ziclix.python.sql import zxJDBC
+    from com.ziclix.python.sql import PyStatement, PyExtendedCursor, PyCursor, PyConnection
+except ImportError, e:
+    from django.core.exceptions import ImproperlyConfigured
+    raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
+
+from django.core.exceptions import ImproperlyConfigured
+from django.db.backends import *
+from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation
+from django.conf import settings
+from pool import ManualPoolingDriver
+
+from operations import DatabaseOperations
+from introspection import DatabaseIntrospection
+from creation import DatabaseCreation
+
+if not hasattr(settings, "DATABASE_COLLATION"):
+    settings.DATABASE_COLLATION = 'Latin1_General_CI_AS'
+
+def complain(*args, **kwargs):
+    raise ImproperlyConfigured, "This operation is not supported by the jTDS backend yet."
+
+DatabaseError = zxJDBC.DatabaseError
+IntegrityError = zxJDBC.IntegrityError
+
+class DatabaseClient(BaseDatabaseClient):
+    runshell = complain
+    
+
+class DatabaseWrapper(BaseDatabaseWrapper):    
+    operators = {
+        # Since '=' is used not only for string comparison there is no way
+        # to make it case (in)sensitive. It will simply fall back to the
+        # database collation.
+        'exact': '= %s ',
+        'iexact': "= UPPER(%s) ",
+        'contains': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
+        'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE "+ settings.DATABASE_COLLATION,
+        'gt': '> %s',
+        'gte': '>= %s',
+        'lt': '< %s',
+        'lte': '<= %s',
+        'startswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
+        'endswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
+        'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
+        'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
+    }
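+
+    # For illustration (hypothetical model/field names): a lookup such as
+    #   Entry.objects.filter(headline__istartswith='foo')
+    # should render against this table as roughly
+    #   WHERE [headline] LIKE UPPER(%s) ESCAPE '\' COLLATE Latin1_General_CI_AS
+    # with the parameter prepared by prep_for_like_query() in operations.py.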
+
+
+    def _register_driver(self):
+        # Configure the pooled connection driver
+        if self._LAST_DATABASE_NAME == settings.DATABASE_NAME:
+            return "jdbc_pool_%s" % self._db_count
+
+        self._db_count += 1
+        pool_name = "jdbc_pool_%s" % self._db_count
+
+        db_dict = {
+                'DATABASE_NAME': settings.DATABASE_NAME,
+                'DATABASE_HOST': settings.DATABASE_HOST or 'localhost',
+                'DATABASE_PORT': settings.DATABASE_PORT or 1433,
+                }
+        self.driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(DATABASE_HOST)s:%(DATABASE_PORT)s/%(DATABASE_NAME)s" % db_dict, 
+                                    settings.DATABASE_USER, 
+                                    settings.DATABASE_PASSWORD,
+                                    pool_name,
+                                    )
+        self._LAST_DATABASE_NAME = settings.DATABASE_NAME
+
+        return pool_name
+
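+    # For illustration: with DATABASE_NAME='mydb' and the default host/port,
+    # the pool created above wraps the URL
+    #   jdbc:jtds:sqlserver://localhost:1433/mydb
+    # and _cursor() below connects through jdbc:apache:commons:dbcp:jdbc_pool_1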
+    def _cursor(self, settings):
+        '''
+        Implementation specific cursor
+        '''
+        new_conn = False
+        if self.connection is None:
+            new_conn = True
+
+            pool_name = self._register_driver()
+
+            if not settings.DATABASE_NAME:
+                from django.core.exceptions import ImproperlyConfigured
+                raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
+
+            url='jdbc:apache:commons:dbcp:%s' % pool_name
+            self.connection = Database.connect(url, None, None, 'org.apache.commons.dbcp.PoolingDriver')
+
+        cursor = self.connection.cursor()
+        if new_conn:
+            cursor.execute("SET DATEFORMAT ymd")
+
+            # SQL Server violates the SQL standard w.r.t. handling NULL values in UNIQUE columns.
+            # We work around this by creating schema-bound views on tables with nullable unique columns,
+            # but we need to modify the cursor session to abort if the view has problems.
+            # See http://blogs.msdn.com/sqlcat/archive/2005/12/20/506138.aspx
+            cursor.execute("SET ARITHABORT ON")
+            cursor.execute("SET CONCAT_NULL_YIELDS_NULL ON")
+            cursor.execute("SET QUOTED_IDENTIFIER ON")
+            cursor.execute("SET ANSI_NULLS ON")
+            cursor.execute("SET ANSI_PADDING ON")
+            cursor.execute("SET ANSI_WARNINGS ON")
+            cursor.execute("SET NUMERIC_ROUNDABORT OFF")
+            cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
+
+            # jTDS can't execute some SQL, like CREATE DATABASE etc., inside a
+            # multi-statement batch, so we commit the session setup statements
+            # above to avoid this.
+            self.connection.commit()
+
+        return CursorWrapper(cursor)
+
+    def __init__(self, autocommit=False, **kwargs):
+        super(DatabaseWrapper, self).__init__(autocommit=autocommit, **kwargs)
+        self._LAST_DATABASE_NAME = None
+        self.connection = None
+        self._db_count = 0
+
+        self.features = DatabaseFeatures()
+        self.ops = DatabaseOperations()
+        self.client = DatabaseClient()                      # XXX: No client is supported yet
+        self.creation = DatabaseCreation(self)              # Basic type declarations for creating tables
+        self.introspection = DatabaseIntrospection(self)    
+        self.validation = BaseDatabaseValidation()          # XXX: No real database validation yet
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+    uses_custom_query_class = True
+
+
+
+
+class CursorWrapper(object):
+    """
+    A wrapper around zxJDBC's cursor that takes into account some zxJDBC
+    DB-API 2.0 implementation and common JDBC driver particularities.
+    """
+    def __init__(self, cursor):
+        self.cursor = cursor
+
+    def format_sql(self, sql):
+        # zxJDBC uses '?' instead of '%s' as the parameter placeholder, e.g.
+        # "WHERE id = %s" becomes "WHERE id = ?".
+        if "%s" in sql:
+            sql = sql.replace('%s', '?')
+        return sql
+
+    def format_params(self, params):
+        fp = []
+        for p in params:
+            p = coerce_sql2k_type(p)
+            fp.append(p)
+        return tuple(fp)
+
+    def execute(self, sql, params=()):
+        sql = self.format_sql(sql)
+        params = self.format_params(params)
+        return self.cursor.execute(sql, params)
+
+    def executemany(self, sql, params_list):
+        sql = self.format_sql(sql)
+        # zxjdbc's cursor.executemany() doesn't support an empty param_list
+        if not params_list:
+            if '?' in sql:
+                return
+        else:
+            raw_pll = params_list
+            params_list = [self.format_params(p) for p in raw_pll]
+        return self.cursor.executemany(sql, params_list)
+
+    def format_results(self, rows):
+        """
+        Decode data coming from the database if needed and convert rows to tuples
+        (zxjdbc Rows are not sliceable).
+        """
+        fr = []
+        for row in rows:
+            fr.append(row)
+        return tuple(fr)
+
+    def fetchone(self):
+        row = self.cursor.fetchone()
+        if row is not None:
+            return self.format_results(row)
+        return row
+
+    def fetchmany(self, chunk):
+        return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]
+
+    def fetchall(self):
+        return [self.format_results(row) for row in self.cursor.fetchall()]
+
+    def __getattr__(self, attr):
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+        else:
+            return getattr(self.cursor, attr)
+
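+# A minimal usage sketch (illustrative only; assumes a configured Django
+# settings module and a reachable SQL Server 2000 instance):
+#
+#   from django.conf import settings
+#   from jtds.mssql2k.base import DatabaseWrapper
+#   wrapper = DatabaseWrapper()
+#   cursor = wrapper._cursor(settings)
+#   cursor.execute("SELECT * FROM foo_simple WHERE auto = %s", (1,))
+#   print cursor.fetchone()
+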
+def coerce_sql2k_type(p):
+    '''
+    Need to coerce some python types to jTDS friendly types
+    so that PreparedStatement::setObject() can work properly
+    '''
+    if isinstance(p, type(True)):
+        if p:
+            return 1
+        else:
+            return 0
+    elif isinstance(p, type(5L)):
+        # zxJDBC doesn't like injecting long types, or maybe it
+        # actually depends on the underlying SQL datatype..
+        # Need to figure out a better fix for this
+        if p == int(p):
+            return int(p)
+        else:
+            raise RuntimeError, "SQL Server 2000 + jTDS can't seem to handle long values. Found: [%s]" % p
+    return p
+
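+# e.g. coerce_sql2k_type(True) -> 1; coerce_sql2k_type(42L) -> 42 (a plain int)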
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/call_sp.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,76 @@
+from pprint import pprint
+try:
+    # Force the database driver to load 
+    from java.lang import Class
+    cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
+    from jtds.mssql2k.pool import ManualPoolingDriver
+    from com.ziclix.python.sql import zxJDBC as Database
+except ImportError, e:
+    from django.core.exceptions import ImproperlyConfigured
+    raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
+
+db_dict = {
+        'DATABASE_NAME': 'test_zxjdbc.jtds',
+        'DATABASE_HOST': 'localhost',
+        'DATABASE_PORT': 1433,
+        }
+pool_name = 'jdbc_pool'
+print "Registering driver for : %s to [%s]" % (str(db_dict), pool_name)
+driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(DATABASE_HOST)s:%(DATABASE_PORT)s/%(DATABASE_NAME)s" % db_dict, 
+                            'sa',
+                            'sa',
+                            pool_name,
+                            )
+
+
+from java.sql import DriverManager
+from java.sql import Types
+url='jdbc:apache:commons:dbcp:%s' % pool_name
+conn = DriverManager.getConnection(url)
+proc = conn.prepareCall("returnpage(?, ?, ?, ?)")
+proc.setString('@query', "select * from foo_simple")
+proc.setString('@orderby', "auto asc")
+proc.setInt('@startrow', 5)
+proc.setInt('@endrow', 12)
+proc.execute()
+
+rset = proc.getResultSet()
+meta = rset.getMetaData()
+
+
+
+type_dict = {}
+for key in dir(Types):
+    type_dict[getattr(Types, key)]=key
+
+pprint (type_dict)
+
+# Map java.sql.Types constants to the matching ResultSet getter.
+getter = {
+    1:  rset.getString,     # CHAR
+    2:  rset.getLong,       # NUMERIC
+    3:  rset.getBigDecimal, # DECIMAL
+    4:  rset.getInt,        # INTEGER
+    5:  rset.getInt,        # SMALLINT
+    6:  rset.getFloat,      # FLOAT
+    7:  rset.getFloat,      # REAL
+    8:  rset.getDouble,     # DOUBLE
+    12: rset.getString,     # VARCHAR
+    16: rset.getBoolean,    # BOOLEAN
+    70: rset.getString,     # DATALINK
+    91: rset.getDate,       # DATE
+    92: rset.getTime,       # TIME
+    93: rset.getTimestamp,  # TIMESTAMP
+    }
+
+col_count = meta.getColumnCount()
+col_getter = {}
+
+for i in range(1, col_count+1):
+    col_getter[i] = getter[meta.getColumnType(i)]
+
+while rset.next():
+    for colnum in range(1, col_count+1):
+        value = col_getter[colnum](colnum)
+        print type(value), value,"|",
+    print 
+    print '-' * 20
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/compare_mutually_referential.sql	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,94 @@
+
+
+--- Postgresql
+
+BEGIN;
+CREATE TABLE "mutually_referential_parent" (
+    "id" serial NOT NULL PRIMARY KEY,
+    "name" varchar(100) NOT NULL,
+    "bestchild_id" integer NULL
+)
+;
+CREATE TABLE "mutually_referential_child" (
+    "id" serial NOT NULL PRIMARY KEY,
+    "name" varchar(100) NOT NULL,
+    "parent_id" integer NOT NULL REFERENCES "mutually_referential_parent" ("id") DEFERRABLE INITIALLY DEFERRED
+)
+;
+ALTER TABLE "mutually_referential_parent" ADD CONSTRAINT bestchild_id_refs_id_7ce81e28 FOREIGN KEY ("bestchild_id") REFERENCES "mutually_referential_child" ("id") DEFERRABLE INITIALLY DEFERRED;
+CREATE INDEX "mutually_referential_parent_bestchild_id" ON "mutually_referential_parent" ("bestchild_id");
+CREATE INDEX "mutually_referential_child_parent_id" ON "mutually_referential_child" ("parent_id");
+COMMIT;
+-- Notes: Postgresql lets you specify DEFERRABLE constraints, which allows us
+-- to rearrange relationships without having to worry until transaction commit
+-- time.
+
+--- SQLite3
+BEGIN;
+CREATE TABLE "mutually_referential_parent" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "name" varchar(100) NOT NULL,
+    "bestchild_id" integer NULL
+)
+;
+CREATE TABLE "mutually_referential_child" (
+    "id" integer NOT NULL PRIMARY KEY,
+    "name" varchar(100) NOT NULL,
+    "parent_id" integer NOT NULL REFERENCES "mutually_referential_parent" ("id")
+)
+;
+CREATE INDEX "mutually_referential_parent_bestchild_id" ON "mutually_referential_parent" ("bestchild_id");
+CREATE INDEX "mutually_referential_child_parent_id" ON "mutually_referential_child" ("parent_id");
+COMMIT;
+--- Notes: SQLite doesn't support adding/removing constraints, so we don't see the "bestchild" constraint here.
+
+-- SQL Server 2000
+
+BEGIN;
+CREATE TABLE [mutually_referential_parent] (
+    [id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+    [name] nvarchar(100) NOT NULL,
+    [bestchild_id] int NULL
+)
+;
+CREATE TABLE [mutually_referential_child] (
+    [id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+    [name] nvarchar(100) NOT NULL,
+    [parent_id] int NOT NULL REFERENCES [mutually_referential_parent] ([id])
+)
+;
+ALTER TABLE [mutually_referential_parent] ADD CONSTRAINT [bestchild_id_refs_id_4a696c71] FOREIGN KEY ([bestchild_id]) REFERENCES [mutually_referential_child] ([id]);
+CREATE INDEX [mutually_referential_parent_bestchild_id] ON [mutually_referential_parent] ([bestchild_id]);
+CREATE INDEX [mutually_referential_child_parent_id] ON [mutually_referential_child] ([parent_id]);
+DROP TRIGGER trd;
+CREATE TRIGGER trd ON mutually_referential_parent
+INSTEAD OF DELETE
+AS
+BEGIN
+update mutually_referential_child SET parent_id = NULL WHERE parent_id IN  (SELECT id FROM deleted)
+update mutually_referential_parent SET bestchild_id = NULL WHERE id IN  (SELECT id FROM deleted)
+END;
+DROP TRIGGER trd_child;
+CREATE TRIGGER trd_child ON mutually_referential_child
+INSTEAD OF DELETE
+AS
+BEGIN
+update mutually_referential_parent SET bestchild_id = NULL WHERE bestchild_id IN (SELECT id FROM deleted)
+update mutually_referential_child SET parent_id = NULL WHERE id IN (SELECT id FROM deleted)
+END;
+DROP TRIGGER trd_after;
+CREATE TRIGGER trd_after ON mutually_referential_parent
+AFTER DELETE
+AS
+BEGIN
+delete FROM mutually_referential_parent WHERE id IN (SELECT id FROM deleted)
+END;
+DROP TRIGGER trd_after_child;
+CREATE TRIGGER trd_after_child ON mutually_referential_child
+AFTER DELETE
+AS
+BEGIN
+delete FROM mutually_referential_child WHERE id IN (SELECT id FROM deleted) 
+END;
+COMMIT;
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/creation.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,404 @@
+from django.db.backends.creation import BaseDatabaseCreation
+
+class DatabaseCreation(BaseDatabaseCreation):
+    '''
+    Overloaded bits of the database creation code
+    '''
+
+    data_types = {
+        'AutoField':         'int IDENTITY (1, 1)',
+        'BooleanField':      'bit',
+        'CharField':         'nvarchar(%(max_length)s)',
+        'CommaSeparatedIntegerField': 'nvarchar(%(max_length)s)',
+        'DateField':         'smalldatetime',
+        'DateTimeField':     'datetime',
+        'DecimalField':      'numeric(%(max_digits)s, %(decimal_places)s)',
+        'FileField':         'nvarchar(%(max_length)s)',
+        'FilePathField':     'nvarchar(%(max_length)s)',
+        'FloatField':        'double precision',
+        'IntegerField':      'int',
+        'IPAddressField':    'nvarchar(15)',
+        'NullBooleanField':  'bit',
+        'OneToOneField':     'int',
+        'PositiveIntegerField': 'int CHECK ([%(column)s] >= 0)',
+        'PositiveSmallIntegerField': 'smallint CHECK ([%(column)s] >= 0)',
+        'SlugField':         'nvarchar(%(max_length)s)',
+        'SmallIntegerField': 'smallint',
+        'TextField':         'ntext',
+        'TimeField':         'datetime',
+    }
+
+    def __init__(self, connection):
+        super(DatabaseCreation,self).__init__(connection)
+
+        # Keep track of all unique nullable fields
+        self.unique_fields = []
+
+        # We need to keep track of all seen models and created models for
+        # ourself so that we can properly generate all the constraint triggers
+        self._seen_models = set()
+        self._created_models = set()
+
+        self._trigger_sql = set()
+
+    def create_test_db(self, verbosity=1, autoclobber=False):
+        result = super(DatabaseCreation, self).create_test_db(verbosity, autoclobber)
+
+        # Force the SQL2k command to run now.
+        from jtds.mssql2kext.management.commands import sql2kext 
+        sql2kext.Command().handle_noargs()
+
+        return result
+
+    def _destroy_test_db(self, test_database_name, verbosity):
+        cursor = self.connection.cursor()
+        if not self.connection.connection.autocommit:
+            self.connection.connection.commit()
+        self.connection.connection.autocommit = True
+        cursor.execute("ALTER DATABASE %s SET SINGLE_USER WITH ROLLBACK IMMEDIATE " % self.connection.ops.quote_name(test_database_name))
+        cursor.execute("DROP DATABASE %s" %self.connection.ops.quote_name(test_database_name))
+        self.connection.close()
+
+    def sql_for_many_to_many(self, model, style):
+        """
+        We need to inject the trigger code for a model after all the tables for this
+        application have been created.
+
+        The code goes in this method only because it's known that the syncdb command in
+        django.core.management.commands.syncdb calls this method last.
+
+        A better option would be to have a signal hook after all models have been
+        created, but before the applications are signalled, so that the database
+        backend can respond to creation before the individual applications do.
+        """
+        final_output = super(DatabaseCreation, self).sql_for_many_to_many(model, style)
+        from django.db import models
+        opts = model._meta
+        app_label = opts.app_label
+        app = [app for app in models.get_apps() if app.__name__.split('.')[-2] == app_label][0]
+        app_model_set = set(models.get_models(app))
+
+        # Wait until the app_model_set is finished loading
+        if app_model_set != (app_model_set & self._seen_models | set([model])):
+            return final_output
+
+        # This is the last model - we can safely assume it's ok to 
+        # inject all the constraint trigger code now
+        model_fkeys = {}
+        for model in app_model_set:
+            opts = model._meta
+            model_fkeys[model] = []
+            for f in opts.local_fields:
+                if f.rel:
+                    model_fkeys[model].append(f)
+
+        qn = self.connection.ops.quote_name
+
+        for model, model_keys in model_fkeys.items():
+            sql_block = []
+            # For each model, we want the list of all foreign keys
+            # to clear out references to other objects and to clear
+            # all inbound references to this model.
+            tmpl = '''UPDATE %(table)s SET %(this_to_rel)s = NULL where %(this_pkey)s in (SELECT %(this_pkey)s from deleted)'''
+            opts = model._meta
+            table = opts.db_table
+            this_pkey = [f for f in opts.local_fields if f.primary_key][0].column
+
+            for model_f in model_keys:
+                sql_dict = {'table': qn(table),
+                            'this_to_rel': qn(model_f.column),
+                            'this_pkey': qn(this_pkey),}
+                if model_f.null:
+                    sql_block.append(tmpl % sql_dict)
+
+            # Generate all inbound relationships and clear the foreign keys
+            for inbound_model in app_model_set:
+                inbound_rels = [(inbound_model, f) for f in model_fkeys[inbound_model] if f.rel.to == model]
+                for in_model, in_f in inbound_rels:
+                    tmpl = '''UPDATE %(other_table)s SET %(fkey)s = NULL where %(fkey)s in (SELECT %(this_pkey)s from deleted)'''
+                    rel_opts = in_model._meta
+                    other_table = rel_opts.db_table
+                    sql_dict = {'other_table': qn(other_table),
+                                'fkey': qn(in_f.column),
+                                'this_pkey': qn(this_pkey),
+                                }
+                    if in_f.null:
+                        sql_block.append(tmpl % sql_dict)
+
+            trigger_name = '%s_%x' % (table, abs(hash(table)))
+
+            instead_of_sql = """
+CREATE TRIGGER %(instead_trigger_name)s ON %(table)s 
+INSTEAD OF DELETE
+AS
+BEGIN
+%(sql)s
+    DELETE FROM %(table)s WHERE %(this_pkey)s IN (SELECT %(this_pkey)s FROM deleted) 
+print '%(escaped_sql)s'
+END
+;
+            """ % { 
+                    'instead_trigger_name': qn('instead_%s' % trigger_name),
+                    'table': qn(table),
+                    'sql': '\n'.join(['    %s' % stmt for stmt in sql_block]),
+                    'escaped_sql': ('\n'.join(['    %s' % stmt for stmt in sql_block])).replace("'", "\\'"),
+                    'this_pkey': qn(this_pkey),
+                    }
+
+
+            if instead_of_sql not in self._trigger_sql:
+                # We only want to generate the INSTEAD OF trigger if there is
+                # an actual code block
+                if sql_block:
+                    self._trigger_sql.add(instead_of_sql)
+                    final_output.append(instead_of_sql)
+
+        return final_output
+
+    def sql_for_pending_references(self, model, style, pending_references):
+        """
+        SQL Server 2000 needs to inject trigger code to emulate deferrable
+        constraints.
+
+        On object delete, we manually set the foreign keys to NULL with an
+        INSTEAD OF DELETE trigger, and then actually delete the record in the
+        AFTER DELETE trigger.
+
+        If the columns are specified with NOT NULL constraints, the trigger will fail
+        and will exhibit the correct behaviour.  If NULL is allowed, this will
+        allow us to emulate DEFERRABLE constraints.
+
+        Note that SQL Server 2000 will automatically delete triggers that are
+        bound to tables when the table is dropped.
+        """
+        import copy
+        # Make a shallow copy of the pending_references
+        pending_references_orig = copy.copy(pending_references)
+
+        final_output = super(DatabaseCreation, self).sql_for_pending_references(model, style, pending_references)
+
+        return final_output
+
+    def sql_create_model(self, model, style, known_models=set()):
+        '''
+        Returns the SQL required to create a single model, as a tuple of:
+            (list_of_sql, pending_references_dict)
+
+        Overloaded to create a view with SCHEMABINDING applied to the original table,
+        to support fields marked as both unique and nullable.
+
+        The key difference from the superclass implementation is that we do not
+        generate unique constraints for nullable field types, or for
+        unique_together field sets that include nullable columns.
+        '''
+
+        self._seen_models.update(known_models)
+        self._created_models.add(model)
+
+        from django.db import models
+
+        opts = model._meta
+        final_output = []
+        table_output = []
+        pending_references = {}
+        qn = self.connection.ops.quote_name
+        for f in opts.local_fields:
+            col_type = f.db_type()
+            tablespace = f.db_tablespace or opts.db_tablespace
+            if col_type is None:
+                # Skip ManyToManyFields, because they're not represented as
+                # database columns in this table.
+                continue
+            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
+            field_output = [style.SQL_FIELD(qn(f.column)),
+                style.SQL_COLTYPE(col_type)]
+            field_output.append(style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')))
+            if f.primary_key:
+                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
+            elif f.unique:
+                if not f.null:
+                    field_output.append(style.SQL_KEYWORD('UNIQUE'))
+                self.unique_fields.append(f)
+
+            if tablespace and f.unique:
+                # We must specify the index tablespace inline, because we
+                # won't be generating a CREATE INDEX statement for this field.
+                field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
+            if f.rel:
+                ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
+                if pending:
+                    pr = pending_references.setdefault(f.rel.to, []).append((model, f))
+                else:
+                    field_output.extend(ref_output)
+            table_output.append(' '.join(field_output))
+        if opts.order_with_respect_to:
+            table_output.append(style.SQL_FIELD(qn('_order')) + ' ' + \
+                style.SQL_COLTYPE(models.IntegerField().db_type()) + ' ' + \
+                style.SQL_KEYWORD('NULL'))
+
+        for field_constraints in opts.unique_together:
+            constraint_fields = [opts.get_field(f) for f in field_constraints]
+            null_allowed = [f for f in constraint_fields if f.null]
+
+            # Only do an inline UNIQUE constraint if none of the unique_together columns
+            # allow nulls.  Otherwise - let the schemabinding hack build the unique index
+            if len(null_allowed) == 0:
+                table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
+                    ", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
+
+        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
+        for i, line in enumerate(table_output): # Combine and add commas.
+            full_statement.append('    %s%s' % (line, i < len(table_output)-1 and ',' or ''))
+        full_statement.append(')')
+        if opts.db_tablespace:
+            full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
+        full_statement.append(';')
+        final_output.append('\n'.join(full_statement))
+
+        if self.unique_fields:
+            final_output.extend(self._create_schemabinding_view(style, opts))
+
+        if opts.has_auto_field:
+            # Add any extra SQL needed to support auto-incrementing primary keys.
+            auto_column = opts.auto_field.db_column or opts.auto_field.name
+            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
+            if autoinc_sql:
+                for stmt in autoinc_sql:
+                    final_output.append(stmt)
+
+        return final_output, pending_references
+
+    def _create_schemabinding_view(self, style, opts):
+        '''
+        Walk the list of unique_fields and generate a view to enforce
+        uniqueness on columns that are both unique and nullable.
+        '''
+
+        # Do a quick check to see if we have nullable and unique fields
+        # defined
+        if len([f for f in self.unique_fields if f.null and f.unique]) == 0:
+            return []
+
+        sql_stmts = []
+        #sql_stmts.append("-- Start SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
+
+        db_table, local_fields = opts.db_table, opts.local_fields
+
+        qn = self.connection.ops.quote_name
+
+        d ={'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
+            'fields': ', \n    '.join(["    %s" % style.SQL_FIELD(qn(f.column)) for f in local_fields]),
+            'table_name': style.SQL_TABLE(qn(db_table)),
+            'null_parts': ' OR\n        '.join(['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in local_fields if f.null]),
+            }
+
+
+        sql_parts = []
+        sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
+        sql_parts.append("    AS")
+        sql_parts.append("    SELECT")
+        sql_parts.append("    %(fields)s" % d)
+        sql_parts.append("    FROM")
+        sql_parts.append("        [dbo].%(table_name)s" % d)
+        sql_parts.append("    WHERE")
+        sql_parts.append("        %(null_parts)s" % d)
+
+        sql_stmts.append('\n'.join(sql_parts))
+        sql_parts = []
+
+
+        # Now create all the indices
+        unique_nullable = [f for f in local_fields if f.null and f.unique]
+        for i, f in enumerate(unique_nullable):
+            d ={'vidx_name' : style.SQL_TABLE(qn("%s_vidx_%s" % (db_table, i))),
+                'idx_name' : style.SQL_TABLE(qn("%s_idx_%s" % (db_table, i))),
+                'table_name': style.SQL_TABLE(qn(db_table)),
+                'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
+                'col_name': style.SQL_FIELD(qn(f.column)),
+                }
+            if i == 0:
+                sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
+            else:
+                sql_stmts.append("CREATE UNIQUE INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
+            sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
+
+
+        # To synthesize unique_together over fields where NULLs are allowed,
+        # we create a view per unique_together clause
+
+        for fc_idx, field_constraints in enumerate(opts.unique_together):
+            fields = [opts.get_field(f) for f in field_constraints]
+
+            unique_together_fields = set([f for f in opts.local_fields if f.null]).intersection(set(fields))
+
+            null_bits = ['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in fields if f.null]
+            if len(null_bits) == 0:
+                # No NULLable columns, skip this
+                continue
+
+            d ={'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
+                'fields': ', \n    '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
+                'table_name': style.SQL_TABLE(qn(db_table)),
+                'null_parts': ' OR\n        '.join(null_bits),
+                }
+
+            sql_parts = []
+            sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
+            sql_parts.append("    AS")
+            sql_parts.append("    SELECT")
+            sql_parts.append("    %(fields)s" % d)
+            sql_parts.append("    FROM")
+            sql_parts.append("        [dbo].%(table_name)s" % d)
+            sql_parts.append("    WHERE")
+            sql_parts.append("        %(null_parts)s" % d)
+            sql_stmts.append('\n'.join(sql_parts))
+
+            d ={'vidx_name' : style.SQL_TABLE(qn("%s_utidx_%s" % (db_table, fc_idx))),
+                'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
+                'table_name': style.SQL_TABLE(qn(db_table)),
+                'col_names': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
+                }
+            # Create a unique clustered index on the VIEW to enforce uniqueness
+            # Note that the view itself will filter out the NULLable column 
+            sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_names)s);" % d)
+
+            # Now, finally create a NON-unique index across the unique_together fields on the TABLE
+            # to provide index speed
+            d ={'idx_name' : style.SQL_TABLE(qn("%s_%s_ut_idx" % (db_table, fc_idx))),
+                'table_name': style.SQL_TABLE(qn(db_table)),
+                'col_name': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
+                }
+
+            sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
+
+            #sql_stmts.append("--  END SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
+
+            """
+            Now for some closure magic.  We just grab the first field in the local_fields list
+            and obtain the post_create_sql code, substituting in a lambda function if nothing
+            is available.
+
+            We apply a closure and extends the post_create_sql method with the SQL we've just 
+            generated to synthesize proper UNIQUE+NULL capability
+            """
+            # We need to bind the sql_stmts to the first field 
+            field = opts.local_fields[0]
+            def wrap_statements(old_post_create_sql, stmts):
+                def closure(style, db_table):
+                    result = []
+                    if old_post_create_sql:
+                        result.extend([sql for sql in old_post_create_sql(style, db_table)])
+                    result.extend(stmts)
+                    return result
+                return closure
+
+            old_func = getattr(field, 'post_create_sql', lambda x, y : [])
+            field.post_create_sql = wrap_statements(old_func, sql_stmts)
+
+        return []
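+
+# For a table [app_thing] with a nullable unique column [code], the SQL bound
+# to post_create_sql comes out roughly as follows (illustrative names only):
+#
+#   CREATE VIEW [app_thing_vw] WITH SCHEMABINDING
+#       AS
+#       SELECT
+#           [id], [code]
+#       FROM
+#           [dbo].[app_thing]
+#       WHERE
+#           [code] IS NOT NULL
+#   CREATE UNIQUE CLUSTERED INDEX [app_thing_vidx_0] on [app_thing_vw] ([code]);
+#   CREATE INDEX [app_thing_idx_0] on [app_thing] ([code]);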
+
+# Stored procedure code
+
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/custom_triggers.sql	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,44 @@
+DROP TRIGGER trd
+GO
+CREATE TRIGGER trd ON mutually_referential_parent
+INSTEAD OF DELETE
+AS
+BEGIN
+update mutually_referential_child SET parent_id = NULL WHERE parent_id IN  (SELECT id FROM deleted)
+update mutually_referential_parent SET bestchild_id = NULL WHERE id IN  (SELECT id FROM deleted)
+END
+GO
+
+DROP TRIGGER trd_child
+GO
+CREATE TRIGGER trd_child ON mutually_referential_child
+INSTEAD OF DELETE
+AS
+BEGIN
+update mutually_referential_parent SET bestchild_id = NULL WHERE bestchild_id IN (SELECT id FROM deleted)
+update mutually_referential_child SET parent_id = NULL WHERE id IN (SELECT id FROM deleted)
+END
+GO
+
+DROP TRIGGER trd_after
+GO
+
+CREATE TRIGGER trd_after ON mutually_referential_parent
+AFTER DELETE
+AS
+BEGIN
+delete FROM mutually_referential_parent WHERE id IN (SELECT id FROM deleted)
+END
+GO
+
+
+DROP TRIGGER trd_after_child
+GO
+ 
+CREATE TRIGGER trd_after_child ON mutually_referential_child
+AFTER DELETE
+AS
+BEGIN
+delete FROM mutually_referential_child WHERE id IN (SELECT id FROM deleted) 
+END
+GO
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/deferred_inserts.sql	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,105 @@
+DROP TABLE [deferred_serializers_article]
+GO
+DROP TABLE [deferred_serializers_category]
+GO
+DROP TABLE [deferred_serializers_author]
+GO
+DROP TABLE [#deferred_serializers_article]
+GO
+DROP PROCEDURE [thunk_tmp_deferred_serializers_article]
+GO
+
+
+CREATE TABLE [deferred_serializers_category] (
+    [id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+    [name] nvarchar(20) NOT NULL
+)
+GO
+
+CREATE TABLE [deferred_serializers_author] (
+    [id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+    [name] nvarchar(20) NOT NULL
+)
+GO
+
+CREATE TABLE [deferred_serializers_article] (
+    [id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+    [author_id] int NOT NULL REFERENCES [deferred_serializers_author] ([id]),
+    [headline] nvarchar(50) NOT NULL,
+    [pub_date] datetime NOT NULL
+)
+GO
+-- Set up the temp table
+CREATE TABLE [#deferred_serializers_article] (
+	[id] int IDENTITY (1, 1) NOT NULL PRIMARY KEY,
+	[author_id] int,
+	[headline] nvarchar(50) NOT NULL,
+	[pub_date] datetime NOT NULL
+)
+
+GO
+
+-- Define a stored procedure that verifies foreign key constraints where foreign keys are not allowed to be NULL
+CREATE PROCEDURE [thunk_tmp_deferred_serializers_article] 
+AS
+BEGIN
+        DECLARE @tmp_rowcount int
+	DECLARE @rowcount int
+	SELECT @tmp_rowcount = COUNT([id]) from [#deferred_serializers_article]
+
+        -- Repeat for each non-null foreign key constraint
+	SELECT @rowcount = COUNT([f].[id]) 
+				FROM [deferred_serializers_author] AS f 
+				WHERE [f].[id] in 
+				(SELECT [author_id] from [#deferred_serializers_article])
+        print 'Match rows found: ' + CAST(@rowcount AS VARCHAR)
+        print 'Temp rows: ' + CAST(@tmp_rowcount AS VARCHAR)
+	IF @rowcount <> @tmp_rowcount
+          -- The counts must match, otherwise a foreign key is unsatisfied
+          RAISERROR ('Foreign key is not satisfied for [deferred_serializers_article].[author_id]',16,1)
+        -- To here for each fkey
+
+        -- Finally - move the data from the temp table
+        -- Note that we let SQL Server generate new IDs for the records as we shift them back to the 'real' table
+        SET IDENTITY_INSERT [deferred_serializers_article] OFF
+  	INSERT INTO [deferred_serializers_article] ([author_id], [headline], [pub_date]) 
+		(SELECT [author_id], [headline], [pub_date] FROM [#deferred_serializers_article])
+        DELETE FROM [#deferred_serializers_article]
+        RETURN 1
+END
+
+GO
+
+
+
+-- Setup fixture data
+BEGIN TRANSACTION
+
+	-- Put in a temp record that's a forward reference
+	SET IDENTITY_INSERT [#deferred_serializers_article] OFF
+	GO
+	INSERT INTO [#deferred_serializers_article]
+	(author_id, headline, pub_date)
+	values
+	(4, 'Forward references are ok', '2006-06-01')
+	GO
+	
+	
+	-- Put in the reference
+	SET IDENTITY_INSERT [deferred_serializers_author] ON
+	GO
+	INSERT INTO deferred_serializers_author (id, name) VALUES (4, 'Jimmy')
+	GO
+	SET IDENTITY_INSERT [deferred_serializers_author] OFF
+	GO
+	
+	-- Run the SP to verify references and shift data back
+	
+	exec thunk_tmp_deferred_serializers_article
+
+COMMIT TRANSACTION
+GO
+DROP TABLE [#deferred_serializers_article]
+GO
+SELECT * from [deferred_serializers_article]
+GO
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/introspection.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,164 @@
+from django.db.backends import BaseDatabaseIntrospection
+from com.ziclix.python.sql import zxJDBC as Database
+
+SQL_AUTOFIELD = -777555
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+    data_types_reverse = {
+        SQL_AUTOFIELD:              'AutoField',
+        Database.BIGINT:            'IntegerField',
+        Database.BIT:               'BooleanField',
+        Database.CHAR:              'CharField',
+        Database.DECIMAL:           'DecimalField',
+        Database.DOUBLE:            'FloatField',
+        Database.FLOAT:             'FloatField',
+        #Database.SQL_GUID:              'TextField',
+        Database.INTEGER:           'IntegerField',
+        #Database.LONGVARBINARY:     ,
+        #Database.LONGVARCHAR:       ,
+        Database.NUMERIC:           'DecimalField',
+        Database.REAL:              'FloatField',
+        Database.SMALLINT:          'SmallIntegerField',
+        Database.TINYINT:           'SmallIntegerField',
+        Database.DATETIME:          'DateField',
+        Database.TIME:              'TimeField',
+        Database.TIMESTAMP:         'DateTimeField',
+        #Database.SQL_VARBINARY:         ,
+        Database.VARCHAR:           'TextField',
+        Database.NCHAR:             'CharField',
+        Database.LONGNVARCHAR:      'TextField',
+        Database.NVARCHAR:          'TextField',
+    }
+
+    def get_table_list(self, cursor):
+        """
+        Returns a list of table names in the current database.
+        """
+        # TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
+
+        cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
+        return [row[0] for row in cursor.fetchall()]
+
+
+    def _is_auto_field(self, cursor, table_name, column_name):
+        """
+        Checks whether column is Identity
+        """
+        # COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
+
+        cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
+                         (self.connection.ops.quote_name(table_name), column_name))
+        return cursor.fetchall()[0][0]
+
+    def get_table_description(self, cursor, table_name, identity_check=True):
+        """Returns a description of the table, with DB-API cursor.description interface.
+
+        The 'identity_check' parameter has been added to the function argspec.
+        If set to True, the function will check each of the table's fields for the
+        IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
+
+        When a field is found with an IDENTITY property, it is given a custom field number
+        of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
+        """
+
+        columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
+        items = []
+        for column in columns:
+            if identity_check and self._is_auto_field(cursor, table_name, column[0]):
+                column[1] = SQL_AUTOFIELD
+            if column[1] == Database.NVARCHAR and column[3] < 4000:
+                column[1] = Database.NCHAR
+            items.append(column)
+        return items
+
+    def _name_to_index(self, cursor, table_name):
+        """
+        Returns a dictionary of {field_name: field_index} for the given table.
+        Indexes are 0-based.
+        """
+        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
+
+    def get_relations(self, cursor, table_name):
+        """
+        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+        representing all relationships to the given table. Indexes are 0-based.
+        """
+        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
+        # CONSTRAINT_TABLE_USAGE:  http://msdn2.microsoft.com/en-us/library/ms179883.aspx
+        # REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
+        # TABLE_CONSTRAINTS:       http://msdn2.microsoft.com/en-us/library/ms181757.aspx
+
+        table_index = self._name_to_index(cursor, table_name)
+        sql = """
+SELECT e.COLUMN_NAME AS column_name,
+  c.TABLE_NAME AS referenced_table_name,
+  d.COLUMN_NAME AS referenced_column_name
+FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
+INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
+  ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
+INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
+  ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
+INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
+  ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
+INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
+  ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
+WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
+        cursor.execute(sql, (table_name,))
+        return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
+                     for item in cursor.fetchall()])
+
+    def get_indexes(self, cursor, table_name):
+        """
+        Returns a dictionary of fieldname -> infodict for the given table,
+        where each infodict is in the format:
+            {'primary_key': boolean representing whether it's the primary key,
+             'unique': boolean representing whether it's a unique index,
+             'db_index': boolean representing whether it's a non-unique index}
+        """
+        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
+        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
+
+        pk_uk_sql = """
+SELECT b.COLUMN_NAME, a.CONSTRAINT_TYPE
+FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
+INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS b
+  ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME AND a.TABLE_NAME = b.TABLE_NAME
+WHERE a.TABLE_NAME = %s AND (CONSTRAINT_TYPE = 'PRIMARY KEY' OR CONSTRAINT_TYPE = 'UNIQUE')"""
+
+        field_names = [item[0] for item in self.get_table_description(cursor, table_name, identity_check=False)]
+        indexes, results = {}, {}
+        cursor.execute(pk_uk_sql, (table_name,))
+        data = cursor.fetchall()
+        if data:
+            results.update(data)
+
+        if self.connection.sqlserver_version >= 2005:
+            # non-unique, non-compound indexes, only in SS2005?
+            ix_sql = """
+SELECT DISTINCT c.name
+FROM sys.columns c
+INNER JOIN sys.index_columns ic
+  ON ic.object_id = c.object_id AND ic.column_id = c.column_id
+INNER JOIN sys.indexes ix
+  ON ix.object_id = ic.object_id AND ix.index_id = ic.index_id
+INNER JOIN sys.tables t
+  ON t.object_id = ix.object_id
+WHERE ix.object_id IN (
+  SELECT ix.object_id
+  FROM sys.indexes ix
+  GROUP BY ix.object_id, ix.index_id
+  HAVING count(1) = 1)
+AND ix.is_primary_key = 0
+AND ix.is_unique_constraint = 0
+AND t.name = %s"""
+
+            cursor.execute(ix_sql, (table_name,))
+            for column in [r[0] for r in cursor.fetchall()]:
+                if column not in results:
+                    results[column] = 'IX'
+
+        for field in field_names:
+            val = results.get(field, None)
+            indexes[field] = dict(primary_key=(val=='PRIMARY KEY'), unique=(val=='UNIQUE'), db_index=(val=='IX'))
+
+        return indexes
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/operations.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,230 @@
+from django.db.backends import BaseDatabaseOperations
+import datetime
+import time
+
+import query
+
+class DatabaseOperations(BaseDatabaseOperations):
+    # Define the parts of an ODBC date string
+    # so we can do substring operations to match
+    DATE_PARTS = {'year': (1,4),
+              'month': (6,2),
+              'day': (9,2)}
+
+    def regex_lookup(self, lookup_type):
+        """
+        Returns the string to use in a query when performing regular expression
+        lookups (using "regex" or "iregex"). The resulting string should
+        contain a '%s' placeholder for the column being searched against.
+
+        If the feature is not supported (or part of it is not supported), a
+        NotImplementedError exception can be raised.
+        """
+        if lookup_type == 'regex':
+            ignore_case = 0
+        else:
+            ignore_case = 1
+
+        return "dbo.regex(%%s, %%s, %s) = 1" % ignore_case
+
+    def start_transaction_sql(self):
+        """
+        Returns the SQL statement required to start a transaction.
+        """
+        return "BEGIN TRANSACTION"
+
+    def date_extract_sql(self, lookup_type, field_name):
+
+        start, end = self.DATE_PARTS[lookup_type]
+        return "CONVERT(INT, SUBSTRING(%s, %s, %s))" % (self.quote_name(field_name), start, end)
+
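+    # For illustration: date_extract_sql('month', 'pub_date') produces
+    #   CONVERT(INT, SUBSTRING([pub_date], 6, 2))
+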
+    def _unquote_fieldname(self, fieldname):
+        '''
+        Try to unquote the fieldname so that SQL Server doesn't assign a
+        weird semi-random name to the converted column.
+
+        We *only* return the column name part though - we drop the table name.
+
+        This method is really only used by the date_trunc_sql method and isn't meant
+        for any other use.
+        '''
+        assert fieldname.startswith('[') and fieldname.endswith(']')
+        short_name = fieldname.split('.')[-1][1:-1]
+        return short_name
+
+    def date_trunc_sql(self, lookup_type, field_name):
+        quoted_field_name = self.quote_name(field_name)
+
+        short_name = self.quote_name(self._unquote_fieldname(quoted_field_name))
+
+        sql_dict = {'name': quoted_field_name, 'short_name': short_name}
+
+        if lookup_type == 'year':
+            return "CONVERT(datetime, CONVERT(varchar, DATEPART(year, %(name)s)) + '/01/01') AS %(short_name)s" % sql_dict
+
+        if lookup_type == 'month':
+            return "CONVERT(datetime, CONVERT(varchar, DATEPART(year, %(name)s)) + '/' + CONVERT(varchar, DATEPART(month, %(name)s)) + '/01') AS %(short_name)s" %\
+                    sql_dict
+
+        if lookup_type == 'day':
+            return "CONVERT(datetime, CONVERT(varchar(12), %(name)s)) AS %(short_name)s" % sql_dict
+
+    def last_insert_id(self, cursor, table_name, pk_name):
+        cursor.execute("SELECT CAST(IDENT_CURRENT(%s) AS bigint)", [self.quote_name(table_name)])
+        return cursor.fetchone()[0]
+
+    def no_limit_value(self):
+        return None
+
+    def prep_for_like_query(self, x):
+        """Prepares a value for use in a LIKE query."""
+        from django.utils.encoding import smart_unicode
+        return (
+            smart_unicode(x).\
+                replace("\\", "\\\\").\
+                replace("%", "\%").\
+                replace("_", "\_").\
+                replace("[", "\[").\
+                replace("]", "\]")
+            )
+
+    def query_class(self, DefaultQueryClass):
+        return query.query_class(DefaultQueryClass)
+
+    def quote_name(self, name):
+        if 'CONVERT(' in name:
+            # SQL Server has a fragile parser.  If we've already applied CONVERT on a column, treat this
+            # column as pre-quoted.  No - it doesn't make any sense.  Don't think too hard about this.
+            return name
+        if name.startswith('[') and name.endswith(']'):
+            return name # already quoted
+        return '[%s]' % name
+
+    def random_function_sql(self):
+        return 'RAND()'
+
+    def sql_flush(self, style, tables, sequences):
+        """
+        Returns a list of SQL statements required to remove all data from
+        the given database tables (without actually removing the tables
+        themselves).
+
+        The `style` argument is a Style object as returned by either
+        color_style() or no_style() in django.core.management.color.
+        
+        Originally taken from django-pyodbc project.
+        """
+        if not tables:
+            return list()
+            
+        qn = self.quote_name
+            
+        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY; use DELETE instead.
+        # (which is slow)
+        from django.db import connection
+        cursor = connection.cursor()
+        # Try to minimize the risks of the brain-dead inconsistency in
+        # DBCC CHECKIDENT(table, RESEED, n) behavior.
+        seqs = []
+        for seq in sequences:
+            cursor.execute("SELECT COUNT(*) FROM %s" % qn(seq["table"]))
+            rowcnt = cursor.fetchone()[0]
+            elem = dict()
+
+            if rowcnt:
+                elem['start_id'] = 0
+            else:
+                elem['start_id'] = 1
+
+            elem.update(seq)
+            seqs.append(elem)
+
+        cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS")
+        fks = cursor.fetchall()
+        
+        sql_list = list()
+
+        # Turn off constraints.
+        sql_list.extend(['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % (
+            qn(fk[0]), qn(fk[1])) for fk in fks])
+
+        # Delete data from tables.
+        sql_list.extend(['%s %s %s;' % (
+            style.SQL_KEYWORD('DELETE'), 
+            style.SQL_KEYWORD('FROM'), 
+            style.SQL_FIELD(qn(t))
+            ) for t in tables])
+
+        # Reset the counters on each table.
+        sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
+            style.SQL_KEYWORD('DBCC'),
+            style.SQL_KEYWORD('CHECKIDENT'),
+            style.SQL_FIELD(qn(seq["table"])),
+            style.SQL_KEYWORD('RESEED'),
+            style.SQL_FIELD('%d' % seq['start_id']),
+            style.SQL_KEYWORD('WITH'),
+            style.SQL_KEYWORD('NO_INFOMSGS'),
+            ) for seq in seqs])
+
+        # Turn constraints back on.
+        sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % (
+            qn(fk[0]), qn(fk[1])) for fk in fks])
+
+        return sql_list
+
+    def tablespace_sql(self, tablespace, inline=False):
+        return "ON %s" % self.quote_name(tablespace)
+        
+    def value_to_db_datetime(self, value):
+        if value is None:
+            return None
+            
+        if value.tzinfo is not None:
+            raise ValueError("SQL Server 2005 does not support timezone-aware datetimes.")
+
+        # SQL Server 2005 doesn't support microseconds
+        return value.replace(microsecond=0)
+    
+    def value_to_db_time(self, value):
+        # SQL Server's datetime type doesn't support microseconds,
+        # but it also doesn't really support bare times
+        if value is None:
+            return None
+        return value.replace(microsecond=0)
+	        
+    def value_to_db_decimal(self, value, max_digits, decimal_places):
+        if value is None or value == '':
+            return None
+        return value # Should be a decimal type (or string)
+
+    def year_lookup_bounds(self, value):
+        """
+        Returns a two-element list with the lower and upper bound to be used
+        with a BETWEEN operator to query a field value using a year lookup.
+
+        `value` is an int, containing the looked-up year.
+        """
+        first = '%s-01-01 00:00:00'
+        second = '%s-12-31 23:59:59'
+        return [first % value, second % value]
+
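+    # e.g. year_lookup_bounds(2008) -> ['2008-01-01 00:00:00', '2008-12-31 23:59:59']
+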
+    def field_cast_sql(self, db_type):
+        """
+        Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
+        to cast it before using it in a WHERE statement. Note that the
+        resulting string should contain a '%s' placeholder for the column being
+        searched against.
+        """
+        if db_type is None:
+            return '%s'
+
+        if 'DATETIME' == db_type.upper():
+            # We need to convert date and datetime columns into
+            # ODBC canonical format.
+            # See: http://msdn.microsoft.com/en-us/library/ms187928.aspx
+            return "CONVERT(varchar, %s, 120)"
+        elif 'SMALLDATETIME' == db_type.upper():
+            return "SUBSTRING(CONVERT(varchar, %s, 120), 1, 10)"
+        return '%s'
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/pagination.sql	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,45 @@
+CREATE PROCEDURE ReturnPage @query varchar(2000), @OrderBy varchar(2000),
+                            @StartRow int, @EndRow int
+AS
+BEGIN
+declare @ColList varchar(2000);
+declare @Where varchar(2000);
+declare @i int; 
+declare @i2 int;
+declare @tmp varchar(2000);
+declare @dec varchar(2000);
+declare @f varchar(100);
+declare @d varchar(100);
+declare @Symbol char(2);
+declare @SQL varchar(5000);
+declare @Sort varchar(2000);
+set @Sort = @OrderBy + ', '
+set @dec = ''
+set @Where  = ''
+set @SQL = ''
+set @i = charindex(',' , @Sort)
+while @i != 0
+ begin
+  set @tmp = left(@Sort,@i-1)
+  set @i2 = charindex(' ', @tmp)
+  set @f = ltrim(rtrim(left(@tmp,@i2-1)))
+  set @d = ltrim(rtrim(substring(@tmp,@i2+1,100)))
+  set @Sort = rtrim(ltrim(substring(@Sort,@i+1,100)))
+  set @i = charindex(',', @Sort)
+  set @symbol = case when @d = 'ASC' then '>' else '<' end +
+                case when @i=0 then '=' else '' end
+  set @dec = @dec + 'declare @' + @f + ' sql_variant; '
+  set @ColList = isnull(replace(replace(@colList,'>','='),'<','=') + ' and ','') +
+                 @f + @Symbol + ' @' + @f
+  set @Where = @Where + ' OR (' + @ColList + ') '
+  set @SQL = @SQL + ', @' + @f + '= ' + @f
+ end
+set @SQL = @dec + ' ' +
+           'SET ROWCOUNT ' + convert(varchar(10), @StartRow) + '; ' +
+           'SELECT ' + substring(@SQL,3,7000) + ' from (' + @query + ') a ORDER BY ' +
+           @OrderBy + '; ' + 'SET ROWCOUNT ' +
+           convert(varchar(10), 1 + @EndRow - @StartRow) + '; ' +
+           'select * from (' + @query + ') a WHERE ' +
+           substring(@Where,4,7000) + ' ORDER BY ' + @OrderBy + '; SET ROWCOUNT 0;'
+exec(@SQL)
+END
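+-- Illustrative call (hypothetical table [foo] with an [id] column):
+--   EXEC ReturnPage 'SELECT id, name FROM foo', 'id ASC', 11, 20
+-- is intended to return rows 11 through 20 (1-based) of the ordered result.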
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/pool.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,64 @@
+from java.lang import Class
+from java.lang import System
+from java.io import PrintWriter
+from java.sql import DriverManager
+from java.sql import Connection
+from java.sql import SQLException
+
+#  Here are the dbcp-specific classes.
+#  Note that they are only used in the setupDriver
+#  method. In normal use, your classes interact
+#  only with the standard JDBC API
+
+from org.apache.commons.pool.impl import GenericObjectPool
+from org.apache.commons.dbcp import PoolableConnectionFactory
+from org.apache.commons.dbcp import BasicDataSource
+from org.apache.commons.dbcp import DataSourceConnectionFactory
+import time
+
+class ManualPoolingDriver(object):
+    def __init__(self, connectURI, username, password, pool_name):
+        self.connectionPool = GenericObjectPool(None)
+        self._pool_name = pool_name
+
+        source = BasicDataSource()
+        source.setUrl(connectURI)
+        source.setUsername(username)
+        source.setPassword(password)
+        source.setInitialSize(1) # Number of connections to start with
+        source.setMinIdle(5)     # Allow a bottom of 5 idle connections
+        source.setMaxActive(10)  # Max of 10 database connection
+        source.setDefaultTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED)
+        source.setMinEvictableIdleTimeMillis(500) 
+
+        self.connectionFactory = DataSourceConnectionFactory(source)
+
+        #  Now we'll create the PoolableConnectionFactory, which wraps
+        #  the "real" Connections created by the ConnectionFactory with
+        #  the classes that implement the pooling functionality.
+        self.poolableConnectionFactory = PoolableConnectionFactory(self.connectionFactory,
+                self.connectionPool,
+                None,
+                None,
+                False,
+                True)
+
+        #  Finally, we create the PoolingDriver itself...
+        Class.forName("org.apache.commons.dbcp.PoolingDriver")
+        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
+
+        #  ...and register our pool with it.
+        driver.registerPool(self._pool_name, self.connectionPool)
+        #  Now we can just use the connect string "jdbc:apache:commons:dbcp:<pool_name>"
+        #  to access our pool of Connections.
+
+    def printDriverStats(self):
+        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
+        connectionPool = driver.getConnectionPool(self._pool_name)
+        print "NumActive: " + str(connectionPool.getNumActive())
+        print "NumIdle: " + str(connectionPool.getNumIdle())
+
+    def shutdownDriver(self):
+        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
+        driver.closePool(self._pool_name)
+
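+# Illustrative usage (hypothetical DSN and credentials):
+#
+#   driver = ManualPoolingDriver("jdbc:jtds:sqlserver://localhost:1433/mydb",
+#                                "user", "secret", "django_pool")
+#   conn = zxJDBC.connect("jdbc:apache:commons:dbcp:django_pool",
+#                         None, None,
+#                         "org.apache.commons.dbcp.PoolingDriver")
+#
+# Each pool registered this way is reachable through the
+# "jdbc:apache:commons:dbcp:<pool_name>" URL.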
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2k/query.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,418 @@
+"""Custom Query class for MS SQL Serever."""
+
+# query_class returns the base class to use for Django queries.
+# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
+# which is passed in as "QueryClass" by Django itself.
+#
+# SqlServerQuery overrides:
+# ...insert queries to add "SET IDENTITY_INSERT" if needed.
+# ...select queries to emulate LIMIT/OFFSET for sliced queries.
+
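+# The backend's DatabaseOperations is expected to hook this in through
+# Django's query_class extension point, roughly (illustrative sketch, not
+# verbatim from this repo):
+#
+#   def query_class(self, DefaultQueryClass):
+#       from jtds.mssql2k import query
+#       return query.query_class(DefaultQueryClass)
+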
+#DEBUG=True
+DEBUG=False
+
+# Cache. Maps default query class to new SqlServer query class.
+_classes = {}
+
+
+from com.ziclix.python.sql import PyStatement, PyCursor
+
+
+def query_class(QueryClass):
+    """Return a custom Query subclass for SQL Server."""
+    class SqlServerQuery(QueryClass):
+        def __init__(self, *args, **kwargs):
+            super(SqlServerQuery, self).__init__(*args, **kwargs)
+
+            # If we are an insert query, wrap "as_sql"
+            if self.__class__.__name__ == "InsertQuery":
+                self._parent_as_sql = self.as_sql
+                self.as_sql = self._insert_as_sql
+
+        def __reduce__(self):
+            """
+            Enable pickling for this class (normal pickling handling doesn't
+            work as Python can only pickle module-level classes by default).
+            """
+            if hasattr(QueryClass, '__getstate__'):
+                assert hasattr(QueryClass, '__setstate__')
+                data = self.__getstate__()
+            else:
+                data = self.__dict__
+            return (unpickle_query_class, (QueryClass,), data)
+
+        def get_primary_keys(self):
+            return set([f for f in self.model._meta.fields if f.primary_key])
+
+        def resolve_columns(self, row, fields=()):
+            """
+            Cater for the fact that SQL Server has no separate Date and Time
+            data types.
+            """
+            from django.db.models.fields import DateField, DateTimeField, \
+                TimeField, BooleanField, NullBooleanField
+            values = []
+            for value, field in map(None, row, fields):
+                if value is not None:
+                    if isinstance(field, DateTimeField):
+                        # DateTimeField subclasses DateField so must be checked
+                        # first.
+                        pass # do nothing
+                    elif isinstance(field, DateField):
+                        value = value.date() # extract date
+                    elif isinstance(field, TimeField):
+                        value = value.time() # extract time
+                    elif isinstance(field, (BooleanField, NullBooleanField)):
+                        if value in (1,'t','True','1',True):
+                            value = True
+                        else:
+                            value = False
+                values.append(value)
+            return values
+            
+        def as_sql(self, with_limits=True, with_col_aliases=False):
+            """
+            """
+            # By default, just build the result and params with the superclass results
+
+            sql, params = super(SqlServerQuery, self).as_sql(with_limits=False,
+                    with_col_aliases=with_col_aliases)
+
+            # Now comes the tricky part: specializing the query to work
+            # against SQL Server 2000.
+
+            # Stuff to watch for
+            if DEBUG:
+                print "SQL              [%s] " % sql
+                print "Params           [%s] " % str(params)
+                print "High mark        [%s] " % self.high_mark
+                print "Low mark         [%s] " % self.low_mark
+                print "Distinct         [%s] " % self.distinct
+                print "With Limits      [%s] " % with_limits
+                print "With col aliases [%s] " % with_col_aliases
+                print "Columns          [%s] " % self.get_columns(with_col_aliases)
+                print "Ordering         [%s] " % self.get_ordering()
+
+
+            if self.high_mark or self.low_mark:
+                # Ok, we do a lot of finagling here just to get pagination
+                cstmt = self._setup_pagination(sql, params, with_limits, with_col_aliases, \
+                                self.low_mark, self.high_mark)
+                sql, params = PyStatement(cstmt, '', PyStatement.STATEMENT_PREPARED), ()
+            return sql, params
+
+        def _insert_as_sql(self, *args, **kwargs):
+            meta = self.get_meta()
+
+            quoted_table = self.connection.ops.quote_name(meta.db_table)
+            # Get (sql,params) from original InsertQuery.as_sql
+            sql, params = self._parent_as_sql(*args,**kwargs)
+
+            if (meta.pk.attname in self.columns) and (meta.pk.__class__.__name__ == "AutoField"):
+                # Check whether the insert supplies only the pk, relying on
+                # its default value
+                if len(self.columns)==1 and len(params)==0:
+                    sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
+                else:
+                    sql = "SET IDENTITY_INSERT %s ON;%s;SET IDENTITY_INSERT %s OFF" %\
+                        (quoted_table, sql, quoted_table)
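+                    # Illustrative result for a hypothetical table [foo]:
+                    #   SET IDENTITY_INSERT [foo] ON;INSERT INTO [foo] ...;
+                    #   SET IDENTITY_INSERT [foo] OFF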
+
+            return sql, params
+
+        def clone(self, klass=None, **kwargs):
+            # Just use the parent clone - don't specialize any queries
+            result = super(SqlServerQuery, self).clone(klass, **kwargs)
+            return result
+
+        def execute_sql(self, result_type='multi'):
+            """
+            Run the query against the database and returns the result(s). The
+            return value is a single data item if result_type is SINGLE, or an
+            iterator over the results if the result_type is MULTI.
+
+            result_type is either MULTI (use fetchmany() to retrieve all rows),
+            SINGLE (only retrieve a single row), or None (no results expected, but
+            the cursor is returned, since it's used by subclasses such as
+            InsertQuery).
+            """
+            from django.db.models.sql.constants import MULTI, SINGLE, GET_ITERATOR_CHUNK_SIZE
+
+            if self.high_mark and self.high_mark <= self.low_mark:
+                # Short circuit if we're slicing nothing
+                return []
+
+            # Pull in these imports from main Django DB
+            # code base, but we can't import at the top level
+            # or else we get circular imports 
+            from django.db.models.sql.datastructures import EmptyResultSet
+            from django.db.models.sql.query import empty_iter
+            try:
+                sql, params = self.as_sql()
+                if not sql:
+                    raise EmptyResultSet
+            except EmptyResultSet:
+                if result_type == MULTI:
+                    return empty_iter()
+                else:
+                    return
+
+            cursor = self.connection.cursor()
+            if isinstance(sql, PyStatement):
+                # We sometimes need to override with a PyStatement because
+                # it's the only way to implement paginated results
+                pycur = cursor
+                while not isinstance(pycur, PyCursor):
+                    pycur = pycur.cursor
+                sql.execute(pycur, None, None)
+            else:
+                if DEBUG:
+                    print sql, params
+                cursor.execute(sql, params)
+
+            if not result_type:
+                return cursor
+            if result_type == SINGLE:
+                if self.ordering_aliases:
+                    return cursor.fetchone()[:-len(self.ordering_aliases)]
+                return cursor.fetchone()
+
+            # The MULTI case.
+            if self.ordering_aliases:
+                from django.db.models.sql.query import order_modified_iter
+                result = order_modified_iter(cursor, len(self.ordering_aliases),
+                        self.connection.features.empty_fetchmany_value)
+            else:
+                result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
+                        self.connection.features.empty_fetchmany_value)
+
+            # Need to serialize all the results because we don't want to maintain
+            # state between requests
+            result = list(result)
+
+            # Force the PyStatement to close if we ever created one
+            if isinstance(sql, PyStatement):
+                sql.close()
+                # Drop the temp table
+                cur = self.connection.cursor()
+                cur.execute("drop table #temp_table")
+                cur.close()
+
+            return result
+
+        def get_ordering(self):
+            result = super(SqlServerQuery, self).get_ordering()
+            if self.ordering_aliases and self.distinct:
+                # Clear ordering aliases if we're using a distinct query.
+                # Ordering aliases will just screw things up 
+                self.ordering_aliases = []
+            return result
+
+        def _setup_pagination(self, sql, params, with_limits, with_col_aliases, \
+                                low_mark, high_mark):
+
+            # Ok, got the column labels, now extract the type information by running the query *twice*
+            # Yes, horribly inefficient, but how are you really going to handle all the bizarre corner
+            # cases for SQL mangling?
+            shim_sql = self._get_temp_table_cols(sql, params)
+
+            # Ok, so we need to obtain the raw JDBC connection, create a
+            # callable statement for the pagination procedure, and order by
+            # the generated djy_sql2k_sort_id column.
+            cursor = self.connection.cursor()
+
+            jconn = cursor.cursor.connection.__connection__
+
+            cstmt = jconn.prepareCall("returnpage(?, ?, ?, ?)")
+            cstmt.setString('@query', shim_sql)
+            cstmt.setString('@orderby', "djy_sql2k_sort_id ASC")
+
+            # The *only* ordering alias we care about during pagination, since
+            # we force the output of the original SQL select into #temp_table.
+
+            self.ordering_aliases = ['djy_sql2k_sort_id']
+            from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
+            if low_mark and high_mark:
+                low, high = low_mark + 1, high_mark
+            elif low_mark:
+                # We limit the upper bound to GET_ITERATOR_CHUNK_SIZE number of records or
+                # else we risk SQL2k throwing us an instance of java.sql.DataTruncation
+                low, high = low_mark + 1, GET_ITERATOR_CHUNK_SIZE + (low_mark + 1)
+            elif high_mark:
+                low, high = 1, high_mark
+            else:
+                raise RuntimeError, "Can't paginate when we have no low or high marks!"
+
+            cstmt.setInt('@startrow', low)
+            cstmt.setInt('@endrow', high)
+            if DEBUG:
+                print "Shim SQL : ", shim_sql
+                print "Low mark : ", low
+                print "High mark: ", high
+            return cstmt
+
+        def _get_temp_table_cols(self, sql, params):
+            '''
+            Run the query once just to read its JDBC result-set metadata,
+            build a matching #temp_table with an IDENTITY sort column, fill
+            it with the query output, and return the SELECT to page over it.
+            I'm sure there's a *good* way of doing this, but here's a bad
+            way of doing it that works. :)
+            '''
+            cursor = self.connection.cursor()
+
+            pycur = cursor
+            while not isinstance(pycur, PyCursor):
+                pycur = pycur.cursor
+            jconn = pycur.connection.__connection__
+            if params:
+                j_sql = sql.replace("%s", '?')
+            else:
+                j_sql = sql
+            j_pstmt = jconn.prepareStatement(j_sql)
+            if params:
+                # handle parameters
+                from base import coerce_sql2k_type
+                for i in range(len(params)):
+                    param_obj = coerce_sql2k_type(params[i])
+                    j_pstmt.setObject(i+1, param_obj)
+            if DEBUG:
+                print "JDBC statement: ", j_sql
+                print "JDBC Params: ", params
+            j_pstmt.execute()
+            rset = j_pstmt.getResultSet()
+            meta = rset.getMetaData()
+            col_count = meta.getColumnCount()
+
+            # Generate the (table_alias, col_alias) tuple list
+
+            col_tuples = extract_colnames(j_sql)
+
+            col_defs = []
+            for col_num in range(1, col_count+1):
+                col_dict = {}
+                col_dict['table_alias'] = col_tuples[col_num-1][0]
+                col_dict['label'] = meta.getColumnLabel(col_num)
+                col_dict['name'] = meta.getColumnName(col_num).replace("-", '__')
+                col_dict['sql_type'] = meta.getColumnTypeName(col_num)
+                col_dict['prec'] = meta.getPrecision(col_num)
+                col_dict['scale'] = meta.getScale(col_num)
+                col_dict['nullable'] = meta.isNullable(col_num) == 1
+
+                if col_dict['sql_type'] == 'ntext' and col_dict['prec'] > 8000:
+                    # This looks like a dodgy declaration - just force it to be blank
+                    # and let SQL Server use the default size
+                    col_dict['prec'] = ''
+
+                col_defs.append(col_dict)
+            rset.close()
+            j_pstmt.close()
+
+            # Now - reconstitute the table definition based on the column definition
+            col_sql = []
+            # Note that we have _0xdj_ between the table alias and colname.  Use that to coerce the values back
+            reverse_types = {
+                    'int': "%(table_alias)s_0xdj_%(name)s int " ,
+                    'bit': '%(table_alias)s_0xdj_%(name)s bit ',
+                    'datetime': "%(table_alias)s_0xdj_%(name)s %(sql_type)s " ,
+                    'smalldatetime': "%(table_alias)s_0xdj_%(name)s %(sql_type)s " ,
+                    'numeric': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s, %(scale)s) " ,
+                    'double': "%(table_alias)s_0xdj_%(name)s double precision " ,
+                    'smallint': "%(table_alias)s_0xdj_%(name)s int " ,
+                    'nvarchar': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s) COLLATE SQL_Latin1_General_CP1_CI_AS " ,
+                    'ntext': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s) COLLATE SQL_Latin1_General_CP1_CI_AS " ,
+                    }
+
+            for cdef in col_defs:
+                key = cdef['sql_type'].split()[0]
+                value = reverse_types[key]
+                if key == 'ntext' and cdef['prec'] == '':
+                    # Drop the brackets around the ntext size declaration
+                    value = "%(table_alias)s_0xdj_%(name)s %(sql_type)s COLLATE SQL_Latin1_General_CP1_CI_AS " 
+
+                fragment = value % cdef
+
+                if cdef['nullable']:
+                    fragment += "NULL "
+                else:
+                    fragment += "NOT NULL "
+
+                col_sql.append(fragment)
+
+            table_sql = '''
+            CREATE TABLE #temp_table ( 
+                djy_sql2k_sort_id int IDENTITY (1, 1) NOT NULL,
+                %s
+            ) 
+            ''' % ', \n'.join(col_sql)
+
+            create_cur = self.connection.cursor()
+            if DEBUG:
+                print table_sql
+            create_cur.execute(table_sql)
+            create_cur.close()
+
+            shim_cur = self.connection.cursor()
+            shim_col_names = ', '.join(["%s_0xdj_%s" % (cdef['table_alias'], cdef['name']) for cdef in col_defs])
+            shim_sql = "insert into #temp_table (%s) %s" % (shim_col_names, sql)
+            if DEBUG:
+                print "Insertion SQL: ", shim_sql
+                print "Insertion Params: ", params
+            shim_cur.execute(shim_sql, params)
+            shim_cur.close()
+
+            select_sql = "select %s, djy_sql2k_sort_id from #temp_table" % shim_col_names
+            if DEBUG:
+                print "Select SQL: ", select_sql
+
+            return select_sql
+
+    _classes[QueryClass] = SqlServerQuery
+    return SqlServerQuery
+
+
+def extract_colnames(j_sql):
+    '''
+    Return a list of (table_alias, col_name) pairs parsed from the SELECT list
+    '''
+    j_sql = j_sql.replace("SELECT ",'').strip()
+    j_sql = j_sql.replace("DISTINCT ",'').strip()
+    j_sql = j_sql[:j_sql.find(" FROM")]
+    return _tuplize(_tokenize(j_sql))
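+# Illustrative: extract_colnames("SELECT [a].[x], [a].[y] FROM [a]")
+# returns [['a', 'x'], ['a', 'y']].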
+
+
+def _tokenize(text):
+    '''
+    Split `text` on top-level commas, tracking parenthesis depth so that
+    commas nested inside brackets do not act as separators.
+    '''
+
+    stack = 0
+    buffer = ''
+    results = []
+    for ch in text:
+        if ch == ',' and stack == 0:
+            results.append(buffer.strip())
+            buffer = ''
+            continue
+        elif ch == '(':
+            stack += 1
+        elif ch == ')':
+            stack -= 1
+        buffer += ch
+    results.append(buffer)
+    return results
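+# Illustrative: _tokenize("a, COUNT(b, c) AS n") returns
+# ['a', ' COUNT(b, c) AS n'] - the comma inside COUNT(...) does not split,
+# and the trailing token keeps its leading space.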
+
+def _tuplize(col_names):
+    result = []
+    for cname in col_names:
+        if ' AS ' in cname:
+            col_alias = cname.split(' AS ')[-1].strip()
+        else:
+            col_alias = cname.strip()
+        parts = []
+        if '.' in col_alias:
+            for part in col_alias.split("."):
+                if part.startswith("[") and part.endswith("]"):
+                    # Strip the SQL Server [bracket] quoting
+                    parts.append(part[1:-1])
+                else:
+                    parts.append(part)
+        else:
+            parts = ['', col_alias]
+
+        result.append(parts)
+    return result
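+# Illustrative: _tuplize(['[t1].[col]', 'bare']) returns
+# [['t1', 'col'], ['', 'bare']].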
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2kext/management/commands/sql2kext.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,133 @@
+'''
+We define a custom command to install the stored procedures into MSSQL2K
+'''
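+# Invoked like any other management command, e.g.:
+#
+#   jython manage.py sql2kext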
+from django.core.management.base import NoArgsCommand
+from django.core.management.color import no_style
+
+class Command(NoArgsCommand):
+    option_list = NoArgsCommand.option_list 
+
+    help = "Install the stored procedures required to make SQL Server 2000 play nice"
+
+    def handle_noargs(self, **options):
+        from django.db import connection, transaction, models
+        from django.conf import settings
+        from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
+
+        verbosity = int(options.get('verbosity', 1))
+        interactive = options.get('interactive')
+        show_traceback = options.get('traceback', False)
+
+        self.style = no_style()
+
+        cursor = connection.cursor()
+
+        print "SQL Server 2000: Installing pagination stored procedure"
+        cursor.execute(RETURN_PAGE_SQL)
+        print "SQL Server 2000: Installing regular expression support" 
+        cursor.execute(REGEX_FUNC)
+
+        transaction.commit_unless_managed()
+
+
+
+RETURN_PAGE_SQL = '''
+CREATE PROCEDURE ReturnPage @query varchar(2000), @OrderBy varchar(2000),
+                            @StartRow int, @EndRow int
+AS
+BEGIN
+declare @ColList varchar(2000);
+declare @Where varchar(2000);
+declare @i int; 
+declare @i2 int;
+declare @tmp varchar(2000);
+declare @dec varchar(2000);
+declare @f varchar(100);
+declare @d varchar(100);
+declare @Symbol char(2);
+declare @SQL varchar(5000);
+declare @Sort varchar(2000);
+set @Sort = @OrderBy + ', '
+set @dec = ''
+set @Where  = ''
+set @SQL = ''
+set @i = charindex(',' , @Sort)
+while @i != 0
+ begin
+  set @tmp = left(@Sort,@i-1)
+  set @i2 = charindex(' ', @tmp)
+  set @f = ltrim(rtrim(left(@tmp,@i2-1)))
+  set @d = ltrim(rtrim(substring(@tmp,@i2+1,100)))
+  set @Sort = rtrim(ltrim(substring(@Sort,@i+1,100)))
+  set @i = charindex(',', @Sort)
+  set @symbol = case when @d = 'ASC' then '>' else '<' end +
+                case when @i=0 then '=' else '' end
+  set @dec = @dec + 'declare @' + @f + ' sql_variant; '
+  set @ColList = isnull(replace(replace(@colList,'>','='),'<','=') + ' and ','') +
+                 @f + @Symbol + ' @' + @f
+  set @Where = @Where + ' OR (' + @ColList + ') '
+  set @SQL = @SQL + ', @' + @f + '= ' + @f
+ end
+set @SQL = @dec + ' ' +
+           'SET ROWCOUNT ' + convert(varchar(10), @StartRow) + '; ' +
+           'SELECT ' + substring(@SQL,3,7000) + ' from (' + @query + ') a ORDER BY ' +
+           @OrderBy + '; ' + 'SET ROWCOUNT ' +
+           convert(varchar(10), 1 + @EndRow - @StartRow) + '; ' +
+           'select * from (' + @query + ') a WHERE ' +
+           substring(@Where,4,7000) + ' ORDER BY ' + @OrderBy + '; SET ROWCOUNT 0;'
+exec(@SQL)
+END
+'''
+
+REGEX_FUNC = '''
+CREATE FUNCTION dbo.regex
+	(
+		@source varchar(5000),
+		@regexp varchar(1000),
+		@ignorecase bit = 0
+	)
+RETURNS bit
+AS
+	BEGIN
+		DECLARE @hr integer
+		DECLARE @objRegExp integer
+		DECLARE @objMatches integer
+		DECLARE @objMatch integer
+		DECLARE @count integer
+		DECLARE @results bit
+		
+		EXEC @hr = sp_OACreate 'VBScript.RegExp', @objRegExp OUTPUT
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END
+		EXEC @hr = sp_OASetProperty @objRegExp, 'Pattern', @regexp
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END
+		EXEC @hr = sp_OASetProperty @objRegExp, 'Global', 0
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END
+		EXEC @hr = sp_OASetProperty @objRegExp, 'IgnoreCase', @ignorecase
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END	
+		EXEC @hr = sp_OAMethod @objRegExp, 'Test', @results OUTPUT, @source
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END
+		EXEC @hr = sp_OADestroy @objRegExp
+		IF @hr <> 0 BEGIN
+			SET @results = 0
+			RETURN @results
+		END
+	RETURN @results
+	END
+'''
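+
+# Illustrative T-SQL usage (hypothetical table [foo] with a [name] column):
+#
+#   SELECT * FROM foo WHERE dbo.regex(name, '^ba', 1) = 1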
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2kext/models.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,3 @@
+from django.db import models
+
+# Create your models here.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/jtds/mssql2kext/views.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,1 @@
+# Create your views here.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/setup.py	Sun Jan 25 00:02:22 2009 -0500
@@ -0,0 +1,14 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='django-jtds.mssql2k',
+    version='0.1',
+    author='Victor Ng',
+    author_email = 'crankycoder@gmail.com',
+    description = '''zxJDBC wrapper for jTDS backed databases''',
+    license = 'GNU LGPL',
+    packages = find_packages(),
+    zip_safe = False,
+    include_package_data = True,
+    )
+