
Source Code for Module web2py.gluon.dal

   1  #!/usr/bin/env python
   2  # -*- coding: utf-8 -*- 
   3   
   4  """ 
   5  This file is part of the web2py Web Framework 
   6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
   7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
   8   
   9  Thanks to 
  10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
  11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
  12      * Denes 
  13      * Chris Clark 
  14      * clach05 
  15      * Denes Lengyel 
  16      * and many others who have contributed to current and previous versions 
  17   
  18  This file contains the DAL support for many relational databases, 
  19  including: 
  20  - SQLite 
  21  - MySQL 
  22  - Postgres 
  23  - Oracle 
  24  - MS SQL 
  25  - DB2 
  26  - Interbase 
  27  - Ingres 
  28  - SapDB (experimental) 
  29  - Cubrid (experimental) 
  30  - CouchDB (experimental) 
  31  - MongoDB (in progress) 
  32  - Google:nosql 
  33  - Google:sql 
  34   
  35  Example of usage: 
  36   
  37  >>> # from dal import DAL, Field 
  38   
  39  ### create DAL connection (and create DB if not exists) 
  40  >>> db=DAL(('mysql://a:b@localhost/x','sqlite://storage.sqlite'),folder=None)
  41   
  42  ### define a table 'person' (create/alter as necessary)
  43  >>> person = db.define_table('person',Field('name','string')) 
  44   
  45  ### insert a record 
  46  >>> id = person.insert(name='James') 
  47   
  48  ### retrieve it by id 
  49  >>> james = person(id) 
  50   
  51  ### retrieve it by name 
  52  >>> james = person(name='James') 
  53   
  54  ### retrieve it by arbitrary query 
  55  >>> query = (person.name=='James')&(person.name.startswith('J')) 
  56  >>> james = db(query).select(person.ALL)[0] 
  57   
  58  ### update one record 
  59  >>> james.update_record(name='Jim') 
  60   
  61  ### update multiple records by query 
  62  >>> db(person.name.like('J%')).update(name='James') 
  63  1 
  64   
  65  ### delete records by query 
  66  >>> db(person.name.lower()=='jim').delete() 
  67  0 
  68   
  69  ### retrieve multiple records (rows) 
  70  >>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100)) 
  71   
  72  ### further filter them 
  73  >>> james = people.find(lambda row: row.name=='James').first() 
  74  >>> print james.id, james.name 
  75  1 James 
  76   
  77  ### check aggregates
  78  >>> counter = person.id.count() 
  79  >>> print db(person).select(counter).first()(counter) 
  80  1 
  81   
  82  ### delete one record 
  83  >>> james.delete_record() 
  84  1 
  85   
  86  ### delete (drop) entire database table 
  87  >>> person.drop() 
  88   
  89  Supported field types: 
  90  id string text boolean integer double decimal password upload blob time date datetime
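
### a sketch exercising several of these types in Field definitions
### ('person2' and its fields are hypothetical, for illustration only)
>>> # person2 = db.define_table('person2',
>>> #     Field('name','string',length=128),
>>> #     Field('bio','text'),
>>> #     Field('active','boolean',default=True),
>>> #     Field('balance','decimal(10,2)'),
>>> #     Field('born','date'))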
  91   
  92  Supported DAL URI strings: 
  93  'sqlite://test.db' 
  94  'sqlite:memory' 
  95  'jdbc:sqlite://test.db' 
  96  'mysql://root:none@localhost/test' 
  97  'postgres://mdipierro:none@localhost/test' 
  98  'jdbc:postgres://mdipierro:none@localhost/test' 
  99  'mssql://web2py:none@A64X2/web2py_test' 
 100  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
 101  'oracle://username:password@database' 
 102  'firebird://user:password@server:3050/database' 
 103  'db2://DSN=dsn;UID=user;PWD=pass' 
 104  'firebird://username:password@hostname/database' 
 105  'firebird_embedded://username:password@c://path' 
 106  'informix://user:password@server:3050/database' 
 107  'informixu://user:password@server:3050/database' # unicode informix 
 108  'google:datastore' # for google app engine datastore 
 109  'google:sql' # for google app engine with sql (mysql compatible) 
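
### pool_size and folder are optional DAL keyword arguments (see the adapter
### signatures below); 'mydb' here is a hypothetical database name
>>> # db = DAL('postgres://user:password@localhost/mydb', pool_size=10)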
 110   
 111  For more info: 
 112  help(DAL) 
 113  help(Field) 
 114  """ 
 115   
 116  ################################################################################### 
  117  # this file only exposes DAL and Field
 118  ################################################################################### 
 119   
 120  __all__ = ['DAL', 'Field'] 
 121  MAXCHARLENGTH = 512 
 122  INFINITY = 32768 # not quite but reasonable default max varchar length 
 123   
 124  import re 
 125  import sys 
 126  import locale 
 127  import os 
 128  import types 
 129  import cPickle 
 130  import datetime 
 131  import threading 
 132  import time 
 133  import cStringIO 
 134  import csv 
 135  import copy 
 136  import socket 
 137  import logging 
 138  import copy_reg 
 139  import base64 
 140  import shutil 
 141  import marshal 
 142  import decimal 
 143  import struct 
 144  import urllib 
 145  import hashlib 
 146  import uuid 
 147  import glob 
 148   
 149  CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, 
 150                   types.MethodType, types.BuiltinMethodType) 
 151   
 152   
 153  ################################################################################### 
  154  # following checks allow running of dal without web2py as a standalone module
 155  ################################################################################### 
 156  try: 
 157      from utils import web2py_uuid 
 158  except ImportError: 
 159      import uuid 
160 - def web2py_uuid(): return str(uuid.uuid4())
  161
  162  try:
  163      import portalocker
  164      have_portalocker = True
  165  except ImportError:
  166      have_portalocker = False
  167
  168  try:
  169      import serializers
  170      have_serializers = True
  171  except ImportError:
  172      have_serializers = False
  173
  174  try:
  175      import validators
  176      have_validators = True
  177  except ImportError:
  178      have_validators = False
  179
  180  logger = logging.getLogger("web2py.dal")
  181  DEFAULT = lambda:0
  182
  183  sql_locker = threading.RLock()
  184  thread = threading.local()
  185
  186  # internal representation of tables with field
  187  # <table>.<field>, tables and fields may only be [a-zA-Z0-9_]
  188
  189  regex_dbname = re.compile('^(\w+)(\:\w+)*')
  190  table_field = re.compile('^[\w_]+\.[\w_]+$')
  191  regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
  192  regex_cleanup_fn = re.compile('[\'"\s;]+')
  193  string_unpack=re.compile('(?<!\|)\|(?!\|)')
  194  regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
  195
  196
  197
  198  # list of drivers will be built on the fly
  199  # and lists only what is available
  200  drivers = []
  201
  202  try:
  203      from pysqlite2 import dbapi2 as sqlite3
  204      drivers.append('pysqlite2')
  205  except ImportError:
  206      try:
  207          from sqlite3 import dbapi2 as sqlite3
  208          drivers.append('SQLite3')
  209      except ImportError:
  210          logger.debug('no sqlite3 or pysqlite2.dbapi2 driver')
  211
  212  try:
  213      import contrib.pymysql as pymysql
  214      drivers.append('pymysql')
  215  except ImportError:
  216      logger.debug('no pymysql driver')
  217
  218  try:
  219      import psycopg2
  220      drivers.append('PostgreSQL')
  221  except ImportError:
  222      logger.debug('no psycopg2 driver')
  223
  224  try:
  225      import cx_Oracle
  226      drivers.append('Oracle')
  227  except ImportError:
  228      logger.debug('no cx_Oracle driver')
  229
  230  try:
  231      import pyodbc
  232      drivers.append('MSSQL/DB2')
  233  except ImportError:
  234      logger.debug('no MSSQL/DB2 driver')
  235
  236  try:
  237      import kinterbasdb
  238      drivers.append('Interbase')
  239  except ImportError:
  240      logger.debug('no kinterbasdb driver')
  241
  242  try:
  243      import firebirdsql
  244      drivers.append('Firebird')
  245  except ImportError:
  246      logger.debug('no Firebird driver')
  247
  248  try:
  249      import informixdb
  250      drivers.append('Informix')
  251      logger.warning('Informix support is experimental')
  252  except ImportError:
  253      logger.debug('no informixdb driver')
  254
  255  try:
  256      import sapdb
  257      drivers.append('SAPDB')
  258      logger.warning('SAPDB support is experimental')
  259  except ImportError:
  260      logger.debug('no sapdb driver')
  261
  262  try:
  263      import cubriddb
  264      drivers.append('Cubrid')
  265      logger.warning('Cubrid support is experimental')
  266  except ImportError:
  267      logger.debug('no cubriddb driver')
  268
  269  try:
  270      from com.ziclix.python.sql import zxJDBC
  271      import java.sql
  272      from org.sqlite import JDBC # required later by java.sql; ensure we have it
  273      drivers.append('zxJDBC')
  274      logger.warning('zxJDBC support is experimental')
  275      is_jdbc = True
  276  except ImportError:
  277      logger.debug('no zxJDBC driver')
  278      is_jdbc = False
  279
  280  try:
  281      import ingresdbi
  282      drivers.append('Ingres')
  283  except ImportError:
  284      logger.debug('no Ingres driver')
  285  # NOTE could try JDBC.......
  286
  287  try:
  288      from new import classobj
  289      from google.appengine.ext import db as gae
  290      from google.appengine.api import namespace_manager, rdbms
  291      from google.appengine.api.datastore_types import Key  ### needed for belongs on ID
  292      from google.appengine.ext.db.polymodel import PolyModel
  293
  294      drivers.append('google')
  295
296 - class GAEDecimalProperty(gae.Property):
297 """ 298 GAE decimal implementation 299 """ 300 data_type = decimal.Decimal 301
302 - def __init__(self, precision, scale, **kwargs):
  303          super(GAEDecimalProperty, self).__init__(**kwargs)
  304          d = '1.'
  305          for x in range(scale):
  306              d += '0'
  307          self.round = decimal.Decimal(d)
308
309 - def get_value_for_datastore(self, model_instance):
  310          value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance)
  311          if value:
  312              return str(value)
  313          else:
  314              return None
315
316 - def make_value_from_datastore(self, value):
  317          if value:
  318              return decimal.Decimal(value).quantize(self.round)
  319          else:
  320              return None
321
322 - def validate(self, value):
  323          value = super(GAEDecimalProperty, self).validate(value)
  324          if value is None or isinstance(value, decimal.Decimal):
  325              return value
  326          elif isinstance(value, basestring):
  327              return decimal.Decimal(value)
  328          raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
  329
  330  except ImportError:
  331      pass
  332
  333  ###################################################################################
  334  # class that handles connection pooling (all adapters derived from this one)
  335  ###################################################################################
  336
337 -class ConnectionPool(object):
  338
  339      pools = {}
  340
  341      @staticmethod
342 - def set_folder(folder):
343 thread.folder = folder
  344
  345      # ## this allows gluon to commit/rollback all dbs in this thread
  346
  347      @staticmethod
348 - def close_all_instances(action):
349 """ to close cleanly databases in a multithreaded environment """ 350 if not hasattr(thread,'instances'): 351 return 352 while thread.instances: 353 instance = thread.instances.pop() 354 getattr(instance,action)() 355 # ## if you want pools, recycle this connection 356 really = True 357 if instance.pool_size: 358 sql_locker.acquire() 359 pool = ConnectionPool.pools[instance.uri] 360 if len(pool) < instance.pool_size: 361 pool.append(instance.connection) 362 really = False 363 sql_locker.release() 364 if really: 365 getattr(instance,'close')() 366 return
367
368 - def find_or_make_work_folder(self):
369 """ this actually does not make the folder. it has to be there """ 370 if hasattr(thread,'folder'): 371 self.folder = thread.folder 372 else: 373 self.folder = thread.folder = '' 374 375 # Creating the folder if it does not exist 376 if False and self.folder and not os.path.exists(self.folder): 377 os.mkdir(self.folder)
378
379 - def pool_connection(self, f):
  380          if not self.pool_size:
  381              self.connection = f()
  382          else:
  383              uri = self.uri
  384              sql_locker.acquire()
  385              if not uri in ConnectionPool.pools:
  386                  ConnectionPool.pools[uri] = []
  387              if ConnectionPool.pools[uri]:
  388                  self.connection = ConnectionPool.pools[uri].pop()
  389                  sql_locker.release()
  390              else:
  391                  sql_locker.release()
  392                  self.connection = f()
  393          if not hasattr(thread,'instances'):
  394              thread.instances = []
  395          thread.instances.append(self)
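
    # How adapters use pool_connection (a sketch; see SQLiteAdapter.__init__
    # below for a real call site): each __init__ builds a zero-argument
    # connect() callable and hands it over, so a pooled connection for the
    # same URI is reused when one is available.
    #
    #     def connect(driver_args=driver_args):
    #         return some_driver.connect(**driver_args)  # some_driver is hypothetical
    #     self.pool_connection(connect)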
  396
  397
  398  ###################################################################################
  399  # this is a generic adapter that does nothing; all others are derived from this one
  400  ###################################################################################
  401
402 -class BaseAdapter(ConnectionPool):
  403
  404      maxcharlength = INFINITY
  405      commit_on_alter_table = False
  406      support_distributed_transaction = False
  407      uploads_in_blob = False
  408      types = {
  409          'boolean': 'CHAR(1)',
  410          'string': 'CHAR(%(length)s)',
  411          'text': 'TEXT',
  412          'password': 'CHAR(%(length)s)',
  413          'blob': 'BLOB',
  414          'upload': 'CHAR(%(length)s)',
  415          'integer': 'INTEGER',
  416          'double': 'DOUBLE',
  417          'decimal': 'DOUBLE',
  418          'date': 'DATE',
  419          'time': 'TIME',
  420          'datetime': 'TIMESTAMP',
  421          'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
  422          'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
  423          'list:integer': 'TEXT',
  424          'list:string': 'TEXT',
  425          'list:reference': 'TEXT',
  426          }
  427
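
    # The types dictionary maps DAL field types to backend DDL fragments;
    # create_table below substitutes %(length)s, %(precision)s/%(scale)s and
    # the foreign-key placeholders. A sketch of a dialect override
    # (MyAdapter is hypothetical):
    #
    #     class MyAdapter(BaseAdapter):
    #         types = dict(BaseAdapter.types, text='VARCHAR(%(length)s)')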
428 - def file_exists(self, filename):
429 """ 430 to be used ONLY for files that on GAE may not be on filesystem 431 """ 432 return os.path.exists(filename)
433
434 - def file_open(self, filename, mode='rb', lock=True):
435 """ 436 to be used ONLY for files that on GAE may not be on filesystem 437 """ 438 fileobj = open(filename,mode) 439 if have_portalocker and lock: 440 if mode in ('r','rb'): 441 portalocker.lock(fileobj,portalocker.LOCK_SH) 442 elif mode in ('w','wb','a'): 443 portalocker.lock(fileobj,portalocker.LOCK_EX) 444 else: 445 raise RuntimeError, "Unsupported file_open mode" 446 return fileobj
447
448 - def file_close(self, fileobj, unlock=True):
449 """ 450 to be used ONLY for files that on GAE may not be on filesystem 451 """ 452 if fileobj: 453 if have_portalocker and unlock: 454 portalocker.unlock(fileobj) 455 fileobj.close()
456
457 - def file_delete(self, filename):
458 os.unlink(filename)
459
460 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 461 credential_decoder=lambda x:x, driver_args={}, 462 adapter_args={}):
  463          self.db = db
  464          self.dbengine = "None"
  465          self.uri = uri
  466          self.pool_size = pool_size
  467          self.folder = folder
  468          self.db_codec = db_codec
  469          class Dummy(object):
  470              lastrowid = 1
  471              def __getattr__(self, value):
  472                  return lambda *a, **b: []
  473          self.connection = Dummy()
  474          self.cursor = Dummy()
  475
476 - def sequence_name(self,tablename):
477 return '%s_sequence' % tablename
478
479 - def trigger_name(self,tablename):
480 return '%s_sequence' % tablename
481 482
483 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
  484          fields = []
  485          sql_fields = {}
  486          sql_fields_aux = {}
  487          TFK = {}
  488          tablename = table._tablename
  489          sortable = 0
  490          for field in table:
  491              sortable += 1
  492              k = field.name
  493              if isinstance(field.type,SQLCustomType):
  494                  ftype = field.type.native or field.type.type
  495              elif field.type.startswith('reference'):
  496                  referenced = field.type[10:].strip()
  497                  constraint_name = self.constraint_name(tablename, field.name)
  498                  if hasattr(table,'_primarykey'):
  499                      rtablename,rfieldname = referenced.split('.')
  500                      rtable = table._db[rtablename]
  501                      rfield = rtable[rfieldname]
  502                      # must be PK reference or unique
  503                      if rfieldname in rtable._primarykey or rfield.unique:
  504                          ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
  505                          # multicolumn primary key reference?
  506                          if not rfield.unique and len(rtable._primarykey)>1 :
  507                              # then it has to be a table level FK
  508                              if rtablename not in TFK:
  509                                  TFK[rtablename] = {}
  510                              TFK[rtablename][rfieldname] = field.name
  511                          else:
  512                              ftype = ftype + \
  513                                  self.types['reference FK'] %dict(\
  514                                  constraint_name=constraint_name,
  515                                  table_name=tablename,
  516                                  field_name=field.name,
  517                                  foreign_key='%s (%s)'%(rtablename, rfieldname),
  518                                  on_delete_action=field.ondelete)
  519                  else:
  520                      # make a guess here for circular references
  521                      id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
  522                      ftype = self.types[field.type[:9]]\
  523                          % dict(table_name=tablename,
  524                                 field_name=field.name,
  525                                 constraint_name=constraint_name,
  526                                 foreign_key=referenced + ('(%s)' % id_fieldname),
  527                                 on_delete_action=field.ondelete)
  528              elif field.type.startswith('list:reference'):
  529                  ftype = self.types[field.type[:14]]
  530              elif field.type.startswith('decimal'):
  531                  precision, scale = [int(x) for x in field.type[8:-1].split(',')]
  532                  ftype = self.types[field.type[:7]] % \
  533                      dict(precision=precision,scale=scale)
  534              elif not field.type in self.types:
  535                  raise SyntaxError, 'Field: unknown field type: %s for %s' % \
  536                      (field.type, field.name)
  537              else:
  538                  ftype = self.types[field.type]\
  539                      % dict(length=field.length)
  540              if not field.type.startswith('id') and not field.type.startswith('reference'):
  541                  if field.notnull:
  542                      ftype += ' NOT NULL'
  543                  else:
  544                      ftype += self.ALLOW_NULL()
  545                  if field.unique:
  546                      ftype += ' UNIQUE'
  547
  548              # add to list of fields
  549              sql_fields[field.name] = dict(sortable=sortable,
  550                                            type=str(field.type),
  551                                            sql=ftype)
  552
  553              if isinstance(field.default,(str,int,float)):
  554                  # caveat: sql_fields and sql_fields_aux differ for default values
  555                  # sql_fields is used to trigger migrations and sql_fields_aux
  556                  # are used for create table
  557                  # the reason is that we do not want to trigger a migration simply
  558                  # because a default value changes
  559                  not_null = self.NOT_NULL(field.default,field.type)
  560                  ftype = ftype.replace('NOT NULL',not_null)
  561                  sql_fields_aux[field.name] = dict(sql=ftype)
  562
  563              fields.append('%s %s' % (field.name, ftype))
  564          other = ';'
  565
  566          # backend-specific extensions to fields
  567          if self.dbengine == 'mysql':
  568              if not hasattr(table, "_primarykey"):
  569                  fields.append('PRIMARY KEY(%s)' % table._id.name)
  570              other = ' ENGINE=InnoDB CHARACTER SET utf8;'
  571
  572          fields = ',\n    '.join(fields)
  573          for rtablename in TFK:
  574              rfields = TFK[rtablename]
  575              pkeys = table._db[rtablename]._primarykey
  576              fkeys = [ rfields[k] for k in pkeys ]
  577              fields = fields + ',\n    ' + \
  578                  self.types['reference TFK'] %\
  579                  dict(table_name=tablename,
  580                       field_name=', '.join(fkeys),
  581                       foreign_table=rtablename,
  582                       foreign_key=', '.join(pkeys),
  583                       on_delete_action=field.ondelete)
  584
  585          if hasattr(table,'_primarykey'):
  586              query = '''CREATE TABLE %s(\n    %s,\n    %s) %s''' % \
  587                  (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other)
  588          else:
  589              query = '''CREATE TABLE %s(\n    %s\n)%s''' % \
  590                  (tablename, fields, other)
  591
  592          if self.uri.startswith('sqlite:///'):
  593              path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
  594              dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
  595          else:
  596              dbpath = self.folder
  597
  598          if not migrate:
  599              return query
  600          elif self.uri.startswith('sqlite:memory'):
  601              table._dbt = None
  602          elif isinstance(migrate, str):
  603              table._dbt = os.path.join(dbpath, migrate)
  604          else:
  605              table._dbt = os.path.join(dbpath, '%s_%s.table' \
  606                  % (table._db._uri_hash, tablename))
  607          if table._dbt:
  608              table._loggername = os.path.join(dbpath, 'sql.log')
  609              logfile = self.file_open(table._loggername, 'a')
  610          else:
  611              logfile = None
  612          if not table._dbt or not self.file_exists(table._dbt):
  613              if table._dbt:
  614                  logfile.write('timestamp: %s\n'
  615                                % datetime.datetime.today().isoformat())
  616                  logfile.write(query + '\n')
  617              if not fake_migrate:
  618                  self.create_sequence_and_triggers(query,table)
  619                  table._db.commit()
  620              if table._dbt:
  621                  tfile = self.file_open(table._dbt, 'w')
  622                  cPickle.dump(sql_fields, tfile)
  623                  self.file_close(tfile)
  624                  if fake_migrate:
  625                      logfile.write('faked!\n')
  626                  else:
  627                      logfile.write('success!\n')
  628          else:
  629              tfile = self.file_open(table._dbt, 'r')
  630              try:
  631                  sql_fields_old = cPickle.load(tfile)
  632              except EOFError:
  633                  self.file_close(tfile)
  634                  self.file_close(logfile)
  635                  raise RuntimeError, 'File %s appears corrupted' % table._dbt
  636              self.file_close(tfile)
  637              if sql_fields != sql_fields_old:
  638                  self.migrate_table(table,
  639                                     sql_fields, sql_fields_old,
  640                                     sql_fields_aux, logfile,
  641                                     fake_migrate=fake_migrate)
  642          self.file_close(logfile)
  643          return query
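
    # Usage sketch: define_table forwards migrate/fake_migrate here. With
    # migrate=False the DDL string is returned without being executed; a
    # string value names the .table migration file explicitly:
    #
    #     db.define_table('person', Field('name'), migrate=False)
    #     db.define_table('person', Field('name'), migrate='person.table')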
644
645 - def migrate_table( 646 self, 647 table, 648 sql_fields, 649 sql_fields_old, 650 sql_fields_aux, 651 logfile, 652 fake_migrate=False, 653 ):
  654          tablename = table._tablename
  655          def fix(item):
  656              k,v=item
  657              if not isinstance(v,dict):
  658                  v=dict(type='unknown',sql=v)
  659              return k.lower(),v
  660          ### make sure all field names are lower case to avoid conflicts
  661          sql_fields = dict(fix(v) for v in sql_fields.items())
  662          sql_fields_old = dict(fix(v) for v in sql_fields_old.items())
  663          sql_fields_aux = dict(fix(v) for v in sql_fields_aux.items())
  664
  665          keys = sql_fields.keys()
  666          for key in sql_fields_old:
  667              if not key in keys:
  668                  keys.append(key)
  669          if self.dbengine == 'mssql':
  670              new_add = '; ALTER TABLE %s ADD ' % tablename
  671          else:
  672              new_add = ', ADD '
  673
  674          metadata_change = False
  675          sql_fields_current = copy.copy(sql_fields_old)
  676          for key in keys:
  677              query = None
  678              if not key in sql_fields_old:
  679                  sql_fields_current[key] = sql_fields[key]
  680                  query = ['ALTER TABLE %s ADD %s %s;' % \
  681                           (tablename, key,
  682                            sql_fields_aux[key]['sql'].replace(', ', new_add))]
  683                  metadata_change = True
  684              elif self.dbengine == 'sqlite':
  685                  if key in sql_fields:
  686                      sql_fields_current[key] = sql_fields[key]
  687                  metadata_change = True
  688              elif not key in sql_fields:
  689                  del sql_fields_current[key]
  690                  if not self.dbengine in ('firebird',):
  691                      query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
  692                  else:
  693                      query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
  694                  metadata_change = True
  695              elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
  696                    and not isinstance(table[key].type, SQLCustomType) \
  697                    and not (table[key].type.startswith('reference') and \
  698                    sql_fields[key]['sql'].startswith('INT,') and \
  699                    sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
  700                  sql_fields_current[key] = sql_fields[key]
  701                  t = tablename
  702                  tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
  703                  if not self.dbengine in ('firebird',):
  704                      query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
  705                               'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
  706                               'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
  707                               'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
  708                               'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
  709                               'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
  710                  else:
  711                      query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
  712                               'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
  713                               'ALTER TABLE %s DROP %s;' % (t, key),
  714                               'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
  715                               'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
  716                               'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
  717                  metadata_change = True
  718              elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
  719                  sql_fields_current[key] = sql_fields[key]
  720                  metadata_change = True
  721
  722              if query:
  723                  logfile.write('timestamp: %s\n'
  724                                % datetime.datetime.today().isoformat())
  725                  table._db['_lastsql'] = '\n'.join(query)
  726                  for sub_query in query:
  727                      logfile.write(sub_query + '\n')
  728                      if not fake_migrate:
  729                          self.execute(sub_query)
  730                          # caveat. mysql, oracle and firebird do not allow multiple alter table
  731                          # in one transaction so we must commit partial transactions and
  732                          # update table._dbt after alter table.
  733                          if table._db._adapter.commit_on_alter_table:
  734                              table._db.commit()
  735                              tfile = self.file_open(table._dbt, 'w')
  736                              cPickle.dump(sql_fields_current, tfile)
  737                              self.file_close(tfile)
  738                          logfile.write('success!\n')
  739                      else:
  740                          logfile.write('faked!\n')
  741              elif metadata_change:
  742                  tfile = self.file_open(table._dbt, 'w')
  743                  cPickle.dump(sql_fields_current, tfile)
  744                  self.file_close(tfile)
  745
  746          if metadata_change and \
  747              not (query and self.dbengine in ('mysql','oracle','firebird')):
  748              table._db.commit()
  749              tfile = self.file_open(table._dbt, 'w')
  750              cPickle.dump(sql_fields_current, tfile)
  751              self.file_close(tfile)
  752
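
    # Migration sketch: adding a field to an existing definition hits the
    # "not key in sql_fields_old" branch above on the next run, roughly:
    #
    #     db.define_table('person', Field('name'), Field('age','integer'))
    #     # emits: ALTER TABLE person ADD age INTEGER;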
753 - def LOWER(self,first):
754 return 'LOWER(%s)' % self.expand(first)
755
756 - def UPPER(self,first):
757 return 'UPPER(%s)' % self.expand(first)
758
759 - def EXTRACT(self,first,what):
760 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
761
762 - def AGGREGATE(self,first,what):
763 return "%s(%s)" % (what,self.expand(first))
764
765 - def JOIN(self):
766 return 'JOIN'
767
768 - def LEFT_JOIN(self):
769 return 'LEFT JOIN'
770
771 - def RANDOM(self):
772 return 'Random()'
773
774 - def NOT_NULL(self,default,field_type):
775 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
776
777 - def COALESCE_ZERO(self,first):
778 return 'COALESCE(%s,0)' % self.expand(first)
779
780 - def ALLOW_NULL(self):
781 return ''
782
783 - def SUBSTRING(self,field,parameters):
784 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
785
786 - def PRIMARY_KEY(self,key):
787 return 'PRIMARY KEY(%s)' % key
788
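
    # These fragments are assembled by expand() and _select() below. A sketch
    # of the mapping, reusing the docstring's 'person' table:
    #
    #     db(person.name.upper() == 'JIM')._select(person.id)
    #     # -> roughly: SELECT person.id FROM person WHERE (UPPER(person.name) = 'JIM');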
789 - def _drop(self,table,mode):
790 return ['DROP TABLE %s;' % table]
791
792 - def drop(self, table, mode=''):
793 if table._dbt: 794 logfile = self.file_open(table._loggername, 'a') 795 queries = self._drop(table, mode) 796 for query in queries: 797 if table._dbt: 798 logfile.write(query + '\n') 799 self.execute(query) 800 table._db.commit() 801 del table._db[table._tablename] 802 del table._db.tables[table._db.tables.index(table._tablename)] 803 table._db._update_referenced_by(table._tablename) 804 if table._dbt: 805 self.file_delete(table._dbt) 806 logfile.write('success!\n')
807
808 - def _insert(self,table,fields):
809 keys = ','.join(f.name for f,v in fields) 810 values = ','.join(self.expand(v,f.type) for f,v in fields) 811 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
812
813 - def insert(self,table,fields):
  814          query = self._insert(table,fields)
  815          try:
  816              self.execute(query)
  817          except Exception, e:
  818              if isinstance(e,self.integrity_error_class()):
  819                  return None
  820              raise e
  821          if hasattr(table,'_primarykey'):
  822              return dict([(k[0].name, k[1]) for k in fields \
  823                           if k[0].name in table._primarykey])
  824          id = self.lastrowid(table)
  825          if not isinstance(id,int):
  826              return id
  827          rid = Reference(id)
  828          (rid._table, rid._record) = (table, None)
  829          return rid
830
831 - def bulk_insert(self,table,items):
832 return [self.insert(table,item) for item in items]
833
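
    # Usage sketch: at the Table level this is reached as
    #
    #     db.person.bulk_insert([dict(name='Alex'), dict(name='Bo')])
    #
    # returning one id (or primary-key dict) per inserted row.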
834 - def NOT(self,first):
835 return '(NOT %s)' % self.expand(first)
836
837 - def AND(self,first,second):
838 return '(%s AND %s)' % (self.expand(first),self.expand(second))
839
840 - def OR(self,first,second):
841 return '(%s OR %s)' % (self.expand(first),self.expand(second))
842
843 - def BELONGS(self,first,second):
844 if isinstance(second,str): 845 return '(%s IN (%s))' % (self.expand(first),second[:-1]) 846 return '(%s IN (%s))' % (self.expand(first), 847 ','.join(self.expand(item,first.type) for item in second))
848
849 - def LIKE(self,first,second):
850 return '(%s LIKE %s)' % (self.expand(first),self.expand(second,'string'))
851
852 - def STARTSWITH(self,first,second):
853 return '(%s LIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
854
855 - def ENDSWITH(self,first,second):
856 return '(%s LIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
857
858 - def CONTAINS(self,first,second):
859 if first.type in ('string','text'): 860 key = '%'+str(second).replace('%','%%')+'%' 861 elif first.type.startswith('list:'): 862 key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' 863 return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
864
865 - def EQ(self,first,second=None):
866 if second is None: 867 return '(%s IS NULL)' % self.expand(first) 868 return '(%s = %s)' % (self.expand(first),self.expand(second,first.type))
869
870 - def NE(self,first,second=None):
871 if second is None: 872 return '(%s IS NOT NULL)' % self.expand(first) 873 return '(%s <> %s)' % (self.expand(first),self.expand(second,first.type))
874
875 - def LT(self,first,second=None):
876 return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))
877
878 - def LE(self,first,second=None):
879 return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))
880
881 - def GT(self,first,second=None):
882 return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))
883
884 - def GE(self,first,second=None):
885 return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))
886
887 - def ADD(self,first,second):
888 return '(%s + %s)' % (self.expand(first),self.expand(second,first.type))
889
890 - def SUB(self,first,second):
891 return '(%s - %s)' % (self.expand(first),self.expand(second,first.type))
892
893 - def MUL(self,first,second):
894 return '(%s * %s)' % (self.expand(first),self.expand(second,first.type))
895
896 - def DIV(self,first,second):
897 return '(%s / %s)' % (self.expand(first),self.expand(second,first.type))
898
899 - def MOD(self,first,second):
900 return '(%s %% %s)' % (self.expand(first),self.expand(second,first.type))
901
902 - def AS(self,first,second):
903 return '%s AS %s' % (self.expand(first),second)
904
905 - def ON(self,first,second):
906 return '%s ON %s' % (self.expand(first),self.expand(second))
907
908 - def INVERT(self,first):
909 return '%s DESC' % self.expand(first)
910
911 - def COMMA(self,first,second):
912 return '%s, %s' % (self.expand(first),self.expand(second))
913
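
    # These methods are reached through the overloaded Python operators on
    # Field/Expression objects. A sketch with the docstring's 'person':
    #
    #     query = (person.id > 3) & ~(person.name == 'Jim')
    #     # expands to: ((person.id > 3) AND (NOT (person.name = 'Jim')))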
914 - def expand(self,expression,field_type=None):
  915          if isinstance(expression,Field):
  916              return str(expression)
  917          elif isinstance(expression, (Expression, Query)):
  918              if not expression.second is None:
  919                  return expression.op(expression.first, expression.second)
  920              elif not expression.first is None:
  921                  return expression.op(expression.first)
  922              else:
  923                  return expression.op()
  924          elif field_type:
  925              return self.represent(expression,field_type)
  926          elif isinstance(expression,(list,tuple)):
  927              return ','.join([self.represent(item,field_type) for item in expression])
  928          else:
  929              return str(expression)
930
931 - def alias(self,table,alias):
932 """ 933 given a table object, makes a new table object 934 with alias name. 935 """ 936 other = copy.copy(table) 937 other['_ot'] = other._tablename 938 other['ALL'] = SQLALL(other) 939 other['_tablename'] = alias 940 for fieldname in other.fields: 941 other[fieldname] = copy.copy(other[fieldname]) 942 other[fieldname]._tablename = alias 943 other[fieldname].tablename = alias 944 other[fieldname].table = other 945 table._db[alias] = table 946 return other
947
948 - def _truncate(self,table,mode = ''):
949 tablename = table._tablename 950 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
951
952 - def truncate(self,table,mode= ' '):
953 if table._dbt: 954 logfile = self.file_open(table._loggername, 'a') 955 queries = table._db._adapter._truncate(table, mode) 956 for query in queries: 957 if table._dbt: 958 logfile.write(query + '\n') 959 self.execute(query) 960 table._db.commit() 961 if table._dbt: 962 logfile.write('success!\n')
963
964 - def _update(self,tablename,query,fields):
965 if query: 966 sql_w = ' WHERE ' + self.expand(query) 967 else: 968 sql_w = '' 969 sql_v = ','.join(['%s=%s' % (field.name, self.expand(value,field.type)) for (field,value) in fields]) 970 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
971
972 - def update(self,tablename,query,fields):
973 sql = self._update(tablename,query,fields) 974 self.execute(sql) 975 try: 976 return self.cursor.rowcount 977 except: 978 return None
979
980 - def _delete(self,tablename, query):
981 if query: 982 sql_w = ' WHERE ' + self.expand(query) 983 else: 984 sql_w = '' 985 return 'DELETE FROM %s%s;' % (tablename, sql_w)
986
987 - def delete(self,tablename,query):
  988          sql = self._delete(tablename,query)
  989          ### special code to handle CASCADE in SQLite
  990          db = self.db
  991          table = db[tablename]
  992          if self.dbengine=='sqlite' and table._referenced_by:
  993              deleted = [x[table._id.name] for x in db(query).select(table._id)]
  994          ### end special code to handle CASCADE in SQLite
  995          self.execute(sql)
  996          try:
  997              counter = self.cursor.rowcount
  998          except:
  999              counter = None
 1000          ### special code to handle CASCADE in SQLite
 1001          if self.dbengine=='sqlite' and counter:
 1002              for tablename,fieldname in table._referenced_by:
 1003                  f = db[tablename][fieldname]
 1004                  if f.type=='reference '+table._tablename and f.ondelete=='CASCADE':
 1005                      db(db[tablename][fieldname].belongs(deleted)).delete()
 1006          ### end special code to handle CASCADE in SQLite
 1007          return counter
1008
1009 - def get_table(self,query):
1010 tablenames = self.tables(query) 1011 if len(tablenames)==1: 1012 return tablenames[0] 1013 elif len(tablenames)<1: 1014 raise RuntimeError, "No table selected" 1015 else: 1016 raise RuntimeError, "Too many tables selected"
1017
1018 - def _select(self, query, fields, attributes):
 1019          for key in set(attributes.keys())-set(('orderby','groupby','limitby',
 1020                                                 'required','cache','left',
 1021                                                 'distinct','having', 'join')):
 1022              raise SyntaxError, 'invalid select attribute: %s' % key
 1023          # ## if not fields specified take them all from the requested tables
 1024          new_fields = []
 1025          for item in fields:
 1026              if isinstance(item,SQLALL):
 1027                  new_fields += item.table
 1028              else:
 1029                  new_fields.append(item)
 1030          fields = new_fields
 1031          tablenames = self.tables(query)
 1032          query = self.filter_tenant(query,tablenames)
 1033          if not fields:
 1034              for table in tablenames:
 1035                  for field in self.db[table]:
 1036                      fields.append(field)
 1037          else:
 1038              for field in fields:
 1039                  if isinstance(field,basestring) and table_field.match(field):
 1040                      tn,fn = field.split('.')
 1041                      field = self.db[tn][fn]
 1042                  for tablename in self.tables(field):
 1043                      if not tablename in tablenames:
 1044                          tablenames.append(tablename)
 1045          if len(tablenames) < 1:
 1046              raise SyntaxError, 'Set: no tables selected'
 1047          sql_f = ', '.join([self.expand(f) for f in fields])
 1048          self._colnames = [c.strip() for c in sql_f.split(', ')]
 1049          if query:
 1050              sql_w = ' WHERE ' + self.expand(query)
 1051          else:
 1052              sql_w = ''
 1053          sql_o = ''
 1054          sql_s = ''
 1055          left = attributes.get('left', False)
 1056          inner_join = attributes.get('join', False)
 1057          distinct = attributes.get('distinct', False)
 1058          groupby = attributes.get('groupby', False)
 1059          orderby = attributes.get('orderby', False)
 1060          having = attributes.get('having', False)
 1061          limitby = attributes.get('limitby', False)
 1062          if distinct is True:
 1063              sql_s += 'DISTINCT'
 1064          elif distinct:
 1065              sql_s += 'DISTINCT ON (%s)' % distinct
 1066          if inner_join:
 1067              icommand = self.JOIN()
 1068              if not isinstance(inner_join, (tuple, list)):
 1069                  inner_join = [inner_join]
 1070              ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)]
 1071              ijoinon = [t for t in inner_join if isinstance(t, Expression)]
 1072              ijoinont = [t.first._tablename for t in ijoinon]
 1073              iexcluded = [t for t in tablenames if not t in ijoint + ijoinont]
 1074          if left:
 1075              join = attributes['left']
 1076              command = self.LEFT_JOIN()
 1077              if not isinstance(join, (tuple, list)):
 1078                  join = [join]
 1079              joint = [t._tablename for t in join if not isinstance(t,Expression)]
 1080              joinon = [t for t in join if isinstance(t, Expression)]
 1081              #patch join+left patch (solves problem with ordering in left joins)
 1082              tables_to_merge={}
 1083              [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
 1084              joinont = [t.first._tablename for t in joinon]
 1085              [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
 1086              important_tablenames = joint + joinont + tables_to_merge.keys()
 1087              excluded = [t for t in tablenames if not t in important_tablenames ]
 1088          if inner_join and not left:
 1089              sql_t = ', '.join(iexcluded)
 1090              for t in ijoinon:
 1091                  sql_t += ' %s %s' % (icommand, str(t))
 1092          elif not inner_join and left:
 1093              sql_t = ', '.join([ t for t in excluded + tables_to_merge.keys()])
 1094              if joint:
 1095                  sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
 1096              for t in joinon:
 1097                  sql_t += ' %s %s' % (command, str(t))
 1098          elif inner_join and left:
 1099              sql_t = ','.join([ t for t in excluded + tables_to_merge.keys() if t in iexcluded ])
 1100              for t in ijoinon:
 1101                  sql_t += ' %s %s' % (icommand, str(t))
 1102              if joint:
 1103                  sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
 1104              for t in joinon:
 1105                  sql_t += ' %s %s' % (command, str(t))
 1106          else:
 1107              sql_t = ', '.join(tablenames)
 1108          if groupby:
 1109              if isinstance(groupby, (list, tuple)):
 1110                  groupby = xorify(groupby)
 1111              sql_o += ' GROUP BY %s' % self.expand(groupby)
 1112          if having:
 1113              sql_o += ' HAVING %s' % attributes['having']
 1114          if orderby:
 1115              if isinstance(orderby, (list, tuple)):
 1116                  orderby = xorify(orderby)
 1117              if str(orderby) == '<random>':
 1118                  sql_o += ' ORDER BY %s' % self.RANDOM()
 1119              else:
 1120                  sql_o += ' ORDER BY %s' % self.expand(orderby)
 1121          if limitby:
 1122              if not orderby and tablenames:
 1123                  sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
 1124          # oracle does not support limitby
 1125          return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
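
    # A sketch of the join/left attributes handled above, assuming a
    # hypothetical 'dog' table with an 'owner' reference to person:
    #
    #     db(person).select(join=dog.on(dog.owner==person.id))    # INNER JOIN
    #     db(person).select(left=dog.on(dog.owner==person.id))    # LEFT JOIN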
1126
1127 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1128 if limitby: 1129 (lmin, lmax) = limitby 1130 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1131 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
1132
1133 - def select(self,query,fields,attributes):
1134 """ 1135 Always returns a Rows object, even if it may be empty 1136 """ 1137 def response(sql): 1138 self.execute(sql) 1139 return self.cursor.fetchall()
1140 sql = self._select(query,fields,attributes) 1141 if attributes.get('cache', None): 1142 (cache_model, time_expire) = attributes['cache'] 1143 del attributes['cache'] 1144 key = self.uri + '/' + sql 1145 key = (key<=200) and key or hashlib.md5(key).hexdigest() 1146 rows = cache_model(key, lambda: response(sql), time_expire) 1147 else: 1148 rows = response(sql) 1149 if isinstance(rows,tuple): 1150 rows = list(rows) 1151 limitby = attributes.get('limitby',None) or (0,) 1152 rows = self.rowslice(rows,limitby[0],None) 1153 return self.parse(rows,self._colnames) 1154
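
    # Usage sketch: the cache attribute is a (cache_model, time_expire)
    # pair; under web2py this is typically
    #
    #     db(person).select(cache=(cache.ram, 3600))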
1155 - def _count(self,query,distinct=None):
1156 tablenames = self.tables(query) 1157 if query: 1158 sql_w = ' WHERE ' + self.expand(query) 1159 else: 1160 sql_w = '' 1161 sql_t = ','.join(tablenames) 1162 if distinct: 1163 if isinstance(distinct,(list,tuple)): 1164 distinct = xorify(distinct) 1165 sql_d = self.expand(distinct) 1166 return 'SELECT count(DISTINCT %s) FROM %s%s' % (sql_d, sql_t, sql_w) 1167 return 'SELECT count(*) FROM %s%s' % (sql_t, sql_w)
1168
1169 - def count(self,query,distinct=None):
1170 self.execute(self._count(query,distinct)) 1171 return self.cursor.fetchone()[0]
1172 1173
1174 - def tables(self,query):
1175 tables = set() 1176 if isinstance(query, Field): 1177 tables.add(query.tablename) 1178 elif isinstance(query, (Expression, Query)): 1179 if query.first!=None: 1180 tables = tables.union(self.tables(query.first)) 1181 if query.second!=None: 1182 tables = tables.union(self.tables(query.second)) 1183 return list(tables)
1184
1185 - def commit(self):
1186 return self.connection.commit()
1187
1188 - def rollback(self):
1189 return self.connection.rollback()
1190
1191 - def close(self):
1192 return self.connection.close()
1193
1194 - def distributed_transaction_begin(self,key):
1195 return
1196
1197 - def prepare(self,key):
1198 self.connection.prepare()
1199
1200 - def commit_prepared(self,key):
1201 self.connection.commit()
1202
1203 - def rollback_prepared(self,key):
1204 self.connection.rollback()
1205
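
    # Sketch: on engines with support_distributed_transaction (e.g. the
    # PostgreSQL and MySQL adapters below), two-phase commit across several
    # DAL instances calls prepare() on every connection first and only then
    # commit_prepared() on each, rolling all back if any prepare fails.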
1206 - def concat_add(self,table):
1207 return ', ADD '
1208
1209 - def constraint_name(self, table, fieldname):
1210 return '%s_%s__constraint' % (table,fieldname)
1211
1212 - def create_sequence_and_triggers(self, query, table, **args):
1213 self.execute(query)
1214
1215 - def log_execute(self,*a,**b):
 1216          self.db._lastsql = a[0]
 1217          t0 = time.time()
 1218          ret = self.cursor.execute(*a,**b)
 1219          self.db._timings.append((a[0],time.time()-t0))
 1220          return ret
1221
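
    # The bookkeeping above backs two public debugging attributes:
    #
    #     db._lastsql    # last SQL string sent to the backend
    #     db._timings    # list of (sql, seconds) pairs, appended per execute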
1222 - def execute(self,*a,**b):
1223 return self.log_execute(*a, **b)
1224
1225 - def represent(self, obj, fieldtype):
 1226          if isinstance(obj,CALLABLETYPES):
 1227              obj = obj()
 1228          if isinstance(fieldtype, SQLCustomType):
 1229              return fieldtype.encoder(obj)
 1230          if isinstance(obj, (Expression, Field)):
 1231              return str(obj)
 1232          if fieldtype.startswith('list:'):
 1233              if not obj:
 1234                  obj = []
 1235              if not isinstance(obj, (list, tuple)):
 1236                  obj = [obj]
 1237          if isinstance(obj, (list, tuple)):
 1238              obj = bar_encode(obj)
 1239          if obj is None:
 1240              return 'NULL'
 1241          if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
 1242              return 'NULL'
 1243          r = self.represent_exceptions(obj,fieldtype)
 1244          if r != None:
 1245              return r
 1246          if fieldtype == 'boolean':
 1247              if obj and not str(obj)[:1].upper() in ['F', '0']:
 1248                  return "'T'"
 1249              else:
 1250                  return "'F'"
 1251          if fieldtype == 'id' or fieldtype == 'integer':
 1252              return str(int(obj))
 1253          if fieldtype.startswith('decimal'):
 1254              return str(obj)
 1255          elif fieldtype.startswith('reference'): # reference
 1256              if fieldtype.find('.')>0:
 1257                  return repr(obj)
 1258              elif isinstance(obj, (Row, Reference)):
 1259                  return str(obj['id'])
 1260              return str(int(obj))
 1261          elif fieldtype == 'double':
 1262              return repr(float(obj))
 1263          if isinstance(obj, unicode):
 1264              obj = obj.encode(self.db_codec)
 1265          if fieldtype == 'blob':
 1266              obj = base64.b64encode(str(obj))
 1267          elif fieldtype == 'date':
 1268              if isinstance(obj, (datetime.date, datetime.datetime)):
 1269                  obj = obj.isoformat()[:10]
 1270              else:
 1271                  obj = str(obj)
 1272          elif fieldtype == 'datetime':
 1273              if isinstance(obj, datetime.datetime):
 1274                  obj = obj.isoformat()[:19].replace('T',' ')
 1275              elif isinstance(obj, datetime.date):
 1276                  obj = obj.isoformat()[:10]+' 00:00:00'
 1277              else:
 1278                  obj = str(obj)
 1279          elif fieldtype == 'time':
 1280              if isinstance(obj, datetime.time):
 1281                  obj = obj.isoformat()[:10]
 1282              else:
 1283                  obj = str(obj)
 1284          if not isinstance(obj,str):
 1285              obj = str(obj)
 1286          try:
 1287              obj.decode(self.db_codec)
 1288          except:
 1289              obj = obj.decode('latin1').encode(self.db_codec)
 1290          return "'%s'" % obj.replace("'", "''")
1291
1292 - def represent_exceptions(self, obj, fieldtype):
1293 return None
1294
1295 - def lastrowid(self,table):
1296 return None
1297
1298 - def integrity_error_class(self):
1299 return type(None)
1300
1301 - def rowslice(self,rows,minimum=0,maximum=None):
1302 """ by default this function does nothing, overload when db does not do slicing """ 1303 return rows
1304
1305 - def parse(self, rows, colnames, blob_decode=True):
 1306          db = self.db
 1307          virtualtables = []
 1308          new_rows = []
 1309          for (i,row) in enumerate(rows):
 1310              new_row = Row()
 1311              for j,colname in enumerate(colnames):
 1312                  value = row[j]
 1313                  if not table_field.match(colnames[j]):
 1314                      if not '_extra' in new_row:
 1315                          new_row['_extra'] = Row()
 1316                      new_row['_extra'][colnames[j]] = value
 1317                      select_as_parser = re.compile("\s+AS\s+(\S+)")
 1318                      new_column_name = select_as_parser.search(colnames[j])
 1319                      if not new_column_name is None:
 1320                          column_name = new_column_name.groups(0)
 1321                          setattr(new_row,column_name[0],value)
 1322                      continue
 1323                  (tablename, fieldname) = colname.split('.')
 1324                  table = db[tablename]
 1325                  field = table[fieldname]
 1326                  field_type = field.type
 1327                  if field.type != 'blob' and isinstance(value, str):
 1328                      try:
 1329                          value = value.decode(db._db_codec)
 1330                      except Exception:
 1331                          pass
 1332                  if isinstance(value, unicode):
 1333                      value = value.encode('utf-8')
 1334                  if not tablename in new_row:
 1335                      colset = new_row[tablename] = Row()
 1336                      virtualtables.append(tablename)
 1337                  else:
 1338                      colset = new_row[tablename]
 1339
 1340                  if isinstance(field_type, SQLCustomType):
 1341                      colset[fieldname] = field_type.decoder(value)
 1342                      # field_type = field_type.type
 1343                  elif not isinstance(field_type, str) or value is None:
 1344                      colset[fieldname] = value
 1345                  elif isinstance(field_type, str) and \
 1346                          field_type.startswith('reference'):
 1347                      referee = field_type[10:].strip()
 1348                      if not '.' in referee:
 1349                          colset[fieldname] = rid = Reference(value)
 1350                          (rid._table, rid._record) = (db[referee], None)
 1351                      else: ### reference not by id
 1352                          colset[fieldname] = value
 1353                  elif field_type == 'boolean':
 1354                      if value == True or str(value)[:1].lower() == 't':
 1355                          colset[fieldname] = True
 1356                      else:
 1357                          colset[fieldname] = False
 1358                  elif field_type == 'date' \
 1359                          and (not isinstance(value, datetime.date)\
 1360                          or isinstance(value, datetime.datetime)):
 1361                      (y, m, d) = [int(x) for x in
 1362                                   str(value)[:10].strip().split('-')]
 1363                      colset[fieldname] = datetime.date(y, m, d)
 1364                  elif field_type == 'time' \
 1365                          and not isinstance(value, datetime.time):
 1366                      time_items = [int(x) for x in
 1367                                    str(value)[:8].strip().split(':')[:3]]
 1368                      if len(time_items) == 3:
 1369                          (h, mi, s) = time_items
 1370                      else:
 1371                          (h, mi, s) = time_items + [0]
 1372                      colset[fieldname] = datetime.time(h, mi, s)
 1373                  elif field_type == 'datetime'\
 1374                          and not isinstance(value, datetime.datetime):
 1375                      (y, m, d) = [int(x) for x in
 1376                                   str(value)[:10].strip().split('-')]
 1377                      time_items = [int(x) for x in
 1378                                    str(value)[11:19].strip().split(':')[:3]]
 1379                      if len(time_items) == 3:
 1380                          (h, mi, s) = time_items
 1381                      else:
 1382                          (h, mi, s) = time_items + [0]
 1383                      colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
 1384                  elif field_type == 'blob' and blob_decode:
 1385                      colset[fieldname] = base64.b64decode(str(value))
 1386                  elif field_type.startswith('decimal'):
 1387                      decimals = [int(x) for x in field_type[8:-1].split(',')][-1]
 1388                      if self.dbengine == 'sqlite':
 1389                          value = ('%.' + str(decimals) + 'f') % value
 1390                      if not isinstance(value, decimal.Decimal):
 1391                          value = decimal.Decimal(str(value))
 1392                      colset[fieldname] = value
 1393                  elif field_type.startswith('list:integer'):
 1394                      if not self.dbengine=='google:datastore':
 1395                          colset[fieldname] = bar_decode_integer(value)
 1396                      else:
 1397                          colset[fieldname] = value
 1398                  elif field_type.startswith('list:reference'):
 1399                      if not self.dbengine=='google:datastore':
 1400                          colset[fieldname] = bar_decode_integer(value)
 1401                      else:
 1402                          colset[fieldname] = value
 1403                  elif field_type.startswith('list:string'):
 1404                      if not self.dbengine=='google:datastore':
 1405                          colset[fieldname] = bar_decode_string(value)
 1406                      else:
 1407                          colset[fieldname] = value
 1408                  else:
 1409                      colset[fieldname] = value
 1410                  if field_type == 'id':
 1411                      id = colset[field.name]
 1412                      colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
 1413                      colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
 1414                      for (referee_table, referee_name) in \
 1415                              table._referenced_by:
 1416                          s = db[referee_table][referee_name]
 1417                          if not referee_table in colset:
 1418                              # for backward compatibility
 1419                              colset[referee_table] = Set(db, s == id)
 1420                          ### add new feature?
 1421                          ### colset[referee_table+'_by_'+refree_name] = Set(db, s == id)
 1422                      colset['id'] = id
 1423              new_rows.append(new_row)
 1424          rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
 1425          for tablename in virtualtables:
 1426              for item in db[tablename].virtualfields:
 1427                  try:
 1428                      rowsobj = rowsobj.setvirtualfields(**{tablename:item})
 1429                  except KeyError:
 1430                      # to avoid breaking virtualfields when partial select
 1431                      pass
 1432          return rowsobj
1433
1434 - def filter_tenant(self,query,tablenames):
 1435          fieldname = self.db._request_tenant
 1436          for tablename in tablenames:
 1437              table = self.db[tablename]
 1438              if fieldname in table:
 1439                  default = table[fieldname].default
 1440                  if default!=None:
 1441                      query = query&(table[fieldname]==default)
 1442          return query
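
    # Multi-tenancy sketch: any table carrying the db._request_tenant field
    # with a default gets that equality folded into every query, e.g.
    # (assuming the default field name 'request_tenant'):
    #
    #     db.define_table('doc', Field('body'),
    #                     Field('request_tenant', default='acme'))
    #     # selects on 'doc' are then implicitly filtered by request_tenant=='acme'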
1443 1444 ################################################################################### 1445 # List of all the available adapters, they all extend BaseAdapter 1446 ################################################################################### 1447
1448 -class SQLiteAdapter(BaseAdapter):
1449
1450 - def EXTRACT(self,field,what):
1451 return "web2py_extract('%s',%s)" % (what,self.expand(field))
1452 1453 @staticmethod
1454 - def web2py_extract(lookup, s):
 1455          table = {
 1456              'year': (0, 4),
 1457              'month': (5, 7),
 1458              'day': (8, 10),
 1459              'hour': (11, 13),
 1460              'minute': (14, 16),
 1461              'second': (17, 19),
 1462              }
 1463          try:
 1464              (i, j) = table[lookup]
 1465              return int(s[i:j])
 1466          except:
 1467              return None
1468
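
    # Usage sketch: EXTRACT above is reached through the date-part helpers
    # on Field, assuming a hypothetical 'birth' date field on person:
    #
    #     rows = db(person.birth.year() == 1990).select()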
1469 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1470 credential_decoder=lambda x:x, driver_args={}, 1471 adapter_args={}):
 1472          self.db = db
 1473          self.dbengine = "sqlite"
 1474          self.uri = uri
 1475          self.pool_size = pool_size
 1476          self.folder = folder
 1477          self.db_codec = db_codec
 1478          self.find_or_make_work_folder()
 1479          path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1]
 1480          if uri.startswith('sqlite:memory'):
 1481              dbpath = ':memory:'
 1482          else:
 1483              dbpath = uri.split('://')[1]
 1484              if dbpath[0] != '/':
 1485                  dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath)
 1486          if not 'check_same_thread' in driver_args:
 1487              driver_args['check_same_thread'] = False
 1488          def connect(dbpath=dbpath, driver_args=driver_args):
 1489              return sqlite3.Connection(dbpath, **driver_args)
 1490          self.pool_connection(connect)
 1491          self.cursor = self.connection.cursor()
 1492          self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
1493
1494 - def _truncate(self,table,mode = ''):
1495 tablename = table._tablename 1496 return ['DELETE FROM %s;' % tablename, 1497 "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
1498
1499 - def lastrowid(self,table):
1500 return self.cursor.lastrowid
1501 1502
1503 -class JDBCSQLiteAdapter(SQLiteAdapter):
1504
1505 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1506 credential_decoder=lambda x:x, driver_args={}, 1507 adapter_args={}):
1508 self.db = db 1509 self.dbengine = "sqlite" 1510 self.uri = uri 1511 self.pool_size = pool_size 1512 self.folder = folder 1513 self.db_codec = db_codec 1514 self.find_or_make_work_folder() 1515 path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] 1516 if uri.startswith('sqlite:memory'): 1517 dbpath = ':memory:' 1518 else: 1519 dbpath = uri.split('://')[1] 1520 if dbpath[0] != '/': 1521 dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath) 1522 def connect(dbpath=dbpath,driver_args=driver_args): 1523 return zxJDBC.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath),**driver_args)
1524 self.pool_connection(connect) 1525 self.cursor = self.connection.cursor() 1526 self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
1527
1528 - def execute(self,a):
1529 return self.log_execute(a[:-1])
1530 1531
1532 -class MySQLAdapter(BaseAdapter):
 1533
 1534      driver = globals().get('pymysql',None)
 1535      maxcharlength = 255
 1536      commit_on_alter_table = True
 1537      support_distributed_transaction = True
 1538      types = {
 1539          'boolean': 'CHAR(1)',
 1540          'string': 'VARCHAR(%(length)s)',
 1541          'text': 'LONGTEXT',
 1542          'password': 'VARCHAR(%(length)s)',
 1543          'blob': 'LONGBLOB',
 1544          'upload': 'VARCHAR(%(length)s)',
 1545          'integer': 'INT',
 1546          'double': 'DOUBLE',
 1547          'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
 1548          'date': 'DATE',
 1549          'time': 'TIME',
 1550          'datetime': 'DATETIME',
 1551          'id': 'INT AUTO_INCREMENT NOT NULL',
 1552          'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
 1553          'list:integer': 'LONGTEXT',
 1554          'list:string': 'LONGTEXT',
 1555          'list:reference': 'LONGTEXT',
 1556          }
 1557
1558 - def RANDOM(self):
1559 return 'RAND()'
1560
1561 - def SUBSTRING(self,field,parameters):
1562 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1563
1564 - def _drop(self,table,mode):
 1565          # breaks db integrity but without this mysql does not drop table
 1566          return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']
1567
1568 - def distributed_transaction_begin(self,key):
1569 self.execute('XA START;')
1570
1571 - def prepare(self,key):
1572 self.execute("XA END;") 1573 self.execute("XA PREPARE;")
1574
 1575 - def commit_prepared(self,key):
1576 self.execute("XA COMMIT;")
1577
1578 - def rollback_prepared(self,key):
1579 self.execute("XA ROLLBACK;")
1580
1581 - def concat_add(self,table):
1582 return '; ALTER TABLE %s ADD ' % table
1583
1584 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 1585 credential_decoder=lambda x:x, driver_args={}, 1586 adapter_args={}):
1587 self.db = db 1588 self.dbengine = "mysql" 1589 self.uri = uri 1590 self.pool_size = pool_size 1591 self.folder = folder 1592 self.db_codec = db_codec 1593 self.find_or_make_work_folder() 1594 uri = uri.split('://')[1] 1595 m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) 1596 if not m: 1597 raise SyntaxError, \ 1598 "Invalid URI string in DAL: %s" % self.uri 1599 user = credential_decoder(m.group('user')) 1600 if not user: 1601 raise SyntaxError, 'User required' 1602 password = credential_decoder(m.group('password')) 1603 if not password: 1604 password = '' 1605 host = m.group('host') 1606 if not host: 1607 raise SyntaxError, 'Host name required' 1608 db = m.group('db') 1609 if not db: 1610 raise SyntaxError, 'Database name required' 1611 port = int(m.group('port') or '3306') 1612 charset = m.group('charset') or 'utf8' 1613 driver_args.update(dict(db=db, 1614 user=credential_decoder(user), 1615 passwd=credential_decoder(password), 1616 host=host, 1617 port=port, 1618 charset=charset)) 1619 def connect(driver_args=driver_args): 1620 return self.driver.connect(**driver_args)
1621 self.pool_connection(connect) 1622 self.cursor = self.connection.cursor() 1623 self.execute('SET FOREIGN_KEY_CHECKS=1;') 1624 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
1625
1626 - def lastrowid(self,table):
1627 self.execute('select last_insert_id();') 1628 return int(self.cursor.fetchone()[0])
1629 1630
1631 -class PostgreSQLAdapter(BaseAdapter):
 1632
 1633      support_distributed_transaction = True
 1634      types = {
 1635          'boolean': 'CHAR(1)',
 1636          'string': 'VARCHAR(%(length)s)',
 1637          'text': 'TEXT',
 1638          'password': 'VARCHAR(%(length)s)',
 1639          'blob': 'BYTEA',
 1640          'upload': 'VARCHAR(%(length)s)',
 1641          'integer': 'INTEGER',
 1642          'double': 'FLOAT8',
 1643          'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
 1644          'date': 'DATE',
 1645          'time': 'TIME',
 1646          'datetime': 'TIMESTAMP',
 1647          'id': 'SERIAL PRIMARY KEY',
 1648          'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
 1649          'list:integer': 'TEXT',
 1650          'list:string': 'TEXT',
 1651          'list:reference': 'TEXT',
 1652          }
 1653
1654 - def sequence_name(self,table):
1655 return '%s_id_Seq' % table
1656
1657 - def RANDOM(self):
1658 return 'RANDOM()'
1659
1660 - def distributed_transaction_begin(self,key):
1661 return
1662
1663 - def prepare(self,key):
1664 self.execute("PREPARE TRANSACTION '%s';" % key)
1665
1666 - def commit_prepared(self,key):
1667 self.execute("COMMIT PREPARED '%s';" % key)
1668
1669 - def rollback_prepared(self,key):
1670 self.execute("ROLLBACK PREPARED '%s';" % key)
1671
1672 - def create_sequence_and_triggers(self, query, table, **args):
 1673          # following lines should only be executed if table._sequence_name does not exist
 1674          # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
 1675          # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
 1676          #     % (table._tablename, table._fieldname, table._sequence_name))
 1677          self.execute(query)
1678
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # note the trailing space before the second literal: without it the
        # adjacent strings concatenate into "...host='x'port=..." and libpq
        # rejects the connection string
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        def connect(msg=msg,driver_args=driver_args):
            return psycopg2.connect(msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.execute("SET standard_conforming_strings=on;")

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def LIKE(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))

    def CONTAINS(self,first,second):
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))

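# Example (illustrative only; the table and value are placeholders): on
# PostgreSQL, LIKE/STARTSWITH/ENDSWITH/CONTAINS all compile to the
# case-insensitive ILIKE operator, e.g.
#
#   db(db.person.name.startswith('Ja')).select()
#   # -> ... WHERE (person.name ILIKE 'Ja%')
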
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connect(msg=msg,driver_args=driver_args):
            return zxJDBC.connect(*msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")

class OracleAdapter(BaseAdapter):

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def sequence_name(self,tablename):
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connect(uri=uri,driver_args=driver_args):
            return cx_Oracle.connect(uri,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command):
        args = []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        return self.log_execute(command[:-1], args)

    def create_sequence_and_triggers(self, query, table, **args):
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])

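# Example (illustrative only; the connect string and table are placeholders):
# Oracle has no autoincrement column type, so define_table() emulates the
# 'id' type with a per-table sequence plus a BEFORE INSERT trigger:
#
#   db = DAL('oracle://scott:tiger@orcl')
#   db.define_table('thing', Field('name'))
#   # issues: CREATE SEQUENCE thing_sequence ...;
#   #         CREATE OR REPLACE TRIGGER thing_trigger BEFORE INSERT ...
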
class MSSQLAdapter(BaseAdapter):
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'boolean':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}, fake_connect=False):
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        uri = uri.split('://')[1]
        if '@' not in uri:
            try:
                m = re.compile('^(?P<dsn>.+)$').match(uri)
                if not m:
                    raise SyntaxError, \
                        'Parsing uri string(%s) has no result' % self.uri
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError, 'DSN required'
            except SyntaxError, e:
                logger.error('NdGpatch error')
                raise e
            cnxn = 'DSN=%s' % dsn
        else:
            m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
            if not m:
                raise SyntaxError, \
                    "Invalid URI string in DAL: %s" % uri
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError, 'User required'
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError, 'Host name required'
            db = m.group('db')
            if not db:
                raise SyntaxError, 'Database name required'
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
            for argmatch in argpattern.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connect(cnxn=cnxn,driver_args=driver_args):
            return pyodbc.connect(cnxn,**driver_args)
        if not fake_connect:
            self.pool_connection(connect)
            self.cursor = self.connection.cursor()

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self,rows,minimum=0,maximum=None):
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

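# Example (illustrative only; server names and credentials are placeholders):
# extra ?key=value pairs in the URI become ODBC connection-string options,
# and a URI without '@' is treated as a plain DSN:
#
#   db = DAL('mssql://web2py:secret@dbhost/mydb?DRIVER={FreeTDS}')
#   db = DAL('mssql://mydsn')   # -> pyodbc.connect('DSN=mydsn')
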
class MSSQL2Adapter(MSSQLAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        }

    def represent(self, obj, fieldtype):
        value = BaseAdapter.represent(self, obj, fieldtype)
        # test the type and the quote together; the earlier
        # "fieldtype == 'string' or fieldtype == 'text' and ..." bound as
        # "a or (b and c)" and N-prefixed every string value
        if fieldtype in ('string','text') and value[:1]=="'":
            value = 'N'+value
        return value

    def execute(self,a):
        return self.log_execute(a.decode('utf8'))

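# Example (illustrative only; the rendered SQL is a sketch of what the
# adapter produces): with the 'mssql2' scheme, string fields map to
# NVARCHAR/NTEXT and quoted literals are sent as N'...' unicode literals:
#
#   db = DAL('mssql2://web2py:secret@dbhost/mydb')
#   db(db.person.name=='x')   # -> ... WHERE (person.name = N'x')
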
class FireBirdAdapter(BaseAdapter):

    commit_on_alter_table = False
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def sequence_name(self,tablename):
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self,table,mode):
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL: %s" % uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
                                user = credential_decoder(user),
                                password = credential_decoder(password),
                                charset = charset))
        def connect(driver_args=driver_args, adapter_args=adapter_args):
            if adapter_args.has_key('driver_name'):
                if adapter_args['driver_name'] == 'kinterbasdb':
                    conn = kinterbasdb.connect(**driver_args)
                elif adapter_args['driver_name'] == 'firebirdsql':
                    conn = firebirdsql.connect(**driver_args)
            else:
                conn = kinterbasdb.connect(**driver_args)
            return conn
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])

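# Example (illustrative only; host, credentials and the assumption that the
# DAL constructor forwards adapter_args are the author's editor-added
# placeholders): adapter_args selects between the two supported drivers,
# defaulting to kinterbasdb:
#
#   db = DAL('firebird://user:secret@fbhost:3050/mydb?set_encoding=UTF8',
#            adapter_args={'driver_name':'firebirdsql'})
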
class FireBirdEmbeddedAdapter(FireBirdAdapter):

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError, 'Path required'
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''
        driver_args.update(dict(host=host,
                                database=pathdb,
                                user=credential_decoder(user),
                                password=credential_decoder(password),
                                charset=charset))
        #def connect(driver_args=driver_args):
        #    return kinterbasdb.connect(**driver_args)
        def connect(driver_args=driver_args, adapter_args=adapter_args):
            if adapter_args.has_key('driver_name'):
                if adapter_args['driver_name'] == 'kinterbasdb':
                    conn = kinterbasdb.connect(**driver_args)
                elif adapter_args['driver_name'] == 'firebirdsql':
                    conn = firebirdsql.connect(**driver_args)
            else:
                conn = kinterbasdb.connect(**driver_args)
            return conn
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

class InformixAdapter(BaseAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(dict(user=user,password=password,autocommit=True))
        def connect(dsn=dsn,driver_args=driver_args):
            return informixdb.connect(dsn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def execute(self,command):
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        return informixdb.IntegrityError

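# Example (illustrative only; the server and credentials are placeholders):
# informixdb exposes the new SERIAL value through cursor.sqlerrd, so no
# extra SELECT is needed after an insert:
#
#   db = DAL('informix://user:secret@ifxhost/mydb')
#   rid = db.mytable.insert(name='x')   # reads cursor.sqlerrd[1]
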
class DB2Adapter(BaseAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return pyodbc.connect(cnxn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def execute(self,command):
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
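
# Example (illustrative only; the DSN is a placeholder): everything after
# 'db2://' is passed verbatim to pyodbc as an ODBC connection string:
#
#   db = DAL('db2://DSN=mydsn;UID=user;PWD=secret')
#   # limitby=(0,10) -> '... FETCH FIRST 10 ROWS ONLY'
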

INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        connstr = self.uri.split(':', 1)[1]  # self._uri is never set; use self.uri
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name = connstr # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None) # No tracing
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return ingresdbi.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table._primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        return ingresdbi.IntegrityError

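# Example (illustrative only; 'mydb' is a placeholder): only a local
# database name is parsed out of the URI, so the usual form is simply:
#
#   db = DAL('ingres://mydb')
#   # ids come from a per-table sequence named <tablename>_iisq
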
class IngresUnicodeAdapter(IngresAdapter):
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        }

class SAPDBAdapter(BaseAdapter):

    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        }

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            # ROWNO<=lmax bounds the inner window, mirroring the Oracle
            # ROWNUM wrapper above
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        def connect(user=user,password=password,database=db,
                    host=host,driver_args=driver_args):
            return sapdb.Connection(user,password,database,host,**driver_args)
        self.pool_connection(connect)
        # self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()

    def lastrowid(self,table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

class CubridAdapter(MySQLAdapter):

    driver = globals().get('cubriddb', None)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = int(m.group('port') or '30000')
        charset = m.group('charset') or 'utf8'
        user = credential_decoder(user)     # plain assignments; the earlier
        passwd = credential_decoder(password)  # trailing commas built tuples
        # bind the connection parameters as defaults so pool_connection()
        # can call connect() with no arguments
        def connect(host=host,port=port,db=db,user=user,passwd=passwd,
                    driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
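
# Example (illustrative only; host and credentials are placeholders):
#
#   db = DAL('cubrid://user:secret@cubridhost:30000/mydb')
#
# CubridAdapter reuses the MySQL SQL dialect; only the connection call
# differs.
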

######## GAE MySQL ##########

class DatabaseStoredFile:

    web2py_filesystem = False

    def __init__(self,db,filename,mode):
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError, "only MySQL can store metadata .table files in database for now"
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            DatabaseStoredFile.web2py_filesystem = True
        self.p = 0
        self.data = ''
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                self.data = open(filename,'r').read()
            elif mode in ('r','rw'):
                raise RuntimeError, "File %s does not exist" % filename

    def read(self, bytes):
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        self.data += data

    def close(self):
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" % \
            (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()

    @staticmethod
    def exists(db,filename):
        if os.path.exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False

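# Example (illustrative sketch; the filename is a placeholder):
# DatabaseStoredFile gives the migration machinery a file-like object backed
# by the web2py_filesystem table, so .table metadata survives on hosts with
# no writable disk:
#
#   f = DatabaseStoredFile(db, 'sometable.table', 'w')
#   f.write('...')
#   f.close()        # persists the content as a row keyed by path
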
class UseDatabaseStoredFile:

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db,filename)

    def file_open(self, filename, mode='rb', lock=True):
        return DatabaseStoredFile(self.db,filename,mode)

    def file_close(self, fileobj, unlock=True):
        fileobj.close()

    def file_delete(self,filename):
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()

class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):

    def __init__(self, db, uri='google:sql://realm:domain/database', pool_size=0,
                 folder=None, db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1]

        m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):])
        if not m:
            raise SyntaxError, "Invalid URI string in SQLDB: %s" % self.uri
        instance = credential_decoder(m.group('instance'))
        db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not migrate:
            driver_args['database'] = db
        def connect(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        if migrate:
            # self.execute('DROP DATABASE %s' % db)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
            self.execute('USE %s' % db)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

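# Example (illustrative only; instance and database names are placeholders):
#
#   db = DAL('google:sql://myproject:myinstance/mydb')
#
# Metadata .table files are kept in the web2py_filesystem table (via
# UseDatabaseStoredFile) because App Engine offers no writable disk.
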
class NoSQLAdapter(BaseAdapter):

    def represent(self, obj, fieldtype):
        if isinstance(obj,CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError, "non supported on GAE"
        if self.dbengine == 'google:datastore':
            # gae is only importable when running on Google App Engine;
            # the earlier "=='google:datastore' in globals()" chained
            # comparison was always False
            if isinstance(fieldtype, gae.Property):
                return obj
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not fieldtype[:2] in ['st','te','pa','up']:
            return None
        if obj != None:
            if isinstance(obj, list) and not fieldtype.startswith('list'):
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif fieldtype.startswith('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                if obj and not str(obj)[0].upper() == 'F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    (y, m, d) = [int(x) for x in str(obj).strip().split('-')]
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    time_items = [int(x) for x in str(obj).strip().split(':')[:3]]
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    (y, m, d) = [int(x) for x in str(obj)[:10].strip().split('-')]
                    time_items = [int(x) for x in str(obj)[11:].strip().split(':')[:3]]
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype.startswith('list:string'):
                if obj!=None and not isinstance(obj,(list,tuple)):
                    obj=[obj]
                return [str(x) for x in obj]
            elif fieldtype.startswith('list:'):
                if obj!=None and not isinstance(obj,(list,tuple)):
                    obj=[obj]
                return [int(x) for x in obj]
            elif isinstance(obj, str):
                obj = obj.decode('utf8')
            elif not isinstance(obj, unicode):
                obj = unicode(obj)
        return obj

    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError, "Not supported"
    def AND(self,first,second): raise SyntaxError, "Not supported"
    def AS(self,first,second): raise SyntaxError, "Not supported"
    def ON(self,first,second): raise SyntaxError, "Not supported"
    def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
    def ADD(self,first,second): raise SyntaxError, "Not supported"
    def SUB(self,first,second): raise SyntaxError, "Not supported"
    def MUL(self,first,second): raise SyntaxError, "Not supported"
    def DIV(self,first,second): raise SyntaxError, "Not supported"
    def LOWER(self,first): raise SyntaxError, "Not supported"
    def UPPER(self,first): raise SyntaxError, "Not supported"
    def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
    def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
    def LEFT_JOIN(self): raise SyntaxError, "Not supported"
    def RANDOM(self): raise SyntaxError, "Not supported"
    def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
    def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
    def LIKE(self,first,second): raise SyntaxError, "Not supported"
    def drop(self,table,mode): raise SyntaxError, "Not supported"
    def alias(self,table,alias): raise SyntaxError, "Not supported"
    def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
    def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
    def prepare(self,key): raise SyntaxError, "Not supported"
    def commit_prepared(self,key): raise SyntaxError, "Not supported"
    def rollback_prepared(self,key): raise SyntaxError, "Not supported"
    def concat_add(self,table): raise SyntaxError, "Not supported"
    def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
    def execute(self,*a,**b): raise SyntaxError, "Not supported"
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
    def lastrowid(self,table): raise SyntaxError, "Not supported"
    def integrity_error_class(self): raise SyntaxError, "Not supported"
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"

class GAEF(object):
    def __init__(self,name,op,value,apply):
        self.name=name=='id' and '__key__' or name
        self.op=op
        self.value=value
        self.apply=apply
    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))

class GoogleDatastoreAdapter(NoSQLAdapter):
    uploads_in_blob = True
    types = {}

    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda: gae.StringProperty(multiline=True)),
                'text': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda: gae.StringListProperty(default=None)),
                'list:integer': (lambda: gae.ListProperty(int,default=None)),
                'list:reference': (lambda: gae.ListProperty(int,default=None)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = 0
        match = re.compile('.*://(?P<namespace>.+)').match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))

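    # Example (illustrative only; 'mynamespace' is a placeholder): anything
    # after the scheme selects a datastore namespace:
    #
    #   db = DAL('google:datastore://mynamespace')
    #   # each define_table() then builds a gae.Model subclass on the fly
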
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        myfields = {}
        for k in table.fields:
            if isinstance(polymodel,Table) and k in polymodel.fields():
                continue
            field = table[k]
            attr = {}
            if isinstance(field.type, SQLCustomType):
                ftype = self.types[field.type.native or field.type.type](**attr)
            elif isinstance(field.type, gae.Property):
                ftype = field.type
            elif field.type.startswith('id'):
                continue
            elif field.type.startswith('decimal'):
                precision, scale = field.type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field.type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[10:].strip()
                ftype = self.types[field.type[:9]](table._db[referenced])
            elif field.type.startswith('list:reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[15:].strip()
                ftype = self.types[field.type[:14]](**attr)
            elif field.type.startswith('list:'):
                ftype = self.types[field.type](**attr)
            elif not field.type in self.types\
                 or not self.types[field.type]:
                raise SyntaxError, 'Field: unknown field type: %s' % field.type
            else:
                ftype = self.types[field.type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError, "polymodel must be None, True, a table or a tablename"
        return None

    def expand(self,expression,field_type=None):
        if isinstance(expression,Field):
            if expression.type in ('text','blob'):
                raise SyntaxError, 'AppEngine does not index by: %s' % expression.type
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self,first,second):
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name=='__key__' and a[0].name!='__key__':
            return b+a
        return a+b

    def EQ(self,first,second=None):
        if isinstance(second, Key):
            return [GAEF(first.name,'=',second,lambda a,b:a==b)]
        return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]

    def NE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]

    def LT(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<',second,lambda a,b:a<b)]

    def LE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]

    def GT(self,first,second=None):
        if first.type != 'id' or second==0 or second == '0':
            return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>',second,lambda a,b:a>b)]

    def GE(self,first,second=None):
        if first.type != 'id':
            return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]

    def INVERT(self,first):
        return '-%s' % first.name

    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))

    def BELONGS(self,first,second=None):
        if not isinstance(second,(list, tuple)):
            raise SyntaxError, "Not supported"
        if first.type != 'id':
            return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
        else:
            second = [Key.from_path(first._tablename, i) for i in second]
            return [GAEF(first.name,'in',second,lambda a,b:a in b)]

    def CONTAINS(self,first,second):
        if not first.type.startswith('list:'):
            raise SyntaxError, "Not supported"
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]

    def NOT(self,first):
        nops = { self.EQ: self.NE,
                 self.NE: self.EQ,
                 self.LT: self.GE,
                 self.GT: self.LE,
                 self.LE: self.GT,
                 self.GE: self.LT}
        if not isinstance(first,Query):
            raise SyntaxError, "Not supported"
        nop = nops.get(first.op,None)
        if not nop:
            raise SyntaxError, "Not supported %s" % first.op.__name__
        first.op = nop
        return self.expand(first)

    def truncate(self,table,mode):
        self.db(table.id > 0).delete()

    def select_raw(self,query,fields=[],attributes={}):
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = fields[0].table._id>0
        else:
            raise SyntaxError, "Unable to determine a tablename"
        query = self.filter_tenant(query,[tablename])
        tableobj = self.db[tablename]._tableobj
        items = tableobj.all()
        filters = self.expand(query)
        for filter in filters:
            if filter.name=='__key__' and filter.op=='>' and filter.value==0:
                continue
            elif filter.name=='__key__' and filter.op=='=':
                if filter.value==0:
                    items = []
                elif isinstance(filter.value, Key):
                    item = tableobj.get(filter.value)
                    items = (item and [item]) or []
                else:
                    item = tableobj.get_by_id(filter.value)
                    items = (item and [item]) or []
            elif isinstance(items,list): # i.e. there is a single record!
                # filter on the comprehension variable 'i', not the stale
                # 'item' bound by the branch above
                items = [i for i in items if filter.apply(getattr(i,filter.name),
                                                          filter.value)]
            else:
                if filter.name=='__key__': items.order('__key__')
                items = items.filter('%s %s' % (filter.name,filter.op),filter.value)
        if not isinstance(items,list):
            if attributes.get('left', None):
                raise SyntaxError, 'Set: no left join in appengine'
            if attributes.get('groupby', None):
                raise SyntaxError, 'Set: no groupby in appengine'
            orderby = attributes.get('orderby', False)
            if orderby:
                ### THIS REALLY NEEDS IMPROVEMENT !!!
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby,Expression):
                    orderby = self.expand(orderby)
                orders = orderby.split(', ')
                for order in orders:
                    order={'-id':'-__key__','id':'__key__'}.get(order,order)
                    items = items.order(order)
            if attributes.get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                (limit, offset) = (lmax - lmin, lmin)
                items = items.fetch(limit, offset=offset)
        fields = self.db[tablename].fields
        return (items, tablename, fields)

    def select(self,query,fields,attributes):
        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [
            [t=='id' and int(item.key().id()) or getattr(item, t) for t in fields]
            for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        return self.parse(rows, colnames, False)

    def count(self,query,distinct=None):
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            return len(items)
        except TypeError:
            return items.count(limit=None)

    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because, according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            counter = items.count(limit=None)
            leftitems = items.fetch(1000)
            while len(leftitems):
                gae.delete(leftitems)
                leftitems = items.fetch(1000)
        else:
            counter = len(items)
            gae.delete(items)
        return counter

3225 - def update(self,tablename,query,update_fields):
3226 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 3227 (items, tablename, fields) = self.select_raw(query) 3228 counter = 0 3229 for item in items: 3230 for field, value in update_fields: 3231 setattr(item, field.name, self.represent(value,field.type)) 3232 item.put() 3233 counter += 1 3234 logger.info(str(counter)) 3235 return counter
3236
3237 - def insert(self,table,fields):
3238 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 3239 # table._db['_lastsql'] = self._insert(table,fields) 3240 tmp = table._tableobj(**dfields) 3241 tmp.put() 3242 rid = Reference(tmp.key().id()) 3243 (rid._table, rid._record) = (table, None) 3244 return rid
3245
3246 - def bulk_insert(self,table,items):
3247 parsed_items = [] 3248 for item in items: 3249 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 3250 parsed_items.append(table._tableobj(**dfields)) 3251 gae.put(parsed_items) 3252 return True
3253 3254 try: 3255 import couchdb 3256 drivers.append('CouchDB') 3257 except ImportError: 3258 logger.debug('no couchdb driver') 3259
3260 -def uuid2int(uuidv):
3261 return uuid.UUID(uuidv).int
3262
3263 -def int2uuid(n):
3264 return str(uuid.UUID(int=n))
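These two helpers are exact inverses for canonical UUID strings; they are used below to map UUIDs onto the integer record ids the DAL expects:

    >>> u = '12345678-1234-5678-1234-567812345678'
    >>> n = uuid2int(u)
    >>> int2uuid(n) == u
    True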
3265
3266 -class CouchDBAdapter(NoSQLAdapter):
3267 uploads_in_blob = True 3268 types = { 3269 'boolean': bool, 3270 'string': str, 3271 'text': str, 3272 'password': str, 3273 'blob': str, 3274 'upload': str, 3275 'integer': long, 3276 'double': float, 3277 'date': datetime.date, 3278 'time': datetime.time, 3279 'datetime': datetime.datetime, 3280 'id': long, 3281 'reference': long, 3282 'list:string': list, 3283 'list:integer': list, 3284 'list:reference': list, 3285 } 3286
3287 - def file_exists(self, filename): pass
3288 - def file_open(self, filename, mode='rb', lock=True): pass
3289 - def file_close(self, fileobj, unlock=True): pass
3290
3291 - def expand(self,expression,field_type=None):
3292 if isinstance(expression,Field): 3293 if expression.type=='id': 3294 return "%s._id" % expression.tablename 3295 return BaseAdapter.expand(self,expression,field_type)
3296
3297 - def AND(self,first,second):
3298 return '(%s && %s)' % (self.expand(first),self.expand(second))
3299
3300 - def OR(self,first,second):
3301 return '(%s || %s)' % (self.expand(first),self.expand(second))
3302
3303 - def EQ(self,first,second):
3304 if second is None: 3305 return '(%s == null)' % self.expand(first) 3306 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
3307
3308 - def NE(self,first,second):
3309 if second is None: 3310 return '(%s != null)' % self.expand(first) 3311 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
3312
3313 - def COMMA(self,first,second):
3314 return '%s + %s' % (self.expand(first),self.expand(second))
3315
3316 - def represent(self, obj, fieldtype):
3317 value = NoSQLAdapter.represent(self, obj, fieldtype) 3318 if fieldtype=='id': 3319 return repr(str(int(value))) 3320 return repr(not isinstance(value,unicode) and value or value.encode('utf8'))
3321
3322 - def __init__(self,db,uri='couchdb://127.0.0.1:5984', 3323 pool_size=0,folder=None,db_codec ='UTF-8', 3324 credential_decoder=lambda x:x, driver_args={}, 3325 adapter_args={}):
3326 self.db = db 3327 self.uri = uri 3328 self.dbengine = 'couchdb' 3329 self.folder = folder 3330 db['_lastsql'] = '' 3331 self.db_codec = 'UTF-8' 3332 self.pool_size = pool_size 3333 3334 url='http://'+uri[10:] 3335 def connect(url=url,driver_args=driver_args): 3336 return couchdb.Server(url,**driver_args)
3337 self.pool_connection(connect)
3338
3339 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
3340 if migrate: 3341 try: 3342 self.connection.create(table._tablename) 3343 except: 3344 pass
3345
3346 - def insert(self,table,fields):
3347 id = uuid2int(web2py_uuid()) 3348 ctable = self.connection[table._tablename] 3349 values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields) 3350 values['_id'] = str(id) 3351 ctable.save(values) 3352 return id
3353
3354 - def _select(self,query,fields,attributes):
3355 if not isinstance(query,Query): 3356 raise SyntaxError, "Not Supported" 3357 for key in set(attributes.keys())-set(('orderby','groupby','limitby', 3358 'required','cache','left', 3359 'distinct','having')): 3360 raise SyntaxError, 'invalid select attribute: %s' % key 3361 new_fields=[] 3362 for item in fields: 3363 if isinstance(item,SQLALL): 3364 new_fields += item.table 3365 else: 3366 new_fields.append(item) 3367 def uid(fd): 3368 return fd=='id' and '_id' or fd
3369 def get(row,fd): 3370 return fd=='id' and int(row['_id']) or row.get(fd,None) 3371 fields = new_fields 3372 tablename = self.get_table(query) 3373 fieldnames = [f.name for f in (fields or self.db[tablename])] 3374 colnames = ['%s.%s' % (tablename,k) for k in fieldnames] 3375 fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) 3376 fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\ 3377 dict(t=tablename, 3378 query=self.expand(query), 3379 order='%s._id' % tablename, 3380 fields=fields) 3381 return fn, colnames 3382
3383 - def select(self,query,fields,attributes):
3384 if not isinstance(query,Query): 3385 raise SyntaxError, "Not Supported" 3386 fn, colnames = self._select(query,fields,attributes) 3387 tablename = colnames[0].split('.')[0] 3388 ctable = self.connection[tablename] 3389 rows = [cols['value'] for cols in ctable.query(fn)] 3390 return self.parse(rows, colnames, False)
3391
3392 - def delete(self,tablename,query):
3393 if not isinstance(query,Query): 3394 raise SyntaxError, "Not Supported" 3395 if query.first.type=='id' and query.op==self.EQ: 3396 id = query.second 3397 tablename = query.first.tablename 3398 assert(tablename == query.first.tablename) 3399 ctable = self.connection[tablename] 3400 try: 3401 del ctable[str(id)] 3402 return 1 3403 except couchdb.http.ResourceNotFound: 3404 return 0 3405 else: 3406 tablename = self.get_table(query) 3407 rows = self.select(query,[self.db[tablename].id],{}) 3408 ctable = self.connection[tablename] 3409 for row in rows: 3410 del ctable[str(row.id)] 3411 return len(rows)
3412
3413 - def update(self,tablename,query,fields):
3414 if not isinstance(query,Query): 3415 raise SyntaxError, "Not Supported" 3416 if query.first.type=='id' and query.op==self.EQ: 3417 id = query.second 3418 tablename = query.first.tablename 3419 ctable = self.connection[tablename] 3420 try: 3421 doc = ctable[str(id)] 3422 for key,value in fields: 3423 doc[key.name] = NoSQLAdapter.represent(self,value,self.db[tablename][key.name].type) 3424 ctable.save(doc) 3425 return 1 3426 except couchdb.http.ResourceNotFound: 3427 return 0 3428 else: 3429 tablename = self.get_table(query) 3430 rows = self.select(query,[self.db[tablename].id],{}) 3431 ctable = self.connection[tablename] 3432 table = self.db[tablename] 3433 for row in rows: 3434 doc = ctable[str(row.id)] 3435 for key,value in fields: 3436 doc[key.name] = NoSQLAdapter.represent(self,value,table[key.name].type) 3437 ctable.save(doc) 3438 return len(rows)
3439
3440 - def count(self,query,distinct=None):
3441 if distinct: 3442 raise RuntimeError, "COUNT DISTINCT not supported" 3443 if not isinstance(query,Query): 3444 raise SyntaxError, "Not Supported" 3445 tablename = self.get_table(query) 3446 rows = self.select(query,[self.db[tablename].id],{}) 3447 return len(rows)
3448
3449 -def cleanup(text):
3450 """ 3451 validates that the given text is clean: only contains [0-9a-zA-Z_] 3452 """ 3453 3454 if re.compile('[^0-9a-zA-Z_]').findall(text): 3455 raise SyntaxError, \ 3456 'only [0-9a-zA-Z_] allowed in table and field names, received %s' \ 3457 % text 3458 return text
3459 3460 3461 try: 3462 import pymongo 3463 drivers.append('mongoDB') 3464 except: 3465 logger.debug('no mongoDB driver') 3466
3467 -class MongoDBAdapter(NoSQLAdapter):
3468 uploads_in_blob = True 3469 types = { 3470 'boolean': bool, 3471 'string': str, 3472 'text': str, 3473 'password': str, 3474 'blob': str, 3475 'upload': str, 3476 'integer': long, 3477 'double': float, 3478 'date': datetime.date, 3479 'time': datetime.time, 3480 'datetime': datetime.datetime, 3481 'id': long, 3482 'reference': long, 3483 'list:string': list, 3484 'list:integer': list, 3485 'list:reference': list, 3486 } 3487
3488 - def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', 3489 pool_size=0,folder=None,db_codec ='UTF-8', 3490 credential_decoder=lambda x:x, driver_args={}, 3491 adapter_args={}):
3492 self.db = db 3493 self.uri = uri 3494 self.dbengine = 'mongodb' 3495 self.folder = folder 3496 db['_lastsql'] = '' 3497 self.db_codec = 'UTF-8' 3498 self.pool_size = pool_size 3499 3500 m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self.uri[10:]) 3501 if not m: 3502 raise SyntaxError, "Invalid URI string in DAL: %s" % self.uri 3503 host = m.group('host') 3504 if not host: 3505 raise SyntaxError, 'mongodb: host name required' 3506 dbname = m.group('db') 3507 if not dbname: 3508 raise SyntaxError, 'mongodb: db name required' 3509 port = int(m.group('port') or 27017) # pymongo expects an integer port 3510 driver_args.update(dict(host=host,port=port)) 3511 def connect(dbname=dbname,driver_args=driver_args): 3512 return pymongo.Connection(**driver_args)[dbname]
3513 self.pool_connection(connect)
3514
3515 - def insert(self,table,fields):
3516 ctable = self.connection[table._tablename] 3517 values = dict((k.name,self.represent(v,k.type)) for k,v in fields) # k is a Field, so key on k.name 3518 values['_id'] = new_id = uuid2int(web2py_uuid()) # assign an integer id, as the CouchDB adapter does 3519 ctable.insert(values); return new_id
3520 3521
3522 - def count(self,query):
3523 raise RuntimeError, "Not implemented"
3524
3525 - def select(self,query,fields,attributes):
3526 raise RuntimeError, "Not implemented"
3527
3528 - def delete(self,tablename, query):
3529 raise RuntimeError, "Not implemented"
3530
3531 - def update(self,tablename,query,fields):
3532 raise RuntimeError, "Not implemented"
3533 3534 3535 ######################################################################## 3536 # end of adapters 3537 ######################################################################## 3538 3539 ADAPTERS = { 3540 'sqlite': SQLiteAdapter, 3541 'sqlite:memory': SQLiteAdapter, 3542 'mysql': MySQLAdapter, 3543 'postgres': PostgreSQLAdapter, 3544 'oracle': OracleAdapter, 3545 'mssql': MSSQLAdapter, 3546 'mssql2': MSSQL2Adapter, 3547 'db2': DB2Adapter, 3548 'informix': InformixAdapter, 3549 'firebird': FireBirdAdapter, 3550 'firebird_embedded': FireBirdAdapter, 3551 'ingres': IngresAdapter, 3552 'ingresu': IngresUnicodeAdapter, 3553 'sapdb': SAPDBAdapter, 3554 'cubrid': CubridAdapter, 3555 'jdbc:sqlite': JDBCSQLiteAdapter, 3556 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 3557 'jdbc:postgres': JDBCPostgreSQLAdapter, 3558 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 3559 'google:datastore': GoogleDatastoreAdapter, 3560 'google:sql': GoogleSQLAdapter, 3561 'couchdb': CouchDBAdapter, 3562 'mongodb': MongoDBAdapter, 3563 } 3564 3565
3566 -def sqlhtml_validators(field):
3567 """ 3568 Field type validation, using web2py's validators mechanism. 3569 3570 makes sure the content of a field is in line with the declared 3571 fieldtype 3572 """ 3573 if not have_validators: 3574 return [] 3575 field_type, field_length = field.type, field.length 3576 if isinstance(field_type, SQLCustomType): 3577 if hasattr(field_type, 'validator'): 3578 return field_type.validator 3579 else: 3580 field_type = field_type.type 3581 elif not isinstance(field_type,str): 3582 return [] 3583 requires=[] 3584 def ff(r,id): 3585 row=r(id) 3586 if not row: 3587 return id 3588 elif hasattr(r, '_format') and isinstance(r._format,str): 3589 return r._format % row 3590 elif hasattr(r, '_format') and callable(r._format): 3591 return r._format(row) 3592 else: 3593 return id
3594 if field_type == 'string': 3595 requires.append(validators.IS_LENGTH(field_length)) 3596 elif field_type == 'text': 3597 requires.append(validators.IS_LENGTH(2 ** 16)) 3598 elif field_type == 'password': 3599 requires.append(validators.IS_LENGTH(field_length)) 3600 elif field_type == 'double': 3601 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) 3602 elif field_type == 'integer': 3603 requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100)) 3604 elif field_type.startswith('decimal'): 3605 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) 3606 elif field_type == 'date': 3607 requires.append(validators.IS_DATE()) 3608 elif field_type == 'time': 3609 requires.append(validators.IS_TIME()) 3610 elif field_type == 'datetime': 3611 requires.append(validators.IS_DATETIME()) 3612 elif field.db and field_type.startswith('reference') and \ 3613 field_type.find('.') < 0 and \ 3614 field_type[10:] in field.db.tables: 3615 referenced = field.db[field_type[10:]] 3616 def repr_ref(id, r=referenced, f=ff): return f(r, id) 3617 field.represent = field.represent or repr_ref 3618 if hasattr(referenced, '_format') and referenced._format: 3619 requires = validators.IS_IN_DB(field.db,referenced.id, 3620 referenced._format) 3621 if field.unique: 3622 requires._and = validators.IS_NOT_IN_DB(field.db,field) 3623 if field.tablename == field_type[10:]: 3624 return validators.IS_EMPTY_OR(requires) 3625 return requires 3626 elif field.db and field_type.startswith('list:reference') and \ 3627 field_type.find('.') < 0 and \ 3628 field_type[15:] in field.db.tables: 3629 referenced = field.db[field_type[15:]] 3630 def list_ref_repr(ids, r=referenced, f=ff): 3631 if not ids: 3632 return None 3633 refs = r._db(r.id.belongs(ids)).select(r.id) 3634 return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '') 3635 field.represent = field.represent or list_ref_repr 3636 if hasattr(referenced, '_format') and referenced._format: 3637 requires = validators.IS_IN_DB(field.db,referenced.id, 3638 referenced._format,multiple=True) 3639 else: 3640 requires = validators.IS_IN_DB(field.db,referenced.id, 3641 multiple=True) 3642 if field.unique: 3643 requires._and = validators.IS_NOT_IN_DB(field.db,field) 3644 return requires 3645 elif field_type.startswith('list:'): 3646 def repr_list(values): return', '.join(str(v) for v in (values or [])) 3647 field.represent = field.represent or repr_list 3648 if field.unique: 3649 requires.insert(0,validators.IS_NOT_IN_DB(field.db,field)) 3650 sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] 3651 if field.notnull and not field_type[:2] in sff: 3652 requires.insert(0, validators.IS_NOT_EMPTY()) 3653 elif not field.notnull and field_type[:2] in sff and requires: 3654 requires[-1] = validators.IS_EMPTY_OR(requires[-1]) 3655 return requires 3656 3657
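As a hedged illustration of the mapping above (not a doctest; it needs the gluon validators importable), a 'string' field picks up IS_LENGTH of its declared length and a 'date' field picks up IS_DATE when no explicit requires is given:

    # db.define_table('event', Field('name', length=32), Field('when', 'date'))
    # db.event.name.requires   # includes IS_LENGTH(32)
    # db.event.when.requires   # includes IS_DATE(), wrapped in IS_EMPTY_OR
    #                          # since the field is nullable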
3658 -def bar_escape(item):
3659 return str(item).replace('|', '||')
3660
3661 -def bar_encode(items):
3662 return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
3663
3664 -def bar_decode_integer(value):
3665 return [int(x) for x in value.split('|') if x.strip()]
3666
3667 -def bar_decode_string(value):
3668 return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()]
3669 3670
3671 -class Row(dict):
3672 3673 """ 3674 a dictionary that lets you do d['a'] as well as d.a 3675 this is only used to store a Row 3676 """ 3677
3678 - def __getitem__(self, key):
3679 key=str(key) 3680 if key in self.get('_extra',{}): 3681 return self._extra[key] 3682 return dict.__getitem__(self, key)
3683
3684 - def __call__(self,key):
3685 return self.__getitem__(key)
3686
3687 - def __setitem__(self, key, value):
3688 dict.__setitem__(self, str(key), value)
3689
3690 - def __getattr__(self, key):
3691 return self[key]
3692
3693 - def __setattr__(self, key, value):
3694 self[key] = value
3695
3696 - def __repr__(self):
3697 return '<Row ' + dict.__repr__(self) + '>'
3698
3699 - def __int__(self):
3700 return dict.__getitem__(self,'id')
3701
3702 - def __eq__(self,other):
3703 try: 3704 return self.as_dict() == other.as_dict() 3705 except AttributeError: 3706 return False
3707
3708 - def __ne__(self,other):
3709 return not (self == other)
3710
3711 - def __copy__(self):
3712 return Row(dict(self))
3713
3714 - def as_dict(self,datetime_to_str=False):
3715 SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list) 3716 d = dict(self) 3717 for k in copy.copy(d.keys()): 3718 v=d[k] 3719 if d[k] is None: 3720 continue 3721 elif isinstance(v,Row): 3722 d[k]=v.as_dict() 3723 elif isinstance(v,Reference): 3724 d[k]=int(v) 3725 elif isinstance(v,decimal.Decimal): 3726 d[k]=float(v) 3727 elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): 3728 if datetime_to_str: 3729 d[k] = v.isoformat().replace('T',' ')[:19] 3730 elif not isinstance(v,SERIALIZABLE_TYPES): 3731 del d[k] 3732 return d
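For example, a Row supports attribute, item, and call access interchangeably, and as_dict returns a plain serializable dictionary:

    >>> row = Row(id=1, name='James')
    >>> row.name == row['name'] == row('name')
    True
    >>> row.as_dict() == {'id': 1, 'name': 'James'}
    True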
3733 3734
3735 -def Row_unpickler(data):
3736 return Row(cPickle.loads(data))
3737
3738 -def Row_pickler(data):
3739 return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)
3740 3741 copy_reg.pickle(Row, Row_pickler, Row_unpickler) 3742 3743 3744 ################################################################################ 3745 # Everything below should be independent of the specifics of the 3746 # database and should work for RDBMSs and some NoSQL databases 3747 ################################################################################ 3748
3749 -class SQLCallableList(list):
3750 - def __call__(self):
3751 return copy.copy(self)
3752 3753
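SQLCallableList is what makes db.tables usable both as a list and as a zero-argument callable returning a copy:

    >>> tables = SQLCallableList(['person', 'pet'])
    >>> tables() == ['person', 'pet']
    True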
3754 -class DAL(dict):
3755 3756 """ 3757 an instance of this class represents a database connection 3758 3759 Example:: 3760 3761 db = DAL('sqlite://test.db') 3762 db.define_table('tablename', Field('fieldname1'), 3763 Field('fieldname2')) 3764 """ 3765 3766 @staticmethod
3767 - def set_folder(folder):
3768 """ 3769 # ## this allows gluon to set a folder for this thread 3770 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 3771 """ 3772 BaseAdapter.set_folder(folder)
3773 3774 @staticmethod
3775 - def distributed_transaction_begin(*instances):
3776 if not instances: 3777 return 3778 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 3779 instances = list(enumerate(instances)) # materialize so it can be iterated more than once 3780 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 3781 for (i, db) in instances: 3782 if not db._adapter.support_distributed_transaction(): 3783 raise SyntaxError, \ 3784 'distributed transaction not supported by %s' % db._dbname 3785 for (i, db) in instances: 3786 db._adapter.distributed_transaction_begin(keys[i])
3787 3788 @staticmethod
3789 - def distributed_transaction_commit(*instances):
3790 if not instances: 3791 return 3792 instances = list(enumerate(instances)) # materialize: a bare enumerate() would be exhausted after one pass 3793 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 3794 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 3795 for (i, db) in instances: 3796 if not db._adapter.support_distributed_transaction(): 3797 raise SyntaxError, \ 3798 'distributed transaction not supported by %s' % db._dbname 3799 try: 3800 for (i, db) in instances: 3801 db._adapter.prepare(keys[i]) 3802 except: 3803 for (i, db) in instances: 3804 db._adapter.rollback_prepared(keys[i]) 3805 raise RuntimeError, 'failure to commit distributed transaction' 3806 else: 3807 for (i, db) in instances: 3808 db._adapter.commit_prepared(keys[i]) 3809 return
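A minimal usage sketch of the two-phase protocol implemented above (URIs are placeholders; every participating backend must support distributed transactions):

    # db1 = DAL('postgres://user:pass@host1/test')
    # db2 = DAL('postgres://user:pass@host2/test')
    # DAL.distributed_transaction_begin(db1, db2)
    # ... inserts/updates on both databases ...
    # DAL.distributed_transaction_commit(db1, db2)  # prepares every branch, then
    #     commits them all; rolls back all prepared branches on failure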
3810 3811
3812 - def __init__(self, uri='sqlite://dummy.db', pool_size=0, folder=None, 3813 db_codec='UTF-8', check_reserved=None, 3814 migrate=True, fake_migrate=False, 3815 migrate_enabled=True, fake_migrate_all=False, 3816 decode_credentials=False, driver_args=None, 3817 adapter_args={}, attempts=5, auto_import=False):
3818 """ 3819 Creates a new Database Abstraction Layer instance. 3820 3821 Keyword arguments: 3822 3823 :uri: string that contains information for connecting to a database. 3824 (default: 'sqlite://dummy.db') 3825 :pool_size: How many open connections to make to the database object. 3826 :folder: <please update me> 3827 :db_codec: string encoding of the database (default: 'UTF-8') 3828 :check_reserved: list of adapters to check tablenames and column names 3829 against sql reserved keywords. (Default None) 3830 3831 * 'common' List of sql keywords that are common to all database types 3832 such as "SELECT, INSERT". (recommended) 3833 * 'all' Checks against all known SQL keywords. (not recommended) 3834 <adaptername> Checks against the specific adapters list of keywords 3835 (recommended) 3836 * '<adaptername>_nonreserved' Checks against the specific adapters 3837 list of nonreserved keywords. (if available) 3838 :migrate (defaults to True) sets default migrate behavior for all tables 3839 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 3840 :migrate_enabled (defaults to True). If set to False disables ALL migrations 3841 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 3842 :attempts (defaults to 5). Number of times to attempt connecting 3843 """ 3844 if not decode_credentials: 3845 credential_decoder = lambda cred: cred 3846 else: 3847 credential_decoder = lambda cred: urllib.unquote(cred) 3848 if folder: 3849 self.set_folder(folder) 3850 self._uri = uri 3851 self._pool_size = pool_size 3852 self._db_codec = db_codec 3853 self._lastsql = '' 3854 self._timings = [] 3855 self._pending_references = {} 3856 self._request_tenant = 'request_tenant' 3857 self._common_fields = [] 3858 if not str(attempts).isdigit() or attempts < 0: 3859 attempts = 5 3860 if uri: 3861 uris = isinstance(uri,(list,tuple)) and uri or [uri] 3862 error = '' 3863 connected = False 3864 for k in range(attempts): 3865 for uri in uris: 3866 try: 3867 if is_jdbc and not uri.startswith('jdbc:'): 3868 uri = 'jdbc:'+uri 3869 self._dbname = regex_dbname.match(uri).group() 3870 if not self._dbname in ADAPTERS: 3871 raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname 3872 # notice that driver args or {} else driver_args defaults to {} global, not correct 3873 args = (self,uri,pool_size,folder,db_codec,credential_decoder,driver_args or {}, adapter_args) 3874 self._adapter = ADAPTERS[self._dbname](*args) 3875 connected = True 3876 break 3877 except SyntaxError: 3878 raise 3879 except Exception, error: 3880 pass 3881 if connected: 3882 break 3883 else: 3884 time.sleep(1) 3885 if not connected: 3886 raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, error) 3887 else: 3888 args = (self,'None',0,folder,db_codec) 3889 self._adapter = BaseAdapter(*args) 3890 migrate = fake_migrate = False 3891 adapter = self._adapter 3892 self._uri_hash = hashlib.md5(adapter.uri).hexdigest() 3893 self.tables = SQLCallableList() 3894 self.check_reserved = check_reserved 3895 if self.check_reserved: 3896 from reserved_sql_keywords import ADAPTERS as RSK 3897 self.RSK = RSK 3898 self._migrate = migrate 3899 self._fake_migrate = fake_migrate 3900 self._migrate_enabled = migrate_enabled 3901 self._fake_migrate_all = fake_migrate_all 3902 if auto_import: 3903 self.import_table_definitions(adapter.folder)
3904
3905 - def import_table_definitions(self,path,migrate=False,fake_migrate=False):
3906 pattern = os.path.join(path,self._uri_hash+'_*.table') 3907 for filename in glob.glob(pattern): 3908 tfile = self._adapter.file_open(filename, 'r') 3909 sql_fields = cPickle.load(tfile) 3910 name = filename[len(pattern)-7:-6] 3911 mf = [(value['sortable'],Field(key,type=value['type'])) \ 3912 for key, value in sql_fields.items()] 3913 mf.sort() 3914 self.define_table(name,*[item[1] for item in mf], 3915 **dict(migrate=migrate,fake_migrate=fake_migrate))
3916
3917 - def check_reserved_keyword(self, name):
3918 """ 3919 Validates ``name`` against SQL keywords 3920 Uses self.check_reserve which is a list of 3921 operators to use. 3922 self.check_reserved 3923 ['common', 'postgres', 'mysql'] 3924 self.check_reserved 3925 ['all'] 3926 """ 3927 for backend in self.check_reserved: 3928 if name.upper() in self.RSK[backend]: 3929 raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
3930
3931 - def __contains__(self, tablename):
3932 if self.has_key(tablename): 3933 return True 3934 else: 3935 return False
3936
3937 - def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True):
3938 """ 3939 EXAMPLE: 3940 3941 db.define_table('person',Field('name'),Field('info')) 3942 db.define_table('pet',Field('person',db.person),Field('name'),Field('info')) 3943 3944 @request.restful() 3945 def index(): 3946 def GET(*args,**vars): 3947 patterns = [ 3948 "/persons[person]", 3949 "/{person.name.startswith}", 3950 "/{person.name}/:field", 3951 "/{person.name}/pets[pet.person]", 3952 "/{person.name}/pet[pet.person]/{pet.name}", 3953 "/{person.name}/pet[pet.person]/{pet.name}/:field" 3954 ] 3955 parser = db.parse_as_rest(patterns,args,vars) 3956 if parser.status == 200: 3957 return dict(content=parser.response) 3958 else: 3959 raise HTTP(parser.status,parser.error) 3960 def POST(table_name,**vars): 3961 if table_name == 'person': 3962 return db.person.validate_and_insert(**vars) 3963 elif table_name == 'pet': 3964 return db.pet.validate_and_insert(**vars) 3965 else: 3966 raise HTTP(400) 3967 return locals() 3968 """ 3969 3970 db = self 3971 re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 3972 re2 = re.compile('^.+\[.+\]$') 3973 3974 def auto_table(table,base='',depth=0): 3975 patterns = [] 3976 for field in db[table].fields: 3977 if base: 3978 tag = '%s/%s' % (base,field.replace('_','-')) 3979 else: 3980 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 3981 f = db[table][field] 3982 if not f.readable: continue 3983 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 3984 tag += '/{%s.%s}' % (table,field) 3985 patterns.append(tag) 3986 patterns.append(tag+'/:field') 3987 elif f.type.startswith('boolean'): 3988 tag += '/{%s.%s}' % (table,field) 3989 patterns.append(tag) 3990 patterns.append(tag+'/:field') 3991 elif f.type.startswith('double') or f.type.startswith('integer'): 3992 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 3993 patterns.append(tag) 3994 patterns.append(tag+'/:field') 3995 elif f.type.startswith('list:'): 3996 tag += '/{%s.%s.contains}' % (table,field) 3997 patterns.append(tag) 3998 patterns.append(tag+'/:field') 3999 elif f.type in ('date','datetime'): 4000 tag+= '/{%s.%s.year}' % (table,field) 4001 patterns.append(tag) 4002 patterns.append(tag+'/:field') 4003 tag+='/{%s.%s.month}' % (table,field) 4004 patterns.append(tag) 4005 patterns.append(tag+'/:field') 4006 tag+='/{%s.%s.day}' % (table,field) 4007 patterns.append(tag) 4008 patterns.append(tag+'/:field') 4009 if f.type in ('datetime','time'): 4010 tag+= '/{%s.%s.hour}' % (table,field) 4011 patterns.append(tag) 4012 patterns.append(tag+'/:field') 4013 tag+='/{%s.%s.minute}' % (table,field) 4014 patterns.append(tag) 4015 patterns.append(tag+'/:field') 4016 tag+='/{%s.%s.second}' % (table,field) 4017 patterns.append(tag) 4018 patterns.append(tag+'/:field') 4019 if depth>0: 4020 for rtable,rfield in db[table]._referenced_by: 4021 tag+='/%s[%s.%s]' % (rtable,rtable,rfield) 4022 patterns.append(tag) 4023 patterns += auto_table(rtable,base=tag,depth=depth-1) 4024 return patterns
4025 4026 if patterns=='auto': 4027 patterns=[] 4028 for table in db.tables: 4029 if not table.startswith('auth_'): 4030 patterns += auto_table(table,base='',depth=1) 4031 else: 4032 i = 0 4033 while i<len(patterns): 4034 pattern = patterns[i] 4035 tokens = pattern.split('/') 4036 if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): 4037 new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1])) 4038 patterns = patterns[:i]+new_patterns+patterns[i+1:] 4039 i += len(new_patterns) 4040 else: 4041 i += 1 4042 if '/'.join(args) == 'patterns': 4043 return Row({'status':200,'pattern':'list', 4044 'error':None,'response':patterns}) 4045 for pattern in patterns: 4046 otable=table=None 4047 dbset=db(query) 4048 i=0 4049 tags = pattern[1:].split('/') 4050 # print pattern 4051 if len(tags)!=len(args): 4052 continue 4053 for tag in tags: 4054 # print i, tag, args[i] 4055 if re1.match(tag): 4056 # print 're1:'+tag 4057 tokens = tag[1:-1].split('.') 4058 table, field = tokens[0], tokens[1] 4059 if not otable or table == otable: 4060 if len(tokens)==2 or tokens[2]=='eq': 4061 query = db[table][field]==args[i] 4062 elif tokens[2]=='ne': 4063 query = db[table][field]!=args[i] 4064 elif tokens[2]=='lt': 4065 query = db[table][field]<args[i] 4066 elif tokens[2]=='gt': 4067 query = db[table][field]>args[i] 4068 elif tokens[2]=='ge': 4069 query = db[table][field]>=args[i] 4070 elif tokens[2]=='le': 4071 query = db[table][field]<=args[i] 4072 elif tokens[2]=='year': 4073 query = db[table][field].year()==args[i] 4074 elif tokens[2]=='month': 4075 query = db[table][field].month()==args[i] 4076 elif tokens[2]=='day': 4077 query = db[table][field].day()==args[i] 4078 elif tokens[2]=='hour': 4079 query = db[table][field].hour()==args[i] 4080 elif tokens[2]=='minute': 4081 query = db[table][field].minutes()==args[i] 4082 elif tokens[2]=='second': 4083 query = db[table][field].seconds()==args[i] 4084 elif tokens[2]=='startswith': 4085 query = db[table][field].startswith(args[i]) 4086 elif tokens[2]=='contains': 4087 query = db[table][field].contains(args[i]) 4088 else: 4089 raise RuntimeError, "invalid pattern: %s" % pattern 4090 if len(tokens)==4 and tokens[3]=='not': 4091 query = ~query 4092 elif len(tokens)>=4: 4093 raise RuntimeError, "invalid pattern: %s" % pattern 4094 dbset=dbset(query) 4095 else: 4096 raise RuntimeError, "missing relation in pattern: %s" % pattern 4097 elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]: 4098 # print 're2:'+tag 4099 ref = tag[tag.find('[')+1:-1] 4100 if '.' in ref:
4101 table,field = ref.split('.') 4102 # print table,field 4103 if nested_select: 4104 try: 4105 dbset=db(db[table][field].belongs(dbset._select(db[otable]._id))) 4106 except ValueError: 4107 return Row({'status':400,'pattern':pattern, 4108 'error':'invalid path','response':None}) 4109 else: 4110 items = [item.id for item in dbset.select(db[otable]._id)] 4111 dbset=db(db[table][field].belongs(items)) 4112 else: 4113 dbset=dbset(db[ref]) 4114 elif tag==':field' and table: 4115 # # print 're3:'+tag 4116 field = args[i] 4117 if not field in db[table]: break 4118 try: 4119 item = dbset.select(db[table][field],limitby=(0,1)).first() 4120 except ValueError: 4121 return Row({'status':400,'pattern':pattern, 4122 'error':'invalid path','response':None}) 4123 if not item: 4124 return Row({'status':404,'pattern':pattern, 4125 'error':'record not found','response':None}) 4126 else: 4127 return Row({'status':200,'response':item[field], 4128 'pattern':pattern}) 4129 elif tag != args[i]: 4130 break 4131 otable = table 4132 i += 1 4133 if i==len(tags) and table: 4134 otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1) 4135 try: 4136 if otable[:1]=='~': orderby = ~db[otable[1:]][ofield] 4137 else: orderby = db[otable][ofield] 4138 except KeyError: 4139 return Row({'status':400,'error':'invalid orderby','response':None}) 4140 fields = [field for field in db[table] if field.readable] 4141 count = dbset.count() 4142 try: 4143 limits = (int(vars.get('min',0)),int(vars.get('max',1000))) 4144 if limits[0]<0 or limits[1]<limits[0]: raise ValueError 4145 except ValueError: 4146 return Row({'status':400,'error':'invalid limits','response':None}) 4147 if count > limits[1]-limits[0]: 4148 return Row({'status':400,'error':'too many records','response':None}) 4149 try: 4150 response = dbset.select(limitby=limits,orderby=orderby,*fields) 4151 except ValueError: 4152 return Row({'status':400,'pattern':pattern, 4153 'error':'invalid path','response':None}) 4154 return Row({'status':200,'response':response,'pattern':pattern}) 4155 return Row({'status':400,'error':'no matching pattern','response':None})
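Continuing the docstring example above, a request for /james/info against the pattern '/{person.name}/:field' would be resolved roughly as follows (hedged sketch, assuming the person table from the docstring):

    # parser = db.parse_as_rest(['/{person.name}/:field'], ['james', 'info'], {})
    # parser.status, parser.response   # (200, <value of person.info>) on a match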
4156 4157
4158 - def define_table( 4159 self, 4160 tablename, 4161 *fields, 4162 **args 4163 ):
4164 4165 for key in args: 4166 if key not in [ 4167 'migrate', 4168 'primarykey', 4169 'fake_migrate', 4170 'format', 4171 'trigger_name', 4172 'sequence_name', 4173 'polymodel']: 4174 raise SyntaxError, 'invalid table "%s" attribute: %s' % (tablename, key) 4175 migrate = self._migrate_enabled and args.get('migrate',self._migrate) 4176 fake_migrate = self._fake_migrate_all or args.get('fake_migrate',self._fake_migrate) 4177 format = args.get('format',None) 4178 trigger_name = args.get('trigger_name', None) 4179 sequence_name = args.get('sequence_name', None) 4180 primarykey=args.get('primarykey',None) 4181 polymodel=args.get('polymodel',None) 4182 if not isinstance(tablename,str): 4183 raise SyntaxError, "missing table name" 4184 tablename = cleanup(tablename) 4185 lowertablename = tablename.lower() 4186 4187 if tablename.startswith('_') or hasattr(self,lowertablename) or \ 4188 regex_python_keywords.match(tablename): 4189 raise SyntaxError, 'invalid table name: %s' % tablename 4190 elif lowertablename in self.tables: 4191 raise SyntaxError, 'table already defined: %s' % tablename 4192 elif self.check_reserved: 4193 self.check_reserved_keyword(tablename) 4194 4195 if self._common_fields: 4196 fields = [f for f in fields] + [f for f in self._common_fields] 4197 4198 t = self[tablename] = Table(self, tablename, *fields, 4199 **dict(primarykey=primarykey, 4200 trigger_name=trigger_name, 4201 sequence_name=sequence_name)) 4202 # db magic 4203 if self._uri in (None,'None'): 4204 return t 4205 4206 t._create_references() 4207 4208 if migrate or self._adapter.dbengine=='google:datastore': 4209 try: 4210 sql_locker.acquire() 4211 self._adapter.create_table(t,migrate=migrate, 4212 fake_migrate=fake_migrate, 4213 polymodel=polymodel) 4214 finally: 4215 sql_locker.release() 4216 else: 4217 t._dbt = None 4218 self.tables.append(tablename) 4219 t._format = format 4220 return t
4221
4222 - def __iter__(self):
4223 for tablename in self.tables: 4224 yield self[tablename]
4225
4226 - def __getitem__(self, key):
4227 return dict.__getitem__(self, str(key))
4228
4229 - def __setitem__(self, key, value):
4230 dict.__setitem__(self, str(key), value)
4231
4232 - def __getattr__(self, key):
4233 return self[key]
4234
4235 - def __setattr__(self, key, value):
4236 if key[:1]!='_' and key in self: 4237 raise SyntaxError, \ 4238 'Object %s exists and cannot be redefined' % key 4239 self[key] = value
4240
4241 - def __repr__(self):
4242 return '<DAL ' + dict.__repr__(self) + '>'
4243
4244 - def __call__(self, query=None):
4245 if isinstance(query,Table): 4246 query = query._id>0 4247 elif isinstance(query,Field): 4248 query = query!=None 4249 return Set(self, query)
4250
4251 - def commit(self):
4252 self._adapter.commit()
4253
4254 - def rollback(self):
4255 self._adapter.rollback()
4256
4257 - def executesql(self, query, placeholders=None, as_dict=False):
4258 """ 4259 placeholders is optional and will always be None when using DAL 4260 if using raw SQL with placeholders, placeholders may be 4261 a sequence of values to be substituted in 4262 or, *if supported by the DB driver*, a dictionary with keys 4263 matching named placeholders in your SQL. 4264 4265 Added 2009-12-05 "as_dict" optional argument. Will always be 4266 None when using DAL. If using raw SQL can be set to True 4267 and the results cursor returned by the DB driver will be 4268 converted to a sequence of dictionaries keyed with the db 4269 field names. Tested with SQLite but should work with any database 4270 since the cursor.description used to get field names is part of the 4271 Python dbi 2.0 specs. Results returned with as_dict = True are 4272 the same as those returned when applying .to_list() to a DAL query. 4273 4274 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 4275 4276 --bmeredyk 4277 """ 4278 if placeholders: 4279 self._adapter.execute(query, placeholders) 4280 else: 4281 self._adapter.execute(query) 4282 if as_dict: 4283 if not hasattr(self._adapter.cursor,'description'): 4284 raise RuntimeError, "database does not support executesql(...,as_dict=True)" 4285 # Non-DAL legacy db query, converts cursor results to dict. 4286 # sequence of 7-item sequences. each sequence tells about a column. 4287 # first item is always the field name according to Python Database API specs 4288 columns = self._adapter.cursor.description 4289 # reduce the column info down to just the field names 4290 fields = [f[0] for f in columns] 4291 # will hold our finished resultset in a list 4292 data = self._adapter.cursor.fetchall() 4293 # convert the list for each row into a dictionary so it's 4294 # easier to work with. row['field_name'] rather than row[0] 4295 return [dict(zip(fields,row)) for row in data] 4296 # see if any results returned from database 4297 try: 4298 return self._adapter.cursor.fetchall() 4299 except: 4300 return None
4301
4302 - def _update_referenced_by(self, other):
4303 for tablename in self.tables: 4304 by = self[tablename]._referenced_by 4305 by[:] = [item for item in by if not item[0] == other]
4306
4307 - def export_to_csv_file(self, ofile, *args, **kwargs):
4308 for table in self.tables: 4309 ofile.write('TABLE %s\r\n' % table) 4310 self(self[table]._id > 0).select().export_to_csv_file(ofile, *args, **kwargs) 4311 ofile.write('\r\n\r\n') 4312 ofile.write('END')
4313
4314 - def import_from_csv_file(self, ifile, id_map={}, null='<NULL>', 4315 unique='uuid', *args, **kwargs):
4316 for line in ifile: 4317 line = line.strip() 4318 if not line: 4319 continue 4320 elif line == 'END': 4321 return 4322 elif not line.startswith('TABLE ') or not line[6:] in self.tables: 4323 raise SyntaxError, 'invalid file format' 4324 else: 4325 tablename = line[6:] 4326 self[tablename].import_from_csv_file(ifile, id_map, null, 4327 unique, *args, **kwargs)
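Together with export_to_csv_file above, this supports a whole-database round trip (file name illustrative; the target db must define the same tables):

    # db.export_to_csv_file(open('backup.csv', 'wb'))
    # ...
    # other_db.import_from_csv_file(open('backup.csv', 'rb'))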
4328 4329
4330 -class SQLALL(object):
4331 """ 4332 Helper class providing a comma-separated string having all the field names 4333 (prefixed by table name and '.') 4334 4335 normally only called from within gluon.sql 4336 """ 4337
4338 - def __init__(self, table):
4339 self.table = table
4340
4341 - def __str__(self):
4342 return ', '.join([str(field) for field in self.table])
4343 4344
4345 -class Reference(int):
4346
4347 - def __allocate(self):
4348 if not self._record: 4349 self._record = self._table[int(self)] 4350 if not self._record: 4351 raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self))
4352
4353 - def __getattr__(self, key):
4354 if key == 'id': 4355 return int(self) 4356 self.__allocate() 4357 return self._record.get(key, None)
4358
4359 - def __setattr__(self, key, value):
4360 if key.startswith('_'): 4361 int.__setattr__(self, key, value) 4362 return 4363 self.__allocate() 4364 self._record[key] = value
4365
4366 - def __getitem__(self, key):
4367 if key == 'id': 4368 return int(self) 4369 self.__allocate() 4370 return self._record.get(key, None)
4371
4372 - def __setitem__(self,key,value):
4373 self.__allocate() 4374 self._record[key] = value
4375 4376
4377 -def Reference_unpickler(data):
4378 return marshal.loads(data)
4379
4380 -def Reference_pickler(data):
4381 try: 4382 marshal_dump = marshal.dumps(int(data)) 4383 except AttributeError: 4384 marshal_dump = 'i%s' % struct.pack('<i', int(data)) 4385 return (Reference_unpickler, (marshal_dump,))
4386 4387 copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler) 4388 4389
4390 -class Table(dict):
4391 4392 """ 4393 an instance of this class represents a database table 4394 4395 Example:: 4396 4397 db = DAL(...) 4398 db.define_table('users', Field('name')) 4399 db.users.insert(name='me') # print db.users._insert(...) to see SQL 4400 db.users.drop() 4401 """ 4402
4403 - def __init__( 4404 self, 4405 db, 4406 tablename, 4407 *fields, 4408 **args 4409 ):
4410 """ 4411 Initializes the table and performs checking on the provided fields. 4412 4413 Each table will have automatically an 'id'. 4414 4415 If a field is of type Table, the fields (excluding 'id') from that table 4416 will be used instead. 4417 4418 :raises SyntaxError: when a supplied field is of incorrect type. 4419 """ 4420 self._tablename = tablename 4421 self._sequence_name = args.get('sequence_name',None) or \ 4422 db and db._adapter.sequence_name(tablename) 4423 self._trigger_name = args.get('trigger_name',None) or \ 4424 db and db._adapter.trigger_name(tablename) 4425 4426 primarykey = args.get('primarykey', None) 4427 fieldnames,newfields=set(),[] 4428 if primarykey and not isinstance(primarykey,list): 4429 raise SyntaxError, "primarykey must be a list of fields from table '%s'" \ 4430 % tablename 4431 elif primarykey: 4432 self._primarykey = primarykey 4433 elif not [f for f in fields if hasattr(f,'type') and f.type=='id']: 4434 field = Field('id', 'id') 4435 newfields.append(field) 4436 fieldnames.add('id') 4437 self._id = field 4438 for field in fields: 4439 if not isinstance(field, (Field, Table)): 4440 raise SyntaxError, \ 4441 'define_table argument is not a Field or Table: %s' % field 4442 elif isinstance(field, Field) and not field.name in fieldnames: 4443 if hasattr(field, '_db'): 4444 field = copy.copy(field) 4445 newfields.append(field) 4446 fieldnames.add(field.name) 4447 if field.type=='id': 4448 self._id = field 4449 elif isinstance(field, Table): 4450 table = field 4451 for field in table: 4452 if not field.name in fieldnames and not field.type=='id': 4453 newfields.append(copy.copy(field)) 4454 fieldnames.add(field.name) 4455 else: 4456 # let's ignore new fields with duplicated names!!! 4457 pass 4458 fields = newfields 4459 self._db = db 4460 tablename = tablename 4461 self.fields = SQLCallableList() 4462 self.virtualfields = [] 4463 fields = list(fields) 4464 4465 if db and self._db._adapter.uploads_in_blob==True: 4466 for field in fields: 4467 if isinstance(field, Field) and field.type == 'upload'\ 4468 and field.uploadfield is True: 4469 tmp = field.uploadfield = '%s_blob' % field.name 4470 fields.append(self._db.Field(tmp, 'blob', default='')) 4471 4472 lower_fieldnames = set() 4473 for field in fields: 4474 if db and db.check_reserved: 4475 db.check_reserved_keyword(field.name) 4476 4477 if field.name.lower() in lower_fieldnames: 4478 raise SyntaxError, "duplicate field %s in table %s" % (field.name, tablename) 4479 else: 4480 lower_fieldnames.add(field.name.lower()) 4481 4482 self.fields.append(field.name) 4483 self[field.name] = field 4484 if field.type == 'id': 4485 self['id'] = field 4486 field.tablename = field._tablename = tablename 4487 field.table = field._table = self 4488 field.db = field._db = self._db 4489 field.length = min(field.length,self._db and self._db._adapter.maxcharlength or INFINITY) 4490 if field.requires == DEFAULT: 4491 field.requires = sqlhtml_validators(field) 4492 self.ALL = SQLALL(self) 4493 4494 if hasattr(self,'_primarykey'): 4495 for k in self._primarykey: 4496 if k not in self.fields: 4497 raise SyntaxError, \ 4498 "primarykey must be a list of fields from table '%s " % tablename 4499 else: 4500 self[k].notnull = True
4501
4502 - def _validate(self,**vars):
4503 errors = Row() 4504 for key,value in vars.items(): 4505 value,error = self[key].validate(value) 4506 if error: 4507 errors[key] = error 4508 return errors
4509
4510 - def _create_references(self):
4511 pr = self._db._pending_references 4512 self._referenced_by = [] 4513 for fieldname in self.fields: 4514 field=self[fieldname] 4515 if isinstance(field.type,str) and field.type[:10] == 'reference ': 4516 ref = field.type[10:].strip() 4517 if not ref.split(): 4518 raise SyntaxError, 'Table: reference to nothing: %s' %ref 4519 refs = ref.split('.') 4520 rtablename = refs[0] 4521 if not rtablename in self._db: 4522 pr[rtablename] = pr.get(rtablename,[]) + [field] 4523 continue 4524 rtable = self._db[rtablename] 4525 if len(refs)==2: 4526 rfieldname = refs[1] 4527 if not hasattr(rtable,'_primarykey'): 4528 raise SyntaxError,\ 4529 'keyed tables can only reference other keyed tables (for now)' 4530 if rfieldname not in rtable.fields: 4531 raise SyntaxError,\ 4532 "invalid field '%s' for referenced table '%s' in table '%s'" \ 4533 % (rfieldname, rtablename, self._tablename) 4534 rtable._referenced_by.append((self._tablename, field.name)) 4535 for referee in pr.get(self._tablename,[]): 4536 self._referenced_by.append((referee._tablename,referee.name))
4537
4538 - def _filter_fields(self, record, id=False):
4539 return dict([(k, v) for (k, v) in record.items() if k 4540 in self.fields and (self[k].type!='id' or id)])
4541
4542 - def _build_query(self,key):
4543 """ for keyed table only """ 4544 query = None 4545 for k,v in key.iteritems(): 4546 if k in self._primarykey: 4547 if query: 4548 query = query & (self[k] == v) 4549 else: 4550 query = (self[k] == v) 4551 else: 4552 raise SyntaxError, \ 4553 'Field %s is not part of the primary key of %s' % \ 4554 (k,self._tablename) 4555 return query
4556
4557 - def __getitem__(self, key):
4558 if not key: 4559 return None 4560 elif isinstance(key, dict): 4561 """ for keyed table """ 4562 query = self._build_query(key) 4563 rows = self._db(query).select() 4564 if rows: 4565 return rows[0] 4566 return None 4567 elif str(key).isdigit(): 4568 return self._db(self.id == key).select(limitby=(0,1)).first() 4569 elif key: 4570 return dict.__getitem__(self, str(key))
4571
4572 - def __call__(self, key=DEFAULT, **kwargs):
4573 if key!=DEFAULT: 4574 if isinstance(key, Query): 4575 record = self._db(key).select(limitby=(0,1)).first() 4576 elif not str(key).isdigit(): 4577 record = None 4578 else: 4579 record = self._db(self.id == key).select(limitby=(0,1)).first() 4580 if record: 4581 for k,v in kwargs.items(): 4582 if record[k]!=v: return None 4583 return record 4584 elif kwargs: 4585 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()]) 4586 return self._db(query).select(limitby=(0,1)).first() 4587 else: 4588 return None
4589
4590 - def __setitem__(self, key, value):
4591 if isinstance(key, dict) and isinstance(value, dict): 4592 """ option for keyed table """ 4593 if set(key.keys()) == set(self._primarykey): 4594 value = self._filter_fields(value) 4595 kv = {} 4596 kv.update(value) 4597 kv.update(key) 4598 if not self.insert(**kv): 4599 query = self._build_query(key) 4600 self._db(query).update(**self._filter_fields(value)) 4601 else: 4602 raise SyntaxError,\ 4603 'key must have all fields from primary key: %s'%\ 4604 (self._primarykey) 4605 elif str(key).isdigit(): 4606 if key == 0: 4607 self.insert(**self._filter_fields(value)) 4608 elif not self._db(self.id == key)\ 4609 .update(**self._filter_fields(value)): 4610 raise SyntaxError, 'No such record: %s' % key 4611 else: 4612 if isinstance(key, dict): 4613 raise SyntaxError,\ 4614 'value must be a dictionary: %s' % value 4615 dict.__setitem__(self, str(key), value)
4616
4617 - def __delitem__(self, key):
4618 if isinstance(key, dict): 4619 query = self._build_query(key) 4620 if not self._db(query).delete(): 4621 raise SyntaxError, 'No such record: %s' % key 4622 elif not str(key).isdigit() or not self._db(self.id == key).delete(): 4623 raise SyntaxError, 'No such record: %s' % key
4624
4625 - def __getattr__(self, key):
4626 return self[key]
4627
4628 - def __setattr__(self, key, value):
4629 if key in self: 4630 raise SyntaxError, 'Object exists and cannot be redefined: %s' % key 4631 self[key] = value
4632
4633 - def __iter__(self):
4634 for fieldname in self.fields: 4635 yield self[fieldname]
4636
4637 - def __repr__(self):
4638 return '<Table ' + dict.__repr__(self) + '>'
4639
4640 - def __str__(self):
4641 if self.get('_ot', None): 4642 return '%s AS %s' % (self._ot, self._tablename) 4643 return self._tablename
4644
4645 - def _drop(self, mode = ''):
4646 return self._db._adapter._drop(self, mode)
4647
4648 - def drop(self, mode = ''):
4649 return self._db._adapter.drop(self,mode)
4650
4651 - def _listify(self,fields,update=False):
4652 new_fields = [] 4653 new_fields_names = [] 4654 for name in fields: 4655 if not name in self.fields: 4656 raise SyntaxError, 'Field %s does not belong to the table' % name 4657 new_fields.append((self[name],fields[name])) 4658 new_fields_names.append(name) 4659 for ofield in self: 4660 if not ofield.name in new_fields_names: 4661 if not update and ofield.default!=None: 4662 new_fields.append((ofield,ofield.default)) 4663 elif update and ofield.update!=None: 4664 new_fields.append((ofield,ofield.update)) 4665 for ofield in self: 4666 if not ofield.name in new_fields_names and ofield.compute: 4667 try: 4668 new_fields.append((ofield,ofield.compute(Row(fields)))) 4669 except KeyError: 4670 pass 4671 if not update and ofield.required and not ofield.name in new_fields_names: 4672 raise SyntaxError,'Table: missing required field: %s' % ofield.name 4673 return new_fields
4674
4675 - def _insert(self, **fields):
4676 return self._db._adapter._insert(self,self._listify(fields))
4677
4678 - def insert(self, **fields):
4679 return self._db._adapter.insert(self,self._listify(fields))
4680
4681 - def validate_and_insert(self,**fields):
4682 response = Row() 4683 response.errors = self._validate(**fields) 4684 if not response.errors: 4685 response.id = self.insert(**fields) 4686 else: 4687 response.id = None 4688 return response
4689
4690 - def update_or_insert(self, key=DEFAULT, **values):
4691 if key==DEFAULT: 4692 record = self(**values) 4693 else: 4694 record = self(key) 4695 if record: 4696 record.update_record(**values) 4697 newid = None 4698 else: 4699 newid = self.insert(**values) 4700 return newid
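For example (illustrative table): update_or_insert returns the new id when it inserts, and None when it found and updated an existing record:

    # db.person.update_or_insert(name='James')            # match by values
    # db.person.update_or_insert(db.person.name == 'James',
    #                            name='James')            # match by query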
4701
4702 - def bulk_insert(self, items):
4703 """ 4704 here items is a list of dictionaries 4705 """ 4706 items = [self._listify(item) for item in items] 4707 return self._db._adapter.bulk_insert(self,items)
4708
4709 - def _truncate(self, mode = None):
4710 return self._db._adapter._truncate(self, mode)
4711
4712 - def truncate(self, mode = None):
4713 return self._db._adapter.truncate(self, mode)
4714
4715 - def import_from_csv_file( 4716 self, 4717 csvfile, 4718 id_map=None, 4719 null='<NULL>', 4720 unique='uuid', 4721 *args, **kwargs 4722 ):
4723 """ 4724 import records from csv file. Column headers must have same names as 4725 table fields. field 'id' is ignored. If column names read 'table.file' 4726 the 'table.' prefix is ignored. 4727 'unique' argument is a field which must be unique 4728 (typically a uuid field) 4729 """ 4730 4731 delimiter = kwargs.get('delimiter', ',') 4732 quotechar = kwargs.get('quotechar', '"') 4733 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 4734 4735 reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) 4736 colnames = None 4737 if isinstance(id_map, dict): 4738 if not self._tablename in id_map: 4739 id_map[self._tablename] = {} 4740 id_map_self = id_map[self._tablename] 4741 4742 def fix(field, value, id_map): 4743 if value == null: 4744 value = None 4745 elif field.type in ('double','integer'): 4746 value = None 4747 elif field.type.startswith('list:string'): 4748 value = bar_decode_string(value) 4749 elif field.type.startswith('list:reference'): 4750 ref_table = field.type[10:].strip() 4751 value = [id_map[ref_table][int(v)] \ 4752 for v in bar_decode_string(value)] 4753 elif field.type.startswith('list:'): 4754 value = bar_decode_integer(value) 4755 elif id_map and field.type.startswith('reference'): 4756 try: 4757 value = id_map[field.type[9:].strip()][value] 4758 except KeyError: 4759 pass 4760 return (field.name, value)
4761 4762 def is_id(colname): 4763 if colname in self: 4764 return self[colname].type == 'id' 4765 else: 4766 return False
4767 4768 for line in reader: 4769 if not line: 4770 break 4771 if not colnames: 4772 colnames = [x.split('.',1)[-1] for x in line][:len(line)] 4773 cols, cid = [], [] 4774 for i,colname in enumerate(colnames): 4775 if is_id(colname): 4776 cid = i 4777 else: 4778 cols.append(i) 4779 if colname == unique: 4780 unique_idx = i 4781 else: 4782 items = [fix(self[colnames[i]], line[i], id_map) \ 4783 for i in cols if colnames[i] in self.fields] 4784 # Validation. Check for duplicate of 'unique' &, 4785 # if present, update instead of insert. 4786 if not unique or unique not in colnames: 4787 new_id = self.insert(**dict(items)) 4788 else: 4789 unique_value = line[unique_idx] 4790 query = self._db[self][unique] == unique_value 4791 record = self._db(query).select().first() 4792 if record: 4793 record.update_record(**dict(items)) 4794 new_id = record[self._id.name] 4795 else: 4796 new_id = self.insert(**dict(items)) 4797 if id_map and cid != []: 4798 id_map_self[line[cid]] = new_id 4799
4800 - def with_alias(self, alias):
4801 return self._db._adapter.alias(self,alias)
4802
4803 - def on(self, query):
4804 return Expression(self._db,self._db._adapter.ON,self,query)
4805 4806 4807
4808 -class Expression(object):
4809
4810 - def __init__( 4811 self, 4812 db, 4813 op, 4814 first=None, 4815 second=None, 4816 type=None, 4817 ):
4818 4819 self.db = db 4820 self.op = op 4821 self.first = first 4822 self.second = second 4823 ### self._tablename = first._tablename ## CHECK 4824 if not type and first and hasattr(first,'type'): 4825 self.type = first.type 4826 else: 4827 self.type = type
4828
4829 - def sum(self):
4830 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)
4831
4832 - def max(self):
4833 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)
4834
4835 - def min(self):
4836 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)
4837
4838 - def len(self):
4839 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
4840
4841 - def lower(self):
4842 return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)
4843
4844 - def upper(self):
4845 return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)
4846
4847 - def year(self):
4848 return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')
4849
4850 - def month(self):
4851 return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')
4852
4853 - def day(self):
4854 return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')
4855
4856 - def hour(self):
4857 return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')
4858
4859 - def minutes(self):
4860 return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')
4861
4862 - def coalesce_zero(self):
4863 return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)
4864
4865 - def seconds(self):
4866 return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')
4867
4868 - def __getslice__(self, start, stop):
4869 if start < 0: 4870 pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) 4871 else: 4872 pos0 = start + 1 4873 4874 if stop < 0: 4875 length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) 4876 elif stop == sys.maxint: 4877 length = self.len() 4878 else: 4879 length = '(%s - %s)' % (stop + 1, pos0) 4880 return Expression(self.db,self.db._adapter.SUBSTRING, 4881 self, (pos0, length), self.type)
4882
4883 - def __getitem__(self, i):
4884 return self[i:i + 1]
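Because slicing builds a SUBSTRING expression, a field can be sliced inside a query or a select (hedged sketch, table illustrative):

    # db(db.person.name[:1] == 'J').select()   # names whose first character is J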
4885
4886 - def __str__(self):
4887 return self.db._adapter.expand(self,self.type)
4888
4889 - def __or__(self, other): # for use in sortby
4890 return Expression(self.db,self.db._adapter.COMMA,self,other,self.type)
4891
4892 - def __invert__(self):
4893 if hasattr(self,'op') and self.op == self.db._adapter.INVERT: 4894 return self.first 4895 return Expression(self.db,self.db._adapter.INVERT,self,type=self.type)
4896
4897 - def __add__(self, other):
4898 return Expression(self.db,self.db._adapter.ADD,self,other,self.type)
4899
4900 - def __sub__(self, other):
4901 if self.type == 'integer': 4902 result_type = 'integer' 4903 elif self.type in ['date','time','datetime','double']: 4904 result_type = 'double' 4905 else: 4906 raise SyntaxError, "subtraction operation not supported for type" 4907 return Expression(self.db,self.db._adapter.SUB,self,other, 4908 result_type)
4909 - def __mul__(self, other):
4910 return Expression(self.db,self.db._adapter.MUL,self,other,self.type)
4911
4912 - def __div__(self, other):
4913 return Expression(self.db,self.db._adapter.DIV,self,other,self.type)
4914
4915 - def __mod__(self, other):
4916 return Expression(self.db,self.db._adapter.MOD,self,other,self.type)
4917
4918 - def __eq__(self, value):
4919 return Query(self.db, self.db._adapter.EQ, self, value)
4920
4921 - def __ne__(self, value):
4922 return Query(self.db, self.db._adapter.NE, self, value)
4923
4924 - def __lt__(self, value):
4925 return Query(self.db, self.db._adapter.LT, self, value)
4926
4927 - def __le__(self, value):
4928 return Query(self.db, self.db._adapter.LE, self, value)
4929
4930 - def __gt__(self, value):
4931 return Query(self.db, self.db._adapter.GT, self, value)
4932
4933 - def __ge__(self, value):
4934 return Query(self.db, self.db._adapter.GE, self, value)
4935
4936 - def like(self, value):
4937 return Query(self.db, self.db._adapter.LIKE, self, value)
4938
4939 - def belongs(self, value):
4940 return Query(self.db, self.db._adapter.BELONGS, self, value)
4941
4942 - def startswith(self, value):
4943         if not self.type in ('string', 'text'):
4944             raise SyntaxError, "startswith used with incompatible field type"
4945         return Query(self.db, self.db._adapter.STARTSWITH, self, value)
4946
4947 - def endswith(self, value):
4948         if not self.type in ('string', 'text'):
4949             raise SyntaxError, "endswith used with incompatible field type"
4950         return Query(self.db, self.db._adapter.ENDSWITH, self, value)
4951
4952 - def contains(self, value):
4953         if not self.type in ('string', 'text') and not self.type.startswith('list:'):
4954             raise SyntaxError, "contains used with incompatible field type"
4955         return Query(self.db, self.db._adapter.CONTAINS, self, value)
4956
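like, belongs, startswith, endswith and contains all build Query objects; the last three check the field type first, since they only make sense for 'string'/'text' fields (contains also accepts 'list:' types). A sketch:

    >>> db(db.person.name.contains('am')).select()
    >>> db(db.person.name.endswith('es')).select()
    >>> db(db.person.id.belongs([1, 2, 3])).select()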
4957 - def with_alias(self,alias):
4958 return Expression(self.db,self.db._adapter.AS,self,alias,self.type)
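with_alias wraps an expression in an AS clause, which is mostly useful to name computed columns in a select. A sketch, reusing the hypothetical 'item' table above:

    >>> total = db.item.qty.sum().with_alias('total')
    >>> row = db(db.item).select(total).first()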
4959 
4960 # for use in both Query and sortby
4961 
4962 
4963 -class SQLCustomType(object):
4964 """ 4965 allows defining of custom SQL types 4966 4967 Example:: 4968 4969 decimal = SQLCustomType( 4970 type ='double', 4971 native ='integer', 4972 encoder =(lambda x: int(float(x) * 100)), 4973 decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) 4974 ) 4975 4976 db.define_table( 4977 'example', 4978 Field('value', type=decimal) 4979 ) 4980 4981 :param type: the web2py type (default = 'string') 4982 :param native: the backend type 4983 :param encoder: how to encode the value to store it in the backend 4984 :param decoder: how to decode the value retrieved from the backend 4985 :param validator: what validators to use ( default = None, will use the 4986 default validator for type) 4987 """ 4988
4989 - def __init__(
4990         self,
4991         type='string',
4992         native=None,
4993         encoder=None,
4994         decoder=None,
4995         validator=None,
4996         _class=None,
4997         ):
4998 
4999         self.type = type
5000         self.native = native
5001         self.encoder = encoder or (lambda x: x)
5002         self.decoder = decoder or (lambda x: x)
5003         self.validator = validator
5004         self._class = _class or type
5005
5006 - def startswith(self, dummy=None):
5007 return False
5008
5009 - def __getslice__(self, a=0, b=100):
5010 return None
5011
5012 - def __getitem__(self, i):
5013 return None
5014
5015 - def __str__(self):
5016 return self._class
5017 
5018 
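Note that the docstring example above assumes 'from decimal import Decimal'. The encoder is applied on the way into the backend and the decoder on the way out, so application code only ever sees the web2py-level type. A compact sketch with a hypothetical 'product' table:

    >>> from decimal import Decimal
    >>> cents = SQLCustomType(type='double', native='integer',
    ...     encoder=(lambda x: int(float(x) * 100)),
    ...     decoder=(lambda x: Decimal(str(float(x) / 100))))
    >>> product = db.define_table('product', Field('price', type=cents))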
5019 -class Field(Expression):
5020 
5021     """
5022     an instance of this class represents a database field
5023 
5024     example::
5025 
5026         a = Field(name, 'string', length=32, default=None, required=False,
5027             requires=IS_NOT_EMPTY(), ondelete='CASCADE',
5028             notnull=False, unique=False,
5029             widget=None, label=None, comment=None,
5030             uploadfield=True,     # True means store on disk,
5031                                   # 'a_field_name' means store in this field in db,
5032                                   # False means file content will be discarded
5033             writable=True, readable=True, update=None, authorize=None,
5034             autodelete=False, represent=None, uploadfolder=None,
5035             uploadseparate=False) # upload to separate directories by the first
5036                                   # 2 characters of uuid_key and tablename.fieldname
5037                                   # False - old behavior
5038                                   # True - put uploaded file in
5039                                   #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
5040                                   #   directory
5041 
5042     to be used as argument of DAL.define_table
5043 
5044     allowed field types:
5045     string, boolean, integer, double, text, blob,
5046     date, time, datetime, upload, password
5047 
5048     string fields have a length of Adapter.maxcharlength by default (512; 255 for MySQL)
5049     fields should have a default value, otherwise they will be required in SQLFORMs
5050     the requires argument is used to validate the field input in SQLFORMs
5051 
5052     """
5053 
5054 - def __init__(
5055         self,
5056         fieldname,
5057         type='string',
5058         length=None,
5059         default=DEFAULT,
5060         required=False,
5061         requires=DEFAULT,
5062         ondelete='CASCADE',
5063         notnull=False,
5064         unique=False,
5065         uploadfield=True,
5066         widget=None,
5067         label=None,
5068         comment=None,
5069         writable=True,
5070         readable=True,
5071         update=None,
5072         authorize=None,
5073         autodelete=False,
5074         represent=None,
5075         uploadfolder=None,
5076         uploadseparate=False,
5077         compute=None,
5078         custom_store=None,
5079         custom_retrieve=None,
5080         ):
5081         self.db = None
5082         self.op = None
5083         self.first = None
5084         self.second = None
5085         if not isinstance(fieldname, str):
5086             raise SyntaxError, "missing field name"
5087         if fieldname.startswith(':'):
5088             fieldname, readable, writable = fieldname[1:], False, False
5089         elif fieldname.startswith('.'):
5090             fieldname, readable, writable = fieldname[1:], False, False
5091         if '=' in fieldname:
5092             fieldname, default = fieldname.split('=', 1)
5093         self.name = fieldname = cleanup(fieldname)
5094         if hasattr(Table, fieldname) or fieldname[0] == '_' or \
5095                 regex_python_keywords.match(fieldname):
5096             raise SyntaxError, 'Field: invalid field name: %s' % fieldname
5097         if isinstance(type, Table):
5098             type = 'reference ' + type._tablename
5099         self.type = type  # 'string', 'integer'
5100         self.length = (length is None) and MAXCHARLENGTH or length
5101         if default == DEFAULT:
5102             self.default = update or None
5103         else:
5104             self.default = default
5105         self.required = required  # is this field required
5106         self.ondelete = ondelete.upper()  # this is for reference fields only
5107         self.notnull = notnull
5108         self.unique = unique
5109         self.uploadfield = uploadfield
5110         self.uploadfolder = uploadfolder
5111         self.uploadseparate = uploadseparate
5112         self.widget = widget
5113         self.label = label or ' '.join(item.capitalize() for item in fieldname.split('_'))
5114         self.comment = comment
5115         self.writable = writable
5116         self.readable = readable
5117         self.update = update
5118         self.authorize = authorize
5119         self.autodelete = autodelete
5120         if not represent and type in ('list:integer', 'list:string'):
5121             represent = lambda x: ', '.join(str(y) for y in x or [])
5122         self.represent = represent
5123         self.compute = compute
5124         self.isattachment = True
5125         self.custom_store = custom_store
5126         self.custom_retrieve = custom_retrieve
5127         if self.label is None:
5128             self.label = ' '.join([x.capitalize() for x in
5129                                    fieldname.split('_')])
5130         if requires is None:
5131             self.requires = []
5132         else:
5133             self.requires = requires
5134
5135 - def store(self, file, filename=None, path=None):
5136         if self.custom_store:
5137             return self.custom_store(file, filename, path)
5138         if not filename:
5139             filename = file.name
5140         filename = os.path.basename(filename.replace('/', os.sep)\
5141                                     .replace('\\', os.sep))
5142         m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
5143         extension = m and m.group('e') or 'txt'
5144         uuid_key = web2py_uuid().replace('-', '')[-16:]
5145         encoded_filename = base64.b16encode(filename).lower()
5146         newfilename = '%s.%s.%s.%s' % \
5147             (self._tablename, self.name, uuid_key, encoded_filename)
5148         newfilename = newfilename[:200] + '.' + extension
5149         if isinstance(self.uploadfield, Field):
5150             blob_uploadfield_name = self.uploadfield.uploadfield
5151             keys = {self.uploadfield.name: newfilename,
5152                     blob_uploadfield_name: file.read()}
5153             self.uploadfield.table.insert(**keys)
5154         elif self.uploadfield == True:
5155             if path:
5156                 pass
5157             elif self.uploadfolder:
5158                 path = self.uploadfolder
5159             elif self.db._adapter.folder:
5160                 path = os.path.join(self.db._adapter.folder, '..', 'uploads')
5161             else:
5162                 raise RuntimeError, "you must specify a Field(...,uploadfolder=...)"
5163             if self.uploadseparate:
5164                 path = os.path.join(path, "%s.%s" % (self._tablename, self.name), uuid_key[:2])
5165             if not os.path.exists(path):
5166                 os.makedirs(path)
5167             pathfilename = os.path.join(path, newfilename)
5168             dest_file = open(pathfilename, 'wb')
5169             shutil.copyfileobj(file, dest_file)
5170             dest_file.close()
5171         return newfilename
5172
5173 - def retrieve(self, name, path=None):
5174         if self.custom_retrieve:
5175             return self.custom_retrieve(name, path)
5176         import http
5177         if self.authorize or isinstance(self.uploadfield, str):
5178             row = self.db(self == name).select().first()
5179             if not row:
5180                 raise http.HTTP(404)
5181             if self.authorize and not self.authorize(row):
5182                 raise http.HTTP(403)
5183         try:
5184             m = regex_content.match(name)
5185             if not m or not self.isattachment:
5186                 raise TypeError, 'Can\'t retrieve %s' % name
5187             filename = base64.b16decode(m.group('name'), True)
5188             filename = regex_cleanup_fn.sub('_', filename)
5189         except (TypeError, AttributeError):
5190             filename = name
5191         if isinstance(self.uploadfield, str):  # ## if file is in DB
5192             return (filename, cStringIO.StringIO(row[self.uploadfield] or ''))
5193         elif isinstance(self.uploadfield, Field):
5194             blob_uploadfield_name = self.uploadfield.uploadfield
5195             query = self.uploadfield == name
5196             data = self.uploadfield.table(query)[blob_uploadfield_name]
5197             return (filename, cStringIO.StringIO(data))
5198         else:
5199             # ## if file is on filesystem
5200             if path:
5201                 pass
5202             elif self.uploadfolder:
5203                 path = self.uploadfolder
5204             else:
5205                 path = os.path.join(self.db._adapter.folder, '..', 'uploads')
5206             if self.uploadseparate:
5207                 t = m.group('table')
5208                 f = m.group('field')
5209                 u = m.group('uuidkey')
5210                 path = os.path.join(path, "%s.%s" % (t, f), u[:2])
5211             return (filename, open(os.path.join(path, name), 'rb'))
5212
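store() renames an incoming file to the tablename.fieldname.uuid_key.b16name pattern and writes it into the upload folder (or into a blob Field when uploadfield is one); retrieve() reverses the lookup. A sketch, assuming a hypothetical 'upload' field 'photo' on the person table:

    >>> stream = open('photo.jpg', 'rb')
    >>> newname = db.person.photo.store(stream, filename='photo.jpg')
    >>> db.person.insert(photo=newname)
    >>> original_name, data = db.person.photo.retrieve(newname)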
5213 - def formatter(self, value):
5214         if value is None or not self.requires:
5215             return value
5216         if not isinstance(self.requires, (list, tuple)):
5217             requires = [self.requires]
5218         elif isinstance(self.requires, tuple):
5219             requires = list(self.requires)
5220         else:
5221             requires = copy.copy(self.requires)
5222         requires.reverse()
5223         for item in requires:
5224             if hasattr(item, 'formatter'):
5225                 value = item.formatter(value)
5226         return value
5227
5228 - def validate(self, value):
5229         if not self.requires:
5230             return (value, None)
5231         requires = self.requires
5232         if not isinstance(requires, (list, tuple)):
5233             requires = [requires]
5234         for validator in requires:
5235             (value, error) = validator(value)
5236             if error:
5237                 return (value, error)
5238         return (value, None)
5239
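formatter() runs the validators' formatters in reverse (display) order, while validate() runs them forward and stops at the first error. A sketch, assuming the field was declared with requires=IS_NOT_EMPTY():

    >>> value, error = db.person.name.validate('')
    >>> print error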
5240 - def count(self):
5241 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')
5242
5243 - def __nonzero__(self):
5244 return True
5245
5246 - def __str__(self):
5247         try:
5248             return '%s.%s' % (self.tablename, self.name)
5249         except:
5250             return '<no table>.%s' % self.name
5251 
5252 
5253 -class Query(object):
5254 
5255     """
5256     a query object necessary to define a set.
5257     it can be stored or can be passed to DAL.__call__() to obtain a Set
5258 
5259     Example::
5260 
5261         query = db.users.name=='Max'
5262         set = db(query)
5263         records = set.select()
5264 
5265     """
5266 
5267 - def __init__(
5268         self,
5269         db,
5270         op,
5271         first=None,
5272         second=None,
5273         ):
5274         self.db = db
5275         self.op = op
5276         self.first = first
5277         self.second = second
5278
5279 - def __str__(self):
5280 return self.db._adapter.expand(self)
5281
5282 - def __and__(self, other):
5283 return Query(self.db,self.db._adapter.AND,self,other)
5284
5285 - def __or__(self, other):
5286 return Query(self.db,self.db._adapter.OR,self,other)
5287
5288 - def __invert__(self):
5289         if self.op == self.db._adapter.NOT:
5290             return self.first
5291         return Query(self.db, self.db._adapter.NOT, self)
5292 
5293 
5294 regex_quotes = re.compile("'[^']*'")
5295 
5296 
5297 -def xorify(orderby):
5298     if not orderby:
5299         return None
5300     orderby2 = orderby[0]
5301     for item in orderby[1:]:
5302         orderby2 = orderby2 | item
5303     return orderby2
5304 
5305 
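Queries compose with & (AND), | (OR) and ~ (NOT), with __invert__ collapsing a double NOT; xorify folds a list of orderby expressions into one comma expression via the | overload above. A sketch:

    >>> q = ~(db.person.name == 'Max') | (db.person.birth == None)
    >>> rows = db(q).select()
    >>> order = xorify([db.person.name, ~db.person.birth])
    >>> rows = db(db.person).select(orderby=order)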
5306 -class Set(object):
5307 
5308     """
5309     a Set represents a set of records in the database;
5310     the records are identified by the query=Query(...) object.
5311     normally the Set is generated by DAL.__call__(Query(...))
5312 
5313     given a set, for example
5314         set = db(db.users.name=='Max')
5315     you can:
5316         set.update(name='Massimo')
5317         set.delete() # all elements in the set
5318         set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
5319     and take subsets:
5320         subset = set(db.users.id<5)
5321     """
5322 
5323 - def __init__(self, db, query):
5324         self.db = db
5325         self._db = db  # for backward compatibility
5326         self.query = query
5327
5328 - def __call__(self, query):
5329         if isinstance(query, Table):
5330             query = query._id > 0
5331         elif isinstance(query, Field):
5332             query = query != None
5333         if self.query:
5334             return Set(self.db, self.query & query)
5335         else:
5336             return Set(self.db, query)
5337
5338 - def _count(self,distinct=None):
5339 return self.db._adapter._count(self.query,distinct)
5340
5341 - def _select(self, *fields, **attributes):
5342 return self.db._adapter._select(self.query,fields,attributes)
5343
5344 - def _delete(self):
5345         tablename = self.db._adapter.get_table(self.query)
5346         return self.db._adapter._delete(tablename, self.query)
5347
5348 - def _update(self, **update_fields):
5349         tablename = self.db._adapter.get_table(self.query)
5350         fields = self.db[tablename]._listify(update_fields, update=True)
5351         return self.db._adapter._update(tablename, self.query, fields)
5352
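The underscored variants (_count, _select, _delete, _update) only return the SQL the adapter would run; the plain methods below actually execute it. A sketch:

    >>> print db(db.person.id > 0)._select(db.person.name)
    >>> print db(db.person.name == 'Max')._update(name='max')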
5353 - def isempty(self):
5354 return not self.select(limitby=(0,1))
5355
5356 - def count(self,distinct=None):
5357 return self.db._adapter.count(self.query,distinct)
5358
5359 - def select(self, *fields, **attributes):
5360 return self.db._adapter.select(self.query,fields,attributes)
5361
5362 - def delete(self):
5363         tablename = self.db._adapter.get_table(self.query)
5364         self.delete_uploaded_files()
5365         return self.db._adapter.delete(tablename, self.query)
5366
5367 - def update(self, **update_fields):
5368         tablename = self.db._adapter.get_table(self.query)
5369         fields = self.db[tablename]._listify(update_fields, update=True)
5370         if not fields:
5371             raise SyntaxError, "No fields to update"
5372         self.delete_uploaded_files(update_fields)
5373         return self.db._adapter.update(tablename, self.query, fields)
5374
5375 - def delete_uploaded_files(self, upload_fields=None):
5376         table = self.db[self.db._adapter.tables(self.query)[0]]
5377         # ## mind: uploadfield==True means the file is not stored in the DB
5378         if upload_fields:
5379             fields = upload_fields.keys()
5380         else:
5381             fields = table.fields
5382         fields = [f for f in fields if table[f].type == 'upload'
5383                   and table[f].uploadfield == True
5384                   and table[f].autodelete]
5385         if not fields:
5386             return
5387         for record in self.select(*[table[f] for f in fields]):
5388             for fieldname in fields:
5389                 field = table[fieldname]
5390                 oldname = record.get(fieldname, None)
5391                 if not oldname:
5392                     continue
5393                 if upload_fields and oldname == upload_fields[fieldname]:
5394                     continue
5395                 uploadfolder = field.uploadfolder
5396                 if not uploadfolder:
5397                     uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads')
5398                 if field.uploadseparate:
5399                     items = oldname.split('.')
5400                     uploadfolder = os.path.join(uploadfolder,
5401                                                 "%s.%s" % (items[0], items[1]),
5402                                                 items[2][:2])
5403                 oldpath = os.path.join(uploadfolder, oldname)
5404                 if os.path.exists(oldpath):
5405                     os.unlink(oldpath)
5406 
5407 
5408 -def update_record(pack, a=None):  # a=None instead of a mutable {} default
5409     (colset, table, id) = pack
5410     b = a or dict(colset)
5411     c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id'])
5412     table._db(table._id==id).update(**c)
5413     for (k, v) in c.items():
5414         colset[k] = v
5415 
5416 
5417 -class Rows(object):
5418 
5419     """
5420     A wrapper for the return value of a select. It basically represents a table.
5421     It has an iterator and each row is represented as a dictionary.
5422     """
5423 
5424     # ## TODO: this class still needs some work to care for ID/OID
5425 
5426 - def __init__(
5427         self,
5428         db=None,
5429         records=None,   # None instead of a shared mutable [] default; see __or__
5430         colnames=None,
5431         compact=True,
5432         rawrows=None
5433         ):
5434         self.db = db
5435         self.records = records or []
5436         self.colnames = colnames or []
5437         self.compact = compact
5438         self.response = rawrows
5439
5440 - def setvirtualfields(self,**keyed_virtualfields):
5441         if not keyed_virtualfields:
5442             return self
5443         for row in self.records:
5444             for (tablename, virtualfields) in keyed_virtualfields.items():
5445                 attributes = dir(virtualfields)
5446                 virtualfields.__dict__.update(row)
5447                 if not tablename in row:
5448                     box = row[tablename] = Row()
5449                 else:
5450                     box = row[tablename]
5451                 for attribute in attributes:
5452                     if attribute[0] != '_':
5453                         method = getattr(virtualfields, attribute)
5454                         if hasattr(method, 'im_func') and method.im_func.func_code.co_argcount:
5455                             box[attribute] = method()
5456         return self
5457
5458 - def __and__(self,other):
5459         if self.colnames != other.colnames: raise Exception, 'Cannot & incompatible Rows objects'
5460         records = self.records + other.records
5461         return Rows(self.db, records, self.colnames)
5462
5463 - def __or__(self,other):
5464         if self.colnames != other.colnames: raise Exception, 'Cannot | incompatible Rows objects'
5465         records = self.records[:]  # copy, so the left operand is not mutated in place
5466         records += [record for record in other.records \
5467                     if not record in records]
5468         return Rows(self.db, records, self.colnames)
5469
5470 - def __nonzero__(self):
5471         if len(self.records):
5472             return 1
5473         return 0
5474
5475 - def __len__(self):
5476 return len(self.records)
5477
5478 - def __getslice__(self, a, b):
5479 return Rows(self.db,self.records[a:b],self.colnames)
5480
5481 - def __getitem__(self, i):
5482         row = self.records[i]
5483         keys = row.keys()
5484         if self.compact and len(keys) == 1 and keys[0] != '_extra':
5485             return row[keys[0]]
5486         return row
5487
5488 - def __iter__(self):
5489 """ 5490 iterator over records 5491 """ 5492 5493 for i in xrange(len(self)): 5494 yield self[i]
5495
5496 - def __str__(self):
5497 """ 5498 serializes the table into a csv file 5499 """ 5500 5501 s = cStringIO.StringIO() 5502 self.export_to_csv_file(s) 5503 return s.getvalue()
5504
5505 - def first(self):
5506         if not self.records:
5507             return None
5508         return self[0]
5509
5510 - def last(self):
5511         if not self.records:
5512             return None
5513         return self[-1]
5514
5515 - def find(self,f):
5516 """ 5517 returns a new Rows object, a subset of the original object, 5518 filtered by the function f 5519 """ 5520 if not self.records: 5521 return Rows(self.db, [], self.colnames) 5522 records = [] 5523 for i in range(0,len(self)): 5524 row = self[i] 5525 if f(row): 5526 records.append(self.records[i]) 5527 return Rows(self.db, records, self.colnames)
5528
5529 - def exclude(self, f):
5530 """ 5531 removes elements from the calling Rows object, filtered by the function f, 5532 and returns a new Rows object containing the removed elements 5533 """ 5534 if not self.records: 5535 return Rows(self.db, [], self.colnames) 5536 removed = [] 5537 i=0 5538 while i<len(self): 5539 row = self[i] 5540 if f(row): 5541 removed.append(self.records[i]) 5542 del self.records[i] 5543 else: 5544 i += 1 5545 return Rows(self.db, removed, self.colnames)
5546
5547 - def sort(self, f, reverse=False):
5548 """ 5549 returns a list of sorted elements (not sorted in place) 5550 """ 5551 return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)
5552
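find, exclude and sort post-process an already-fetched Rows object in Python, so they issue no further queries: find filters non-destructively, exclude moves the matching records out of the caller, and sort returns a new ordered Rows. A sketch:

    >>> rows = db(db.person).select()
    >>> young = rows.find(lambda row: row.birth > datetime.date(1990, 1, 1))
    >>> dropped = rows.exclude(lambda row: row.name == 'Max')
    >>> by_birth = rows.sort(lambda row: row.birth, reverse=True)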
5553 - def as_list(self,
5554               compact=True,
5555               storage_to_dict=True,
5556               datetime_to_str=True):
5557 """ 5558 returns the data as a list or dictionary. 5559 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 5560 :param datetime_to_str: convert datetime fields as strings (default True) 5561 """ 5562 (oc, self.compact) = (self.compact, compact) 5563 if storage_to_dict: 5564 items = [item.as_dict(datetime_to_str) for item in self] 5565 else: 5566 items = [item for item in self] 5567 self.compact = compact 5568 return items
5569 
5570 
5571 - def as_dict(self,
5572               key='id',
5573               compact=True,
5574               storage_to_dict=True,
5575               datetime_to_str=True):
5576 """ 5577 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 5578 5579 :param key: the name of the field to be used as dict key, normally the id 5580 :param compact: ? (default True) 5581 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 5582 :param datetime_to_str: convert datetime fields as strings (default True) 5583 """ 5584 rows = self.as_list(compact, storage_to_dict, datetime_to_str) 5585 if isinstance(key,str) and key.count('.')==1: 5586 (table, field) = key.split('.') 5587 return dict([(r[table][field],r) for r in rows]) 5588 elif isinstance(key,str): 5589 return dict([(r[key],r) for r in rows]) 5590 else: 5591 return dict([(key(r),r) for r in rows])
5592
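as_list and as_dict are the bridges to plain Python data (for caching, templating, or further processing); as_dict keys the result by 'id' unless told otherwise. A sketch:

    >>> rows = db(db.person).select()
    >>> records = rows.as_list()
    >>> by_id = rows.as_dict()
    >>> by_name = rows.as_dict(key='name')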
5593 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
5594 """ 5595 export data to csv, the first line contains the column names 5596 5597 :param ofile: where the csv must be exported to 5598 :param null: how null values must be represented (default '<NULL>') 5599 :param delimiter: delimiter to separate values (default ',') 5600 :param quotechar: character to use to quote string values (default '"') 5601 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 5602 :param represent: use the fields .represent value (default False) 5603 :param colnames: list of column names to use (default self.colnames) 5604 This will only work when exporting rows objects!!!! 5605 DO NOT use this with db.export_to_csv() 5606 """ 5607 delimiter = kwargs.get('delimiter', ',') 5608 quotechar = kwargs.get('quotechar', '"') 5609 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 5610 represent = kwargs.get('represent', False) 5611 writer = csv.writer(ofile, delimiter=delimiter, 5612 quotechar=quotechar, quoting=quoting) 5613 colnames = kwargs.get('colnames', self.colnames) 5614 # a proper csv starting with the column names 5615 writer.writerow(colnames) 5616 5617 def none_exception(value): 5618 """ 5619 returns a cleaned up value that can be used for csv export: 5620 - unicode text is encoded as such 5621 - None values are replaced with the given representation (default <NULL>) 5622 """ 5623 if value is None: 5624 return null 5625 elif isinstance(value, unicode): 5626 return value.encode('utf8') 5627 elif isinstance(value,Reference): 5628 return int(value) 5629 elif hasattr(value, 'isoformat'): 5630 return value.isoformat()[:19].replace('T', ' ') 5631 elif isinstance(value, (list,tuple)): # for type='list:..' 5632 return bar_encode(value) 5633 return value
5634 
5635         for record in self:
5636             row = []
5637             for col in colnames:
5638                 if not table_field.match(col):
5639                     row.append(record._extra[col])
5640                 else:
5641                     (t, f) = col.split('.')
5642                     field = self.db[t][f]
5643                     if isinstance(record.get(t, None), (Row, dict)):
5644                         value = record[t][f]
5645                     else:
5646                         value = record[f]
5647                     if represent and field.represent:
5648                         value = field.represent(value)
5649                     row.append(none_exception(value))
5650             writer.writerow(row)
5651
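Since __str__ above delegates to export_to_csv_file, printing a Rows object already yields CSV; exporting to disk only needs an open byte stream. A sketch:

    >>> ofile = open('people.csv', 'wb')
    >>> db(db.person).select().export_to_csv_file(ofile, represent=True)
    >>> ofile.close()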
5652 - def xml(self):
5653 """ 5654 serializes the table using sqlhtml.SQLTABLE (if present) 5655 """ 5656 5657 import sqlhtml 5658 return sqlhtml.SQLTABLE(self).xml()
5659
5660 - def json(self, mode='object', default=None):
5661 """ 5662 serializes the table to a JSON list of objects 5663 """ 5664 mode = mode.lower() 5665 if not mode in ['object', 'array']: 5666 raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode 5667 5668 def inner_loop(record, col): 5669 (t, f) = col.split('.') 5670 res = None 5671 if not table_field.match(col): 5672 res = record._extra[col] 5673 else: 5674 if isinstance(record.get(t, None), Row): 5675 res = record[t][f] 5676 else: 5677 res = record[f] 5678 if mode == 'object': 5679 return (f, res) 5680 else: 5681 return res
5682 
5683         if mode == 'object':
5684             items = [dict([inner_loop(record, col) for col in
5685                            self.colnames]) for record in self]
5686         else:
5687             items = [[inner_loop(record, col) for col in self.colnames]
5688                      for record in self]
5689         if have_serializers:
5690             return serializers.json(items, default=default or serializers.custom_json)
5691         else:
5692             import simplejson
5693             return simplejson.dumps(items)
5694 
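json() serializes the Rows either as a list of objects keyed by field name (mode='object', the default) or as a list of value arrays (mode='array'). A sketch:

    >>> print db(db.person).select(db.person.name).json()
    >>> print db(db.person).select(db.person.name).json(mode='array')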
5695 -def Rows_unpickler(data):
5696 return cPickle.loads(data)
5697
5698 -def Rows_pickler(data):
5699     return Rows_unpickler, \
5700         (cPickle.dumps(data.as_list(storage_to_dict=True,
5701                                     datetime_to_str=False)),)
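Rows_pickler, registered with copy_reg just below, makes Rows objects picklable; note the payload is the as_list() form, so unpickling yields a plain list of dictionaries rather than a Rows instance. A sketch:

    >>> import cPickle
    >>> records = cPickle.loads(cPickle.dumps(db(db.person).select()))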
5702 
5703 copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
5704 
5705 
5706 ################################################################################
5707 # dummy function used to define some doctests
5708 ################################################################################
5709 
5710 -def test_all():
5711 """ 5712 5713 >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") 5714 >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) 5715 >>> tmp = db.define_table('users',\ 5716 Field('stringf', 'string', length=32, required=True),\ 5717 Field('booleanf', 'boolean', default=False),\ 5718 Field('passwordf', 'password', notnull=True),\ 5719 Field('uploadf', 'upload'),\ 5720 Field('blobf', 'blob'),\ 5721 Field('integerf', 'integer', unique=True),\ 5722 Field('doublef', 'double', unique=True,notnull=True),\ 5723 Field('datef', 'date', default=datetime.date.today()),\ 5724 Field('timef', 'time'),\ 5725 Field('datetimef', 'datetime'),\ 5726 migrate='test_user.table') 5727 5728 Insert a field 5729 5730 >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ 5731 uploadf=None, integerf=5, doublef=3.14,\ 5732 datef=datetime.date(2001, 1, 1),\ 5733 timef=datetime.time(12, 30, 15),\ 5734 datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) 5735 1 5736 5737 Drop the table 5738 5739 >>> db.users.drop() 5740 5741 Examples of insert, select, update, delete 5742 5743 >>> tmp = db.define_table('person',\ 5744 Field('name'),\ 5745 Field('birth','date'),\ 5746 migrate='test_person.table') 5747 >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') 5748 >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') 5749 5750 commented len(db().select(db.person.ALL)) 5751 commented 2 5752 5753 >>> me = db(db.person.id==person_id).select()[0] # test select 5754 >>> me.name 5755 'Massimo' 5756 >>> db(db.person.name=='Massimo').update(name='massimo') # test update 5757 1 5758 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 5759 1 5760 5761 Update a single record 5762 5763 >>> me.update_record(name=\"Max\") 5764 >>> me.name 5765 'Max' 5766 5767 Examples of complex search conditions 5768 5769 >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 5770 1 5771 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 5772 1 5773 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 5774 1 5775 >>> me = db(db.person.id==person_id).select(db.person.name)[0] 5776 >>> me.name 5777 'Max' 5778 5779 Examples of search conditions using extract from date/datetime/time 5780 5781 >>> len(db(db.person.birth.month()==12).select()) 5782 1 5783 >>> len(db(db.person.birth.year()>1900).select()) 5784 1 5785 5786 Example of usage of NULL 5787 5788 >>> len(db(db.person.birth==None).select()) ### test NULL 5789 0 5790 >>> len(db(db.person.birth!=None).select()) ### test NULL 5791 1 5792 5793 Examples of search conditions using lower, upper, and like 5794 5795 >>> len(db(db.person.name.upper()=='MAX').select()) 5796 1 5797 >>> len(db(db.person.name.like('%ax')).select()) 5798 1 5799 >>> len(db(db.person.name.upper().like('%AX')).select()) 5800 1 5801 >>> len(db(~db.person.name.upper().like('%AX')).select()) 5802 0 5803 5804 orderby, groupby and limitby 5805 5806 >>> people = db().select(db.person.name, orderby=db.person.name) 5807 >>> order = db.person.name|~db.person.birth 5808 >>> people = db().select(db.person.name, orderby=order) 5809 5810 >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) 5811 5812 >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) 5813 5814 Example of one 2 many relation 5815 5816 >>> tmp = db.define_table('dog',\ 5817 Field('name'),\ 5818 Field('birth','date'),\ 5819 Field('owner',db.person),\ 5820 
migrate='test_dog.table') 5821 >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 5822 1 5823 5824 A simple JOIN 5825 5826 >>> len(db(db.dog.owner==db.person.id).select()) 5827 1 5828 5829 >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 5830 1 5831 5832 Drop tables 5833 5834 >>> db.dog.drop() 5835 >>> db.person.drop() 5836 5837 Example of many 2 many relation and Set 5838 5839 >>> tmp = db.define_table('author', Field('name'),\ 5840 migrate='test_author.table') 5841 >>> tmp = db.define_table('paper', Field('title'),\ 5842 migrate='test_paper.table') 5843 >>> tmp = db.define_table('authorship',\ 5844 Field('author_id', db.author),\ 5845 Field('paper_id', db.paper),\ 5846 migrate='test_authorship.table') 5847 >>> aid = db.author.insert(name='Massimo') 5848 >>> pid = db.paper.insert(title='QCD') 5849 >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) 5850 5851 Define a Set 5852 5853 >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) 5854 >>> rows = authored_papers.select(db.author.name, db.paper.title) 5855 >>> for row in rows: print row.author.name, row.paper.title 5856 Massimo QCD 5857 5858 Example of search condition using belongs 5859 5860 >>> set = (1, 2, 3) 5861 >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) 5862 >>> print rows[0].title 5863 QCD 5864 5865 Example of search condition using nested select 5866 5867 >>> nested_select = db()._select(db.authorship.paper_id) 5868 >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) 5869 >>> print rows[0].title 5870 QCD 5871 5872 Example of expressions 5873 5874 >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) 5875 >>> db(mynumber.id>0).delete() 5876 0 5877 >>> for i in range(10): tmp = mynumber.insert(x=i) 5878 >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum()) 5879 45 5880 5881 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 5882 5 5883 5884 Output in csv 5885 5886 >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() 5887 author.name,paper.title\r 5888 Massimo,QCD 5889 5890 Delete all leftover tables 5891 5892 >>> DAL.distributed_transaction_commit(db) 5893 5894 >>> db.mynumber.drop() 5895 >>> db.authorship.drop() 5896 >>> db.author.drop() 5897 >>> db.paper.drop() 5898 """
5899 ################################################################################
5900 # deprecated since the new DAL; here only for backward compatibility
5901 ################################################################################
5902 
5903 SQLField = Field
5904 SQLTable = Table
5905 SQLXorable = Expression
5906 SQLQuery = Query
5907 SQLSet = Set
5908 SQLRows = Rows
5909 SQLStorage = Row
5910 SQLDB = DAL
5911 GQLDB = DAL
5912 DAL.Field = Field  # was necessary in gluon/globals.py session.connect
5913 DAL.Table = Table  # was necessary in gluon/globals.py session.connect
5914 
5915 ################################################################################
5916 # run tests
5917 ################################################################################
5918 
5919 if __name__ == '__main__':
5920     import doctest
5921     doctest.testmod()
5922 