# This remains for documentation only.
raise ImportError('This module is deprecated. Use gmPG2.py.')



"""Broker for PostgreSQL distributed backend connections.

@copyright: author

TODO: iterator/generator batch fetching:
- http://groups-beta.google.com/group/comp.lang.python/msg/7ff516d7d9387dad
- search Google for "Geneator/Iterator Nesting Problem - Any Ideas? 2.4"

winner:
def resultset_functional_batchgenerator(cursor, size=100):
	for results in iter(lambda: cursor.fetchmany(size), []):
		for rec in results:
			yield rec
"""
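# The batch generator quoted in the docstring above would be consumed
# like this (a sketch; <curs> is any open DB-API cursor, process() is
# a hypothetical per-row handler):
#
#	for row in resultset_functional_batchgenerator(curs, size=100):
#		process(row)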
# =======================================================================
# $Source: /cvsroot/gnumed/gnumed/gnumed/client/pycommon/gmPG.py,v $
__version__ = "$Revision: 1.90 $"
__author__ = "H.Herb <hherb@gnumed.net>, I.Haywood <i.haywood@ugrad.unimelb.edu.au>, K.Hilbert <Karsten.Hilbert@gmx.net>"
__license__ = 'GPL (details at http://www.gnu.org)'

print "gmPG phased out, please replace with gmPG2"

import sys
sys.exit()

# stdlib modules still referenced by the legacy code kept below
import time, string, copy, locale

_query_logging_verbosity = 1

# check whether this adapter module suits our needs
# (<dbapi> is expected to be a DB-API 2.0 module -- historically
# pyPgSQL -- bound by code predating the deprecation stub above)
assert(float(dbapi.apilevel) >= 2.0)
assert(dbapi.threadsafety > 0)
assert(dbapi.paramstyle == 'pyformat')

_listener_api = None

# default encoding for connections
_default_client_encoding = {'wire': None, 'string': None}

# default time zone for connections
# OR: mxDT.now().gmtoffset()
if time.daylight:
	tz = time.altzone
else:
	tz = time.timezone
# do some magic to convert Python's timezone to a valid ISO timezone
# is this safe or will it return things like 13.5 hours ?
_default_client_timezone = "%+.1f" % (-tz / 3600.0)
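# For the record, the "%+.1f" above does produce values such as "+5.5"
# for fractional-hour zones (e.g. UTC+05:30). A sketch of an hh:mm
# rendering that would survive those zones (not used by the code above,
# <tz> as computed):
#
#	hours, secs = divmod(abs(tz), 3600)
#	sign = ('+', '-')[tz > 0]		# Python counts west-of-UTC as positive
#	iso_tz = '%s%02d:%02d' % (sign, hours, secs // 60)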

_serialize_failure = "serialize access due to concurrent update"

#======================================================================
# a bunch of useful queries
#----------------------------------------------------------------------
QTablePrimaryKeyIndex = """
SELECT
	indkey
FROM
	pg_index
WHERE
	indrelid =
		(SELECT oid FROM pg_class WHERE relname = '%s');
"""

query_pkey_name = """
SELECT
	pga.attname
FROM
	(pg_attribute pga inner join pg_index pgi on (pga.attrelid=pgi.indrelid))
WHERE
	pga.attnum=pgi.indkey[0]
		and
	pgi.indisprimary is true
		and
	pga.attrelid=(SELECT oid FROM pg_class WHERE relname = %s)"""

query_fkey_names = """
select tgargs from pg_trigger where
	tgname like 'RI%%'
		and
	tgrelid = (
		select oid from pg_class where relname=%s
	)
"""

# get columns and data types for a given table
query_table_col_defs = """select
	cols.column_name,
	cols.udt_name
from
	information_schema.columns cols
where
	cols.table_schema = %s
		and
	cols.table_name = %s
order by
	cols.ordinal_position"""

query_table_attributes = """select
	cols.column_name
from
	information_schema.columns cols
where
	cols.table_schema = %s
		and
	cols.table_name = %s
order by
	cols.ordinal_position"""

query_child_tables = """
select
	pgn.nspname as namespace,
	pgc.relname as table
from
	pg_namespace pgn,
	pg_class pgc
where
	pgc.relnamespace = pgn.oid
		and
	pgc.oid in (
		select inhrelid from pg_inherits where inhparent = (
			select oid from pg_class where
				relnamespace = (select oid from pg_namespace where nspname = %(schema)s) and
				relname = %(table)s
		)
	)"""
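
# A sketch of how the parameterised queries above would be run through
# the DB-API (schema/table names are illustrative only):
#
#	curs.execute(query_child_tables, {'schema': 'clin', 'table': 'clin_root_item'})
#	child_tables = curs.fetchall()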

# a handy return to dbapi simplicity
last_ro_cursor_desc = None

#======================================================================
class ConnectionPool:
	"maintains a static dictionary of available database connections"

	# cached read-only connection objects
	__ro_conns = {}
	# maps service names to physical databases
	__service2db_map = {}
	# connections in use per service (for reference counting)
	__conn_use_count = {}
	# flags whether the initial set of connections has been established
	__is_connected = None
	# maps database ids to backend listener threads
	__listeners = {}
	# gmLoginInfo.LoginInfo instance
	__login = None
	#-----------------------------
	def __init__(self, login=None, encoding=None):
		"""parameter login is of type gmLoginInfo.LoginInfo"""
		# if login data is given: re-establish connections
		if login is not None:
			self.__disconnect()
		if ConnectionPool.__is_connected is None:
			# CAREFUL: this affects the whole connection
			dbapi.fetchReturnsList = True
			ConnectionPool.__is_connected = self.__setup_default_ro_conns(login=login, encoding=encoding)
	#-----------------------------
	def __del__(self):
		pass
		# NOTE: do not kill listeners here which would mean to
		# kill them when we throw away *any* ConnectionPool
		# instance - not what we want
	#-----------------------------
	# connection API
	#-----------------------------
	def GetConnection(self, service, readonly=1, encoding=None, extra_verbose=None):
		"""Get a connection."""

		logininfo = self.GetLoginInfoFor(service)

		# either get a cached read-only connection
		if readonly:
			if ConnectionPool.__ro_conns.has_key(service):
				try:
					ConnectionPool.__conn_use_count[service] += 1
				except KeyError:
					ConnectionPool.__conn_use_count[service] = 1
				conn = ConnectionPool.__ro_conns[service]
			else:
				try:
					ConnectionPool.__conn_use_count['default'] += 1
				except KeyError:
					ConnectionPool.__conn_use_count['default'] = 1
				conn = ConnectionPool.__ro_conns['default']
		# or a brand-new read-write connection
		else:
			_log.Log(gmLog.lData, "requesting RW connection to service [%s]" % service)
			conn = self.__pgconnect(logininfo, readonly = 0, encoding = encoding)
			if conn is None:
				return None

		if extra_verbose:
			conn.conn.toggleShowQuery

		return conn
	#-----------------------------
	def ReleaseConnection(self, service):
		"""decrease reference counter of active connection"""
		if ConnectionPool.__ro_conns.has_key(service):
			try:
				ConnectionPool.__conn_use_count[service] -= 1
			except KeyError:
				ConnectionPool.__conn_use_count[service] = 0
		else:
			try:
				ConnectionPool.__conn_use_count['default'] -= 1
			except KeyError:
				ConnectionPool.__conn_use_count['default'] = 0
	#-----------------------------
	#-----------------------------
	def get_connection_for_user(self, user=None, password=None, service="default", encoding=None, extra_verbose=None):
		"""Get a connection for a given user.

		This will return a connection just as GetConnection() would,
		except that the user to be used for authentication can be
		specified. All the other parameters are going to be the
		same, IOW it will connect to the same server, port and database
		as any other connection obtained through this broker.

		You will have to specify the password, of course, if it
		is needed for PostgreSQL authentication.

		This will always return a read-write connection.
		"""
		if user is None:
			_log.Log(gmLog.lErr, 'user must be given')
			raise ValueError, 'gmPG.py::%s.get_connection_for_user(): user name must be given' % self.__class__.__name__

		logininfo = self.GetLoginInfoFor(service)
		logininfo.SetUser(user=user)
		logininfo.SetPassword(passwd=password)

		_log.Log(gmLog.lData, "requesting RW connection to service [%s]" % service)
		conn = self.__pgconnect(logininfo, readonly = 0, encoding = encoding)
		if conn is None:
			return None

		if extra_verbose:
			conn.conn.toggleShowQuery

		return conn
	#-----------------------------
	# notification API
	#-----------------------------
	def Listen(self, service, signal, callback):
		"""Listen to 'signal' from backend in an asynchronous thread.

		If 'signal' is received from database 'service', activate
		the 'callback' function"""
		# FIXME: error handling

		# lazy import of gmBackendListener
		if _listener_api is None:
			if not _import_listener_engine():
				_log.Log(gmLog.lErr, 'cannot load backend listener code')
				return None

		# get physical database for service
		try:
			backend = ConnectionPool.__service2db_map[service]
		except KeyError:
			backend = 0
		_log.Log(gmLog.lData, "connecting notification [%s] from service [%s] (id %s) with callback %s" % (signal, service, backend, callback))
		# start thread if not listening yet,
		# but only one per physical database
		if backend not in ConnectionPool.__listeners.keys():
			auth = self.GetLoginInfoFor(service)
			listener = _listener_api.BackendListener(
				service,
				auth.GetDatabase(),
				auth.GetUser(),
				auth.GetPassword(),
				auth.GetHost(),
				int(auth.GetPort())
			)
			ConnectionPool.__listeners[backend] = listener
		# actually start listening
		listener = ConnectionPool.__listeners[backend]
		listener.register_callback(signal, callback)
		return 1
	#-----------------------------
	def Unlisten(self, service, signal, callback):
		# get physical database for service
		try:
			backend = ConnectionPool.__service2db_map[service]
		except KeyError:
			backend = 0
		_log.Log(gmLog.lData, "disconnecting notification [%s] from service [%s] (id %s) from callback %s" % (signal, service, backend, callback))
		if backend not in ConnectionPool.__listeners.keys():
			return 1
		listener = ConnectionPool.__listeners[backend]
		listener.unregister_callback(signal, callback)
	#-----------------------------
	def StopListener(self, service):
		try:
			backend = self.__service2db_map[service]
		except KeyError:
			_log.Log(gmLog.lWarn, 'cannot stop listener on backend')
			return None
		try:
			ConnectionPool.__listeners[backend].stop_thread()
			del ConnectionPool.__listeners[backend]
		except:
			_log.LogException('cannot stop listener on backend [%s]' % backend, sys.exc_info(), verbose = 0)
			return None
		return 1
	#-----------------------------
	def StopListeners(self):
		for backend in ConnectionPool.__listeners.keys():
			try:
				ConnectionPool.__listeners[backend].stop_thread()
				del ConnectionPool.__listeners[backend]
			except:
				_log.LogException('cannot stop listener on backend [%s]' % backend, sys.exc_info(), verbose = 0)
		return 1
	#-----------------------------
	# misc API
	#-----------------------------
	def GetAvailableServices(self):
		"""list all distributed services available on this system
		(according to configuration database)"""
		return ConnectionPool.__ro_conns.keys()
	#-----------------------------
	def GetLoginInfoFor(self, service, login=None):
		"""return login information for a particular service"""
		if login is None:
			dblogin = ConnectionPool.__login
		else:
			dblogin = copy.deepcopy(login)
		# if service not mapped, return default login information
		try:
			srvc_id = ConnectionPool.__service2db_map[service]
		except KeyError:
			return dblogin
		# a service in the default database
		if srvc_id == 0:
			return dblogin
		# actually fetch parameters for db where service
		# is located from config DB
		cfg_db = ConnectionPool.__ro_conns['default']
		cursor = cfg_db.cursor()
		cmd = "select name, host, port from cfg.db where pk=%s"
		if not run_query(cursor, None, cmd, srvc_id):
			_log.Log(gmLog.lPanic, 'cannot get login info for service [%s] with id [%s] from config database' % (service, srvc_id))
			_log.Log(gmLog.lPanic, 'make sure your service-to-database mappings are properly configured')
			_log.Log(gmLog.lWarn, 'trying to make do with default login parameters')
			return dblogin
		auth_data = cursor.fetchone()
		idx = get_col_indices(cursor)
		cursor.close()
		# substitute values into default login data
		try: # db name
			dblogin.SetDatabase(string.strip(auth_data[idx['name']]))
		except: pass
		try: # host name
			dblogin.SetHost(string.strip(auth_data[idx['host']]))
		except: pass
		try: # port
			dblogin.SetPort(auth_data[idx['port']])
		except: pass
		# and return what we thus got - which may very well be identical to the default login ...
		return dblogin
	#-----------------------------
	# private methods
	#-----------------------------
	def __setup_default_ro_conns(self, login=None, encoding=None):
		"""Initialize connections to all servers."""
		if login is None and ConnectionPool.__is_connected is None:
			try:
				login = request_login_params()
			except:
				_log.LogException("Exception: Cannot connect to databases without login information !", sys.exc_info(), verbose=1)
				raise gmExceptions.ConnectionError("Can't connect to database without login information!")

		_log.Log(gmLog.lData, login.GetInfoStr())
		ConnectionPool.__login = login

		# connect to the configuration server
		cfg_db = self.__pgconnect(login, readonly=1, encoding=encoding)
		if cfg_db is None:
			raise gmExceptions.ConnectionError, _('Cannot connect to configuration database with:\n\n[%s]') % login.GetInfoStr()

		# this is the default gnumed server now
		ConnectionPool.__ro_conns['default'] = cfg_db
		cursor = cfg_db.cursor()
		# document DB version
		cursor.execute("select version()")
		_log.Log(gmLog.lInfo, 'service [default/config] running on [%s]' % cursor.fetchone()[0])
		# preload all services with database pk 0 (default)
		cmd = "select name from cfg.distributed_db"
		if not run_query(cursor, None, cmd):
			cursor.close()
			raise gmExceptions.ConnectionError("cannot load service names from configuration database")
		services = cursor.fetchall()
		for service in services:
			ConnectionPool.__service2db_map[service[0]] = 0

		# establish connections to all servers we need
		# according to configuration database
		cmd = "select * from cfg.config where profile=%s"
		if not run_query(cursor, None, cmd, login.GetProfile()):
			cursor.close()
			raise gmExceptions.ConnectionError("cannot load user profile [%s] from database" % login.GetProfile())
		databases = cursor.fetchall()
		dbidx = get_col_indices(cursor)

		# for all configuration entries that match given user and profile
		for db in databases:
			# - get symbolic name of distributed service
			cursor.execute("select name from cfg.distributed_db where pk=%d" % db[dbidx['ddb']])
			service = string.strip(cursor.fetchone()[0])
			# - map service name to id of real database
			_log.Log(gmLog.lData, "mapping service [%s] to DB ID [%s]" % (service, db[dbidx['db']]))
			ConnectionPool.__service2db_map[service] = db[dbidx['db']]
			# - init ref counter
			ConnectionPool.__conn_use_count[service] = 0
			dblogin = self.GetLoginInfoFor(service, login)
			# - update 'Database Broker' dictionary
			conn = self.__pgconnect(dblogin, readonly=1, encoding=encoding)
			if conn is None:
				raise gmExceptions.ConnectionError, _('Cannot connect to database with:\n\n[%s]') % login.GetInfoStr()
			ConnectionPool.__ro_conns[service] = conn
			# - document DB version
			cursor.execute("select version()")
			_log.Log(gmLog.lInfo, 'service [%s] running on [%s]' % (service, cursor.fetchone()[0]))
		cursor.close()
		ConnectionPool.__is_connected = 1
		return ConnectionPool.__is_connected
	#-----------------------------
	def __pgconnect(self, login, readonly=1, encoding=None):
		"""Connect to a postgres backend as specified by login object.

		- returns a connection object
		- encoding works like this:
		  - encoding specified in the call to __pgconnect() overrides
		  - encoding set by a call to gmPG.set_default_encoding() overrides
		  - encoding taken from Python string encoding
		- wire_encoding and string_encoding must essentially just be different
		  names for one and the same (IOW entirely compatible) encodings, such
		  as "win1250" and "cp1250"
		"""
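		# An illustrative <encoding> argument following the docstring's
		# win1250/cp1250 example -- 'wire' uses PostgreSQL's name for the
		# encoding, 'string' uses Python's name for the same encoding:
		#
		#	encoding = {'wire': 'win1250', 'string': 'cp1250'}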
		dsn = login.GetDBAPI_DSN()
		hostport = "0"

		if encoding is None:
			encoding = _default_client_encoding

		# encoding a Unicode string with this encoding must
		# yield a byte string encoded such that it can be decoded
		# safely by wire_encoding
		string_encoding = encoding['string']
		if string_encoding is None:
			string_encoding = _default_client_encoding['string']
		if string_encoding is None:
			# string_encoding = sys.getdefaultencoding()
			string_encoding = locale.getlocale()[1]
			_log.Log(gmLog.lWarn, 'client encoding not specified, this may lead to data corruption in some cases')
			_log.Log(gmLog.lWarn, 'therefore the string encoding currently set in the active locale is used: [%s]' % string_encoding)
			_log.Log(gmLog.lWarn, 'for this to have any chance to work the application MUST have called locale.setlocale() before')
		_log.Log(gmLog.lInfo, 'using string encoding [%s] to encode Unicode strings for transmission to the database' % string_encoding)

		# Python does not necessarily have to know this encoding by name
		# but it must know an equivalent encoding which guarantees roundtrip
		# equality (set that via string_encoding)
		wire_encoding = encoding['wire']
		if wire_encoding is None:
			wire_encoding = _default_client_encoding['wire']
		if wire_encoding is None:
			wire_encoding = string_encoding
		if wire_encoding is None:
			raise ValueError, '<wire_encoding> cannot be None'

		try:
			# FIXME: eventually use UTF or UTF8 for READONLY connections _only_
			conn = dbapi.connect(dsn=dsn, client_encoding=(string_encoding, 'strict'), unicode_results=1)
		except StandardError:
			_log.LogException("database connection failed: DSN = [%s], host:port = [%s]" % (dsn, hostport), sys.exc_info(), verbose = 1)
			return None

		# set the default characteristics of our sessions
		curs = conn.cursor()

		# - client encoding
		cmd = "set client_encoding to '%s'" % wire_encoding
		try:
			curs.execute(cmd)
		except:
			curs.close()
			conn.close()
			_log.Log(gmLog.lErr, 'query [%s]' % cmd)
			_log.LogException(
				'cannot set string-on-the-wire client_encoding on connection to [%s], this would likely lead to data corruption' % wire_encoding,
				sys.exc_info(),
				verbose = _query_logging_verbosity
			)
			raise
		_log.Log(gmLog.lData, 'string-on-the-wire client_encoding set to [%s]' % wire_encoding)

		# - client time zone
#		cmd = "set session time zone interval '%s'" % _default_client_timezone
		cmd = "set time zone '%s'" % _default_client_timezone
		if not run_query(curs, None, cmd):
			_log.Log(gmLog.lErr, 'cannot set client time zone to [%s]' % _default_client_timezone)
			_log.Log(gmLog.lWarn, 'not setting this will lead to incorrect dates/times')
		else:
			_log.Log(gmLog.lData, 'time zone set to [%s]' % _default_client_timezone)

		# - datestyle
		# FIXME: add DMY/YMD handling
		cmd = "set datestyle to 'ISO'"
		if not run_query(curs, None, cmd):
			_log.Log(gmLog.lErr, 'cannot set client date style to ISO')
			_log.Log(gmLog.lWarn, 'you had better use other means to make sure your server delivers valid ISO timestamps with time zone')

		# - transaction isolation level
		if readonly:
			isolation_level = 'READ COMMITTED'
		else:
			isolation_level = 'SERIALIZABLE'
		cmd = 'set session characteristics as transaction isolation level %s' % isolation_level
		if not run_query(curs, None, cmd):
			curs.close()
			conn.close()
			_log.Log(gmLog.lErr, 'cannot set connection characteristics to [%s]' % isolation_level)
			return None

		# - access mode
		if readonly:
			access_mode = 'READ ONLY'
		else:
			access_mode = 'READ WRITE'
		_log.Log(gmLog.lData, "setting session to [%s] for %s@%s:%s" % (access_mode, login.GetUser(), login.GetHost(), login.GetDatabase()))
		cmd = 'set session characteristics as transaction %s' % access_mode
		if not run_query(curs, 0, cmd):
			_log.Log(gmLog.lErr, 'cannot set connection characteristics to [%s]' % access_mode)
			curs.close()
			conn.close()
			return None

		conn.commit()
		curs.close()
		return conn
	#-----------------------------
	def __disconnect(self, force_it=0):
		"""safe disconnect (respecting possibly active connections) unless the force flag is set"""
		# are we connected at all?
		if ConnectionPool.__is_connected is None:
			# just in case
			ConnectionPool.__ro_conns.clear()
			return
		# stop all background threads
		for backend in ConnectionPool.__listeners.keys():
			ConnectionPool.__listeners[backend].stop_thread()
			del ConnectionPool.__listeners[backend]
		# disconnect from all databases
		for key in ConnectionPool.__ro_conns.keys():
			# check whether this connection might still be in use ...
			if ConnectionPool.__conn_use_count[key] > 0:
				# unless we are really mean
				if force_it == 0:
					# let the end user know that shit is happening
					raise gmExceptions.ConnectionError, "Attempting to close a database connection that is still in use"
				else:
					# close the connection
					ConnectionPool.__ro_conns[key].close()

		# clear the dictionary (would close all connections anyway)
		ConnectionPool.__ro_conns.clear()
		ConnectionPool.__is_connected = None

#---------------------------------------------------
# database helper functions
#---------------------------------------------------
def fieldNames(cursor):
	"returns the attribute names of the fetched rows in natural sequence as a list"
	names = []
	for d in cursor.description:
		names.append(d[0])
	return names
#---------------------------------------------------
def run_query(aCursor=None, verbosity=None, aQuery=None, *args):
	# sanity checks
	if aCursor is None:
		_log.Log(gmLog.lErr, 'need cursor to run query')
		return None
	if aQuery is None:
		_log.Log(gmLog.lErr, 'need query to run it')
		return None
	if verbosity is None:
		verbosity = _query_logging_verbosity

#	t1 = time.time()
	try:
		aCursor.execute(aQuery, *args)
	except:
		_log.LogException("query >>>%s<<< with args >>>%s<<< failed" % (aQuery, args), sys.exc_info(), verbose = verbosity)
		return None
#	t2 = time.time()
#	print t2-t1, aQuery
	return 1
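
# A call sketch for run_query() (query and argument are illustrative):
#
#	if run_query(curs, None, "select oid from pg_class where relname = %s", 'pg_index'):
#		rows = curs.fetchall()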
#---------------------------------------------------
def run_commit2(link_obj=None, queries=None, end_tx=False, max_tries=1, extra_verbose=False, get_col_idx=False):
	"""Convenience function for running a transaction
	that is supposed to get committed.

	<link_obj>
		can be either:
		- a cursor
		- a connection
		- a service name

	<queries>
		is a list of (query, [args]) tuples to be
		executed as a single transaction, the last
		query may usefully return rows (such as a
		"select currval('some_sequence')" statement)

	<end_tx>
		- controls whether the transaction is finalized (eg.
		  committed/rolled back) or not, this allows the
		  call to run_commit2() to be part of a framing
		  transaction
		- if <link_obj> is a service name the transaction is
		  always finalized regardless of what <end_tx> says
		- if link_obj is a connection then <end_tx> will
		  default to False unless it is explicitly set to
		  True which is taken to mean "yes, you do have full
		  control over the transaction" in which case the
		  transaction is properly finalized

	<max_tries>
		- controls the number of times a transaction is retried
		  after a concurrency error
		- note that *all* <queries> are rerun if a concurrency
		  error occurs
		- max_tries is honored if and only if link_obj is a service
		  name such that we have full control over the transaction

	<get_col_idx>
		- if True, the returned data will include a dictionary
		  mapping field names to column positions
		- if False, an empty dictionary is returned instead

	method result:
		- returns a tuple (status, data)
		- <status>:
			* True - if all queries succeeded (also if there were 0 queries)
			* False - if *any* error occurred
		- <data> if <status> is True:
			* (None, {}) if last query did not return rows
			* ("fetchall() result", <index>) if last query returned any rows
			* for <index> see <get_col_idx>
		- <data> if <status> is False:
			* a tuple (error, message) where <error> can be:
			* 1: unspecified error
			* 2: concurrency error
			* 3: constraint violation (non-primary key)
			* 4: access violation
	"""
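	# A usage sketch (table and sequence names are illustrative only,
	# 'historica' is used here as in add_housekeeping_todo() below):
	#
	#	queries = [
	#		("insert into foo (narrative) values (%s)", ['bar']),
	#		("select currval('foo_pk_seq')", [])
	#	]
	#	status, data = run_commit2('historica', queries)
	#	if status:
	#		rows, idx = data
	#	else:
	#		error, message = data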
	# sanity checks
	if queries is None:
		return (False, (1, 'forgot to pass in queries'))
	if len(queries) == 0:
		# success, but - per the docstring - no row data
		return (True, (None, {}))

	# check link_obj
	# is it a cursor ?
	if hasattr(link_obj, 'fetchone') and hasattr(link_obj, 'description'):
		return __commit2cursor(cursor=link_obj, queries=queries, extra_verbose=extra_verbose, get_col_idx=get_col_idx)
	# is it a connection ?
	if (hasattr(link_obj, 'commit') and hasattr(link_obj, 'cursor')):
		return __commit2conn(conn=link_obj, queries=queries, end_tx=end_tx, extra_verbose=extra_verbose, get_col_idx=get_col_idx)
	# take it to be a service name then
	return __commit2service(service=link_obj, queries=queries, max_tries=max_tries, extra_verbose=extra_verbose, get_col_idx=get_col_idx)
#---------------------------------------------------
def __commit2service(service=None, queries=None, max_tries=1, extra_verbose=False, get_col_idx=False):
	# sanity checks
	try: max_tries = int(max_tries)
	except ValueError: max_tries = 1
	if max_tries > 4:
		max_tries = 4
	if max_tries < 1:
		max_tries = 1
	# get cursor
	pool = ConnectionPool()
	conn = pool.GetConnection(str(service), readonly = 0)
	if conn is None:
		msg = 'cannot connect to service [%s]'
		_log.Log(gmLog.lErr, msg % service)
		return (False, (1, _(msg) % service))
	if extra_verbose:
		conn.conn.toggleShowQuery
	curs = conn.cursor()
	for attempt in range(0, max_tries):
		if extra_verbose:
			_log.Log(gmLog.lData, 'attempt %s' % attempt)
		# run queries
		retry_transaction = False
		for query, args in queries:
			if extra_verbose:
				t1 = time.time()
			try:
				curs.execute(query, *args)
			# FIXME: be more specific in exception catching
			except:
				if extra_verbose:
					duration = time.time() - t1
					_log.Log(gmLog.lData, 'query took %3.3f seconds' % duration)
				conn.rollback()
				exc_info = sys.exc_info()
				typ, val, tb = exc_info
				if str(val).find(_serialize_failure) != -1:
					_log.Log(gmLog.lData, 'concurrency conflict detected, cannot serialize access due to concurrent update')
					if attempt < (max_tries - 1):
						# break out of the queries loop and
						# restart the whole transaction
						time.sleep(0.1)
						retry_transaction = True
						break
					curs.close()
					conn.close()
					return (False, (2, 'l'))
				# FIXME: handle more types of errors
				_log.Log(gmLog.lErr, 'query: %s' % query[:2048])
				try:
					_log.Log(gmLog.lErr, 'argument: %s' % str(args)[:2048])
				except MemoryError:
					pass
				_log.LogException("query failed on link [%s]" % service, exc_info)
				if extra_verbose:
					__log_PG_settings(curs)
				curs.close()
				conn.close()
				tmp = str(val).replace('ERROR:', '')
				tmp = tmp.replace('ExecAppend:', '')
				tmp = tmp.strip()
				return (False, (1, _('SQL: %s') % tmp))
			# apparently succeeded
			if extra_verbose:
				duration = time.time() - t1
				_log.Log(gmLog.lData, 'query: %s' % query[:2048])
				try:
					_log.Log(gmLog.lData, 'args : %s' % str(args)[:2048])
				except MemoryError:
					pass
				_log.Log(gmLog.lData, 'query succeeded on link [%s]' % service)
				_log.Log(gmLog.lData, '%s rows affected/returned in %3.3f seconds' % (curs.rowcount, duration))
		# done with queries
		if retry_transaction:
			continue
		break	# out of retry loop
	# done with attempt(s)
	# did we get result rows in the last query ?
	data = None
	idx = {}
	# The DB-API is ambiguous about whether cursor.description and
	# cursor.rowcount apply to the most recent query on a cursor or to
	# the entire lifetime of said cursor. pyPgSQL assumes the latter,
	# hence we need to catch exceptions when there's no data from the
	# *last* query.
	try:
		data = curs.fetchall()
	except:
		if extra_verbose:
			_log.Log(gmLog.lData, 'fetchall(): last query did not return rows')
		# should be None if no rows were returned ...
		if curs.description is not None:
			_log.Log(gmLog.lData, 'there seem to be rows but fetchall() failed -- DB API violation ?')
			_log.Log(gmLog.lData, 'rowcount: %s, description: %s' % (curs.rowcount, curs.description))
	conn.commit()
	if get_col_idx:
		idx = get_col_indices(curs)
	curs.close()
	conn.close()
	return (True, (data, idx))
#---------------------------------------------------
def __commit2conn(conn=None, queries=None, end_tx=False, extra_verbose=False, get_col_idx=False):
	if extra_verbose:
		conn.conn.toggleShowQuery

	# get cursor
	curs = conn.cursor()

	# run queries
	for query, args in queries:
		if extra_verbose:
			t1 = time.time()
		try:
			curs.execute(query, *args)
		except:
			if extra_verbose:
				duration = time.time() - t1
				_log.Log(gmLog.lData, 'query took %3.3f seconds' % duration)
			conn.rollback()
			exc_info = sys.exc_info()
			typ, val, tb = exc_info
			if str(val).find(_serialize_failure) != -1:
				_log.Log(gmLog.lData, 'concurrency conflict detected, cannot serialize access due to concurrent update')
				curs.close()
				if extra_verbose:
					conn.conn.toggleShowQuery
				return (False, (2, 'l'))
			# FIXME: handle more types of errors
			_log.Log(gmLog.lErr, 'query: %s' % query[:2048])
			try:
				_log.Log(gmLog.lErr, 'args : %s' % str(args)[:2048])
			except MemoryError:
				pass
			_log.LogException("query failed on link [%s]" % conn, exc_info)
			if extra_verbose:
				__log_PG_settings(curs)
			curs.close()
			tmp = str(val).replace('ERROR:', '')
			tmp = tmp.replace('ExecAppend:', '')
			tmp = tmp.strip()
			if extra_verbose:
				conn.conn.toggleShowQuery
			return (False, (1, _('SQL: %s') % tmp))
		# apparently succeeded
		if extra_verbose:
			duration = time.time() - t1
			_log.Log(gmLog.lData, 'query: %s' % query[:2048])
			try:
				_log.Log(gmLog.lData, 'args : %s' % str(args)[:2048])
			except MemoryError:
				pass
			_log.Log(gmLog.lData, 'query succeeded on link [%s]' % conn)
			_log.Log(gmLog.lData, '%s rows affected/returned in %3.3f seconds' % (curs.rowcount, duration))
	# done with queries
	if extra_verbose:
		conn.conn.toggleShowQuery
	# did we get result rows in the last query ?
	data = None
	idx = {}
	# The DB-API is ambiguous about whether cursor.description and
	# cursor.rowcount apply to the most recent query on a cursor or to
	# the entire lifetime of said cursor. pyPgSQL assumes the latter,
	# hence we need to catch exceptions when there's no data from the
	# *last* query.
	try:
		data = curs.fetchall()
	except:
		if extra_verbose:
			_log.Log(gmLog.lData, 'fetchall(): last query did not return rows')
		# should be None if no rows were returned ...
		if curs.description is not None:
			_log.Log(gmLog.lData, 'there seem to be rows but fetchall() failed -- DB API violation ?')
			_log.Log(gmLog.lData, 'rowcount: %s, description: %s' % (curs.rowcount, curs.description))
	if end_tx:
		conn.commit()
	if get_col_idx:
		idx = get_col_indices(curs)
	curs.close()
	return (True, (data, idx))
#---------------------------------------------------
def __commit2cursor(cursor=None, queries=None, extra_verbose=False, get_col_idx=False):
	# the code below uses both names for the one cursor handed in
	curs = cursor
	# run queries
	for query, args in queries:
		if extra_verbose:
			t1 = time.time()
		try:
			curs.execute(query, *args)
		except:
			if extra_verbose:
				duration = time.time() - t1
				_log.Log(gmLog.lData, 'query took %3.3f seconds' % duration)
			exc_info = sys.exc_info()
			typ, val, tb = exc_info
			if str(val).find(_serialize_failure) != -1:
				_log.Log(gmLog.lData, 'concurrency conflict detected, cannot serialize access due to concurrent update')
				return (False, (2, 'l'))
			# FIXME: handle more types of errors
			_log.Log(gmLog.lErr, 'query: %s' % query[:2048])
			try:
				_log.Log(gmLog.lErr, 'args : %s' % str(args)[:2048])
			except MemoryError:
				pass
			_log.LogException("query failed on link [%s]" % cursor, exc_info)
			if extra_verbose:
				__log_PG_settings(curs)
			tmp = str(val).replace('ERROR:', '')
			tmp = tmp.replace('ExecAppend:', '')
			tmp = tmp.strip()
			return (False, (1, _('SQL: %s') % tmp))
		# apparently succeeded
		if extra_verbose:
			duration = time.time() - t1
			_log.Log(gmLog.lData, 'query: %s' % query[:2048])
			try:
				_log.Log(gmLog.lData, 'args : %s' % str(args)[:2048])
			except MemoryError:
				pass
			_log.Log(gmLog.lData, 'query succeeded on link [%s]' % cursor)
			_log.Log(gmLog.lData, '%s rows affected/returned in %3.3f seconds' % (curs.rowcount, duration))

	# did we get result rows in the last query ?
	data = None
	idx = {}
	# The DB-API is ambiguous about whether cursor.description and
	# cursor.rowcount apply to the most recent query on a cursor or to
	# the entire lifetime of said cursor. pyPgSQL assumes the latter,
	# hence we need to catch exceptions when there's no data from the
	# *last* query.
	try:
		data = curs.fetchall()
	except:
		if extra_verbose:
			_log.Log(gmLog.lData, 'fetchall(): last query did not return rows')
		# should be None if no rows were returned ...
		if curs.description is not None:
			_log.Log(gmLog.lData, 'there seem to be rows but fetchall() failed -- DB API violation ?')
			_log.Log(gmLog.lData, 'rowcount: %s, description: %s' % (curs.rowcount, curs.description))
	if get_col_idx:
		idx = get_col_indices(curs)
	return (True, (data, idx))
#---------------------------------------------------
def run_commit(link_obj=None, queries=None, return_err_msg=None):
	"""Convenience function for running a transaction
	that is supposed to get committed.

	- link_obj can be
	  - a cursor: rollback/commit must be done by the caller
	  - a connection: rollback/commit is handled
	  - a service name: rollback/commit is handled

	- queries is a list of (query, [args]) tuples
	  - executed as a single transaction

	- returns:
	  - a tuple (<value>, error) if return_err_msg is True
	  - a scalar <value> if return_err_msg is False

	- <value> will be
	  - None: if any query failed
	  - 1: if all queries succeeded (also 0 queries)
	  - data: if the last query returned rows
	"""
	print "DEPRECATION WARNING: gmPG.run_commit() is deprecated, use run_commit2() instead"

	# sanity checks
	if link_obj is None:
		raise TypeError, 'gmPG.run_commit(): link_obj must be of type service name, connection or cursor'
	if queries is None:
		raise TypeError, 'gmPG.run_commit(): forgot to pass in queries'
	if len(queries) == 0:
		_log.Log(gmLog.lWarn, 'no queries to execute ?!?')
		if return_err_msg:
			return (1, 'no queries to execute ?!?')
		return 1

	close_cursor = noop
	close_conn = noop
	commit = noop
	rollback = noop
	# is it a cursor ?
	if hasattr(link_obj, 'fetchone') and hasattr(link_obj, 'description'):
		curs = link_obj
	# is it a connection ?
	elif (hasattr(link_obj, 'commit') and hasattr(link_obj, 'cursor')):
		curs = link_obj.cursor()
		close_cursor = curs.close
		conn = link_obj
		commit = link_obj.commit
		rollback = link_obj.rollback
	# take it to be a service name then
	else:
		pool = ConnectionPool()
		conn = pool.GetConnection(link_obj, readonly = 0)
		if conn is None:
			_log.Log(gmLog.lErr, 'cannot connect to service [%s]' % link_obj)
			if return_err_msg:
				return (None, _('cannot connect to service [%s]') % link_obj)
			return None
		curs = conn.cursor()
		close_cursor = curs.close
		close_conn = conn.close
		commit = conn.commit
		rollback = conn.rollback
	# run queries
	for query, args in queries:
#		t1 = time.time()
		try:
			curs.execute(query, *args)
		except:
			rollback()
			exc_info = sys.exc_info()
			_log.LogException("RW query >>>%s<<< with args >>>%s<<< failed on link [%s]" % (query[:1024], str(args)[:1024], link_obj), exc_info, verbose = _query_logging_verbosity)
			__log_PG_settings(curs)
			close_cursor()
			close_conn()
			if return_err_msg:
				typ, val, tb = exc_info
				tmp = string.replace(str(val), 'ERROR:', '')
				tmp = string.replace(tmp, 'ExecAppend:', '')
				tmp = string.strip(tmp)
				return (None, 'SQL: %s' % tmp)
			return None
#		t2 = time.time()
#		print t2-t1, query
		if _query_logging_verbosity == 1:
			_log.Log(gmLog.lData, '%s rows affected by >>>%s<<<' % (curs.rowcount, query))
	# did we get result rows in the last query ?
	data = None
	# The DB-API is ambiguous about whether cursor.description and
	# cursor.rowcount apply to the most recent query on a cursor or to
	# the entire lifetime of said cursor. pyPgSQL assumes the latter,
	# hence we need to catch exceptions when there's no data from the
	# *last* query.
	try:
		data = curs.fetchall()
		if _query_logging_verbosity == 1:
			_log.Log(gmLog.lData, 'last query returned %s rows' % curs.rowcount)
	except:
		if _query_logging_verbosity == 1:
			_log.Log(gmLog.lData, 'fetchall(): last query did not return rows')
		# something seems odd
		if curs.description is not None:
			if curs.rowcount > 0:
				_log.Log(gmLog.lData, 'there seem to be rows but fetchall() failed -- DB API violation ?')
				_log.Log(gmLog.lData, 'rowcount: %s, description: %s' % (curs.rowcount, curs.description))

	# clean up
	commit()
	close_cursor()
	close_conn()

	if data is None: status = 1
	else: status = data
	if return_err_msg: return (status, '')
	return status
#---------------------------------------------------
def run_ro_query(link_obj=None, aQuery=None, get_col_idx=False, *args):
	"""Runs a read-only query.

	- link_obj can be a service name, connection or cursor object

	- return status:
	  - returns data if get_col_idx is False
	  - returns (data, idx) if get_col_idx is True

	- if the query fails: data is None
	- if the query is not a row-returning SQL statement: data is None

	- data is a list of tuples [(w,x,y,z), (a,b,c,d), ...] where each tuple is a table row
	- idx is a map of column names to their positions in the row tuples,
	  e.g. {'name': 3, 'id': 0, 'job_description': 2, 'location': 1}

	usage: e.g. data[0][idx['name']] would return z from [(w,x,y,z), (a,b,c,d)]
	"""
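	# A call sketch (service, query and pattern are illustrative):
	#
	#	data, idx = run_ro_query('default', "select name from cfg.db where name ~ %s", True, 'gnumed')
	#	if data is not None:
	#		for row in data:
	#			print row[idx['name']]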
	# sanity checks
	if link_obj is None:
		raise TypeError, 'gmPG.run_ro_query(): link_obj must be of type service name, connection or cursor'
	if aQuery is None:
		raise TypeError, 'gmPG.run_ro_query(): forgot to pass in aQuery'

	close_cursor = noop
	close_conn = noop
	# is it a cursor ?
	if hasattr(link_obj, 'fetchone') and hasattr(link_obj, 'description'):
		curs = link_obj
	# is it a connection ?
	elif (hasattr(link_obj, 'commit') and hasattr(link_obj, 'cursor')):
		curs = link_obj.cursor()
		close_cursor = curs.close
	# take it to be a service name then
	else:
		pool = ConnectionPool()
		conn = pool.GetConnection(link_obj, readonly = 1)
		if conn is None:
			_log.Log(gmLog.lErr, 'cannot get connection to service [%s]' % link_obj)
			if not get_col_idx:
				return None
			else:
				return None, None
		curs = conn.cursor()
		close_cursor = curs.close
		close_conn = pool.ReleaseConnection
#	t1 = time.time()
	# run the query
	try:
		curs.execute(aQuery, *args)
		global last_ro_cursor_desc
		last_ro_cursor_desc = curs.description
	except:
		_log.LogException("query >>>%s<<< with args >>>%s<<< failed on link [%s]" % (aQuery[:250], str(args)[:250], link_obj), sys.exc_info(), verbose = _query_logging_verbosity)	# this can fail on *large* args
		__log_PG_settings(curs)
		close_cursor()
		close_conn(link_obj)
		if not get_col_idx:
			return None
		else:
			return None, None
#	t2 = time.time()
#	print t2-t1, aQuery
	# and return the data, possibly including the column index
	if curs.description is None:
		data = None
		_log.Log(gmLog.lErr, 'query did not return rows')
	else:
		try:
			data = curs.fetchall()
		except:
			_log.LogException('cursor.fetchall() failed on link [%s]' % link_obj, sys.exc_info(), verbose = _query_logging_verbosity)
			close_cursor()
			close_conn(link_obj)
			if not get_col_idx:
				return None
			else:
				return None, None

	# can "close" before closing cursor since it just decrements the ref counter
	close_conn(link_obj)
	if get_col_idx:
		col_idx = get_col_indices(curs)
		close_cursor()
		return data, col_idx
	else:
		close_cursor()
		return data
#---------------------------------------------------
#---------------------------------------------------
def get_col_indices(aCursor=None):
	# sanity checks
	if aCursor is None:
		_log.Log(gmLog.lErr, 'need cursor to get column indices')
		return None
	if aCursor.description is None:
		_log.Log(gmLog.lErr, 'no result description available: cursor unused or last query did not select rows')
		return None
	col_indices = {}
	col_index = 0
	for col_desc in aCursor.description:
		col_indices[col_desc[0]] = col_index
		col_index += 1
	return col_indices
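
# e.g. for a cursor whose last query was "select pk, name from cfg.db"
# get_col_indices() would (illustratively) return {'pk': 0, 'name': 1}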
#---------------------------------------------------
#---------------------------------------------------
#---------------------------------------------------
def get_pkey_name(aCursor=None, aTable=None):
	# sanity checks
	if aCursor is None:
		_log.Log(gmLog.lErr, 'need cursor to determine primary key')
		return None
	if aTable is None:
		_log.Log(gmLog.lErr, 'need table name for which to determine primary key')
		return None

	if not run_query(aCursor, None, query_pkey_name, aTable):
		_log.Log(gmLog.lErr, 'cannot determine primary key')
		return -1
	result = aCursor.fetchone()
	if result is None:
		return None
	return result[0]
#---------------------------------------------------
def get_fkey_defs(source, table):
	"""Returns a dictionary of referenced foreign keys.

	key = column name of this table
	value = (referenced table name, referenced column name) tuple
	"""
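	# The <tgargs> column selected by query_fkey_names is a \x00-separated
	# blob; illustratively, one value might look like
	#
	#	'fk_name\x00this_table\x00other_table\x00UNSPECIFIED\x00src_col\x00target_col\x00'
	#
	# which is what the unpacking loop at the bottom relies on.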
	manage_connection = 0
	close_cursor = 1
	# is it a cursor ?
	if hasattr(source, 'fetchone') and hasattr(source, 'description'):
		close_cursor = 0
		curs = source
	# is it a connection ?
	elif (hasattr(source, 'commit') and hasattr(source, 'cursor')):
		curs = source.cursor()
	# take it to be a service name then
	else:
		manage_connection = 1
		pool = ConnectionPool()
		conn = pool.GetConnection(source)
		if conn is None:
			_log.Log(gmLog.lErr, 'cannot get fkey names on table [%s] from source [%s]' % (table, source))
			return None
		curs = conn.cursor()

	if not run_query(curs, None, query_fkey_names, table):
		if close_cursor:
			curs.close()
		if manage_connection:
			pool.ReleaseConnection(source)
		_log.Log(gmLog.lErr, 'cannot get foreign keys on table [%s] from source [%s]' % (table, source))
		return None

	fks = curs.fetchall()
	if close_cursor:
		curs.close()
	if manage_connection:
		pool.ReleaseConnection(source)

	references = {}
	for fk in fks:
		fkname, src_table, target_table, tmp, src_col, target_col, tmp = string.split(fk[0], '\x00')
		references[src_col] = (target_table, target_col)

	return references
#---------------------------------------------------
def add_housekeeping_todo(
	reporter='$RCSfile: gmPG.py,v $ $Revision: 1.90 $',
	receiver='DEFAULT',
	problem='lazy programmer',
	solution='lazy programmer',
	context='lazy programmer',
	category='lazy programmer'
):
	queries = []
	cmd = "insert into housekeeping_todo (reported_by, reported_to, problem, solution, context, category) values (%s, %s, %s, %s, %s, %s)"
	queries.append((cmd, [reporter, receiver, problem, solution, context, category]))
	cmd = "select currval('housekeeping_todo_pk_seq')"
	queries.append((cmd, []))
	result, err = run_commit('historica', queries, 1)
	if result is None:
		_log.Log(gmLog.lErr, err)
		return (None, err)
	return (1, result[0][0])
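
# A call sketch (argument values illustrative):
#
#	status, pk = add_housekeeping_todo(problem='report X fails', solution='unknown', context='gmPG testing')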
#==================================================================
def __run_notifications_debugger():
	#-------------------------------
	def myCallback(**kwds):
		sys.stdout.flush()
		print "\n=== myCallback: got called ==="
		print kwds
	#-------------------------------

	dbpool = ConnectionPool()
	roconn = dbpool.GetConnection('default', extra_verbose=1)
	rocurs = roconn.cursor()

	# main shell loop
	print "PostgreSQL backend listener debug shell"
	while 1:
		print "---------------------------------------"
		typed = raw_input("=> ")
		args = typed.split(' ')
		# nothing typed ?
		if len(args) == 0:
			continue
		# help
		if args[0] in ('help', '?'):
			print "known commands"
			print "--------------"
			print "'listen' - start listening to a signal"
			print "'ignore' - stop listening to a signal"
			print "'send' - send a signal"
			print "'quit', 'exit', 'done' - well, chicken out"
			continue
		# exit
		if args[0] in ('quit', 'exit', 'done'):
			break
		# signal stuff
		if args[0] in ("listen", "ignore", "send"):
			typed = raw_input("signal name: ")
			sig_names = typed.split(' ')
			# nothing typed ?
			if len(sig_names) == 0:
				continue
			if args[0] == "listen":
				dbpool.Listen('default', sig_names[0], myCallback)
			if args[0] == "ignore":
				dbpool.Unlisten('default', sig_names[0], myCallback)
			if args[0] == "send":
				cmd = 'NOTIFY "%s"' % sig_names[0]
				print "... running >>>%s<<<" % (cmd)
				if not run_query(rocurs, None, cmd):
					print "... error sending [%s]" % cmd
				roconn.commit()
			continue
		print 'unknown command [%s]' % typed

	# clean up
	print "please wait a second or two for threads to sync and die"
	dbpool.StopListener('default')
	rocurs.close()
	roconn.close()
	dbpool.ReleaseConnection('default')
#==================================================================
# Main - unit testing
#------------------------------------------------------------------
if __name__ == "__main__":
	_log.Log(gmLog.lData, 'DBMS "%s" via DB-API module "%s": API level %s, thread safety %s, parameter style "%s"' % ('PostgreSQL', dbapi, dbapi.apilevel, dbapi.threadsafety, dbapi.paramstyle))

	print "Do you want to test the backend notification code ?"
	yes_no = raw_input('y/n: ')
	if yes_no == 'y':
		__run_notifications_debugger()
		sys.exit()

	dbpool = ConnectionPool()
	### Let's see what services are distributed in this system:
	print "\n\nServices available on this system:"
	print '-----------------------------------------'
	for service in dbpool.GetAvailableServices():
		print service
		dummy = dbpool.GetConnection(service)
	print "\n.......................................\n"

	### We have probably not distributed the services in full:
	db = dbpool.GetConnection('config')
	print "\n\nPossible services on any gnumed system:"
	print '-----------------------------------------'
	cursor = db.cursor()
	cursor.execute("select name from cfg.distributed_db")
	for service in cursor.fetchall():
		print service[0]

	print "\nTesting convenience functions:\n============================\n"

	print "\nResult as dictionary\n==================\n"
	cursor.execute("select * from cfg.db")
	d = dictResult(cursor)
	print d
	print "\nResult attributes\n==================\n"
	n = fieldNames(cursor)
	#-------------------------------
	def TestCallback(**kwds):
		print "Backend notification received!"
	#-------------------------------
	print "\n-------------------------------------"
	print "Testing asynchronous notification for approx. 20 seconds"
	print "start psql in another window connect to gnumed"
	print "and type 'notify test'; if everything works,"
	print "a message [Backend notification received!] should appear\n"
	dbpool.Listen('default', 'test', TestCallback)
	time.sleep(20)
	dbpool.StopListener('default')
	print "Requesting write access connection:"
	con = dbpool.GetConnection('default', readonly=0)

#==================================================================
1343 # $Log: gmPG.py,v $
1344 # Revision 1.90 2009/12/21 15:02:17 ncq
1345 # - fix typo
1346 #
1347 # Revision 1.89 2009/03/10 14:26:51 ncq
1348 # - remove old code
1349 #
1350 # Revision 1.88 2008/10/12 15:46:44 ncq
1351 # - mark obsolete
1352 #
1353 # Revision 1.87 2007/12/26 18:34:02 ncq
1354 # - no more old CLI lib
1355 #
1356 # Revision 1.86 2007/10/25 16:41:47 ncq
1357 # - cleanup
1358 #
1359 # Revision 1.85 2007/02/06 12:11:25 ncq
1360 # - gnumed_v5
1361 #
1362 # Revision 1.84 2006/12/06 16:04:48 ncq
1363 # - cleanup
1364 #
1365 # Revision 1.83 2006/10/23 13:22:26 ncq
1366 # - cleanup only
1367 #
1368 # Revision 1.82 2006/10/08 09:24:02 ncq
1369 # - add deprecation warning
1370 #
1371 # Revision 1.81 2006/09/21 19:47:40 ncq
1372 # - change default to "gnumed_v3"
1373 #
1374 # Revision 1.80 2006/09/01 14:42:54 ncq
1375 # - *always* return unicode from database
1376 #
1377 # Revision 1.79 2006/08/28 14:32:40 ncq
1378 # - read data from database in unicode encoding, hence use unicode_results in connect()
1379 #
1380 # Revision 1.78 2006/08/01 22:02:42 ncq
1381 # - update v2 hash
1382 #
1383 # Revision 1.77 2006/07/30 17:40:30 ncq
1384 # - cleanup
1385 #
1386 # Revision 1.76 2006/07/19 20:27:03 ncq
1387 # - gmPyCompat.py is history
1388 #
1389 # Revision 1.75 2006/07/10 21:46:36 ncq
1390 # - saner choice of encoding if not set
1391 #
1392 # Revision 1.74 2006/07/01 11:24:56 ncq
1393 # - make encoding parameter a dict so we can give two names for
1394 # the same encoding: one to use with PG and one to use with Python
1395 #
1396 # Revision 1.73 2006/06/26 21:49:06 ncq
1397 # - cleanup and fix encoding handling
1398 #
1399 # Revision 1.72 2006/06/20 09:38:12 ncq
1400 # - we are nearing db v2 stabilization so start using known hashes
1401 #
1402 # Revision 1.71 2006/06/18 22:15:38 shilbert
1403 # - removed surplus character
1404 #
1405 # Revision 1.70 2006/06/18 21:54:36 ncq
1406 # - logging dies when args are huge (str(args) flukes) so work around it
1407 #
1408 # Revision 1.69 2006/06/18 12:25:37 ncq
1409 # - log failing cursor.fetchall() (yes, it happens, think SQL injection attacks)
1410 #
1411 # Revision 1.68 2006/06/14 14:33:52 ncq
1412 # - start being even more strict about character encoding issues
1413 #
1414 # Revision 1.67 2006/06/12 21:26:21 ncq
1415 # - explicitely tell pyPgSQL module about client_encoding ...
1416 #
1417 # Revision 1.66 2006/05/24 12:50:21 ncq
1418 # - now only empty string '' means use local UNIX domain socket connections
1419 #
1420 # Revision 1.65 2006/05/12 12:06:55 ncq
1421 # - add get_current_user()
1422 #
1423 # Revision 1.64 2006/05/04 17:53:32 ncq
1424 # - add function/query to get child tables for parent
1425 #
1426 # Revision 1.63 2006/02/26 18:33:24 ncq
1427 # - change default to gnumed_v2
1428 #
1429 # Revision 1.62 2006/02/12 14:56:43 ncq
1430 # - add get_connection_by_user()
1431 #
1432 # Revision 1.61 2006/01/06 10:17:29 ncq
1433 # - properly deal with array columns in get_col_defs()
1434 # (needed by audit generator)
1435 #
1436 # Revision 1.60 2005/12/27 18:43:46 ncq
1437 # - add database schema verification support
1438 # - _v2_schema_hash
1439 # - database_schema_compatible()
1440 #
1441 # Revision 1.59 2005/12/04 22:17:31 ncq
1442 # - add some queries and convenience functions
1443 #
1444 # Revision 1.58 2005/11/18 15:48:07 ncq
1445 # - adjust to config tables now living in cfg.* schema, also some id->pk
1446 #
1447 # Revision 1.57 2005/10/15 18:18:19 ncq
1448 # - improved query logging in case of failure or --debug
1449 #
1450 # Revision 1.56 2005/10/10 18:24:00 ncq
1451 # - IF we create shortcuts into the DB-API do it properly
1452 #
1453 # Revision 1.55 2005/10/08 12:33:07 sjtan
1454 # tree can be updated now without refetching entire cache; done by passing emr object to create_xxxx methods and calling emr.update_cache(key,obj);refresh_historical_tree non-destructively checks for changes and removes removed nodes and adds them if cache mismatch.
1455 #
1456 # Revision 1.54 2005/09/25 17:22:42 ncq
1457 # - cleanup
1458 #
1459 # Revision 1.53 2005/09/25 01:00:47 ihaywood
1460 # bugfixes
1461 #
1462 # remember 2.6 uses "import wx" not "from wxPython import wx"
1463 # removed not null constraint on clin_encounter.rfe as has no value on instantiation
1464 # client doesn't try to set clin_encounter.description as it doesn't exist anymore
1465 #
1466 # Revision 1.52 2005/09/24 09:14:39 ncq
1467 # - cleanup, removing bogus support for other DB-API adapters
1468 # - remove __backend, we only support PostgreSQL anyways
1469 #
1470 # Revision 1.51 2005/07/16 18:35:55 ncq
1471 # - catch more errors around locale access
1472 #
1473 # Revision 1.50 2005/07/11 08:34:11 ncq
1474 # - better messages on failing to import a DB-API module
1475 #
1476 # Revision 1.49 2005/06/12 22:18:36 ncq
1477 # - allow importers to set default client timezone
1478 #
1479 # Revision 1.48 2005/06/09 21:32:12 ncq
1480 # - torture test fixes :-)
1481 # - properly detect "cannot serialize access due to concurrent update"
1482 # - return (2, 'l') when it happens (that is, the row is 'l'ocked)
1483 #
1484 # Revision 1.47 2005/04/11 18:00:54 ncq
1485 # - cleanup
1486 #
1487 # Revision 1.46 2005/03/30 22:09:34 ncq
1488 # - better logging, as usual
1489 #
1490 # Revision 1.45 2005/03/29 07:26:38 ncq
1491 # - use std lib locale module to guess default client encoding
1492 #
1493 # Revision 1.44 2005/03/08 16:45:11 ncq
1494 # - add TODO item on iterator/generator-based row fetching
1495 #
1496 # Revision 1.43 2005/01/31 12:57:36 ncq
1497 # - get_col_indices() *before* curs.close()
1498 #
1499 # Revision 1.42 2005/01/31 09:32:34 ncq
1500 # - improve error handling in commit2()
1501 #
1502 # Revision 1.41 2005/01/31 06:26:38 ncq
1503 # - several tidy-ups
1504 #
1505 # Revision 1.40 2005/01/29 17:56:13 ncq
1506 # - fix silly off-by-one bug in commit2service() with # of attempts,
1507 # this fixes the bug Carlos noted when creating episodes
1508 # - improve debug logging in commit2()
1509 #
1510 # Revision 1.39 2005/01/03 18:22:58 ncq
1511 # - improve (data, idx) return and docs in commit2
1512 #
1513 # Revision 1.38 2005/01/02 16:15:34 ncq
1514 # - by Ian: make commit2() return col idx on request
1515 # - changed to always return tuple (data, idx) with
1516 # idx = {} if not requested
1517 #
1518 # Revision 1.37 2004/12/20 16:48:00 ncq
1519 # - minor improvement to inline docs
1520 #
1521 # Revision 1.36 2004/11/24 16:00:43 ncq
1522 # - we need to import into the module global namespace, however
1523 #
1524 # Revision 1.35 2004/11/24 15:56:39 ncq
1525 # - import gmPyCompat
1526 #
1527 # Revision 1.34 2004/11/21 20:54:59 ncq
1528 # - give concurrency retries some slack
1529 #
1530 # Revision 1.33 2004/11/03 22:19:53 ncq
1531 # - improve strings
1532 #
1533 # Revision 1.32 2004/11/02 21:04:40 ncq
1534 # - checked in first cut at run_commit2()
1535 # - next step is to make __commit2service/conn() use __commit2cursor()
1536 #
1537 # Revision 1.31 2004/11/01 23:21:30 ncq
1538 # - remove some cruft
1539 # - add stub for run_commit() so people can comment
1540 # (run_commit() started to smell rotten so let's try to
1541 # get it right this time and design a sane API for it)
1542 #
1543 # Revision 1.30 2004/10/29 22:34:37 ncq
1544 # - cleanup
1545 #
1546 # Revision 1.29 2004/09/20 21:09:10 ncq
1547 # - use noop() idiom in table_exists()
1548 #
1549 # Revision 1.28 2004/09/13 09:33:07 ncq
1550 # - axe backend options/tty support
1551 #
1552 # Revision 1.27 2004/09/06 22:19:28 ncq
1553 # - some cleanup
1554 #
1555 # Revision 1.26 2004/09/06 18:56:16 ncq
1556 # - improve inline docs
1557 #
1558 # Revision 1.25 2004/09/01 22:00:10 ncq
1559 # - prompt for host first in textmode login dialog
1560 #
1561 # Revision 1.24 2004/07/17 20:54:50 ncq
1562 # - remove user/_user workaround
1563 #
1564 # Revision 1.23 2004/06/20 16:54:55 ncq
1565 # - restrict length of logged data in run_ro_query and run_commit
1566 #
1567 # Revision 1.22 2004/06/09 14:55:44 ncq
1568 # - cleanup, typos
1569 # - commented out connection lifeness check as per Syan's suggestion
# - adapt StopListener(s)() to gmBackendListener changes
#
# Revision 1.21  2004/05/16 14:32:07  ncq
# - cleanup
#
# Revision 1.20  2004/05/15 15:07:53  sjtan
#
# more comments on run_ro_query return values.
#
# Revision 1.19  2004/05/13 00:00:54  ncq
# - deescalate apparent DB API violation to lData as it seems very common and harmless
#
# Revision 1.18  2004/05/06 23:26:09  ncq
# - cleanup _setup_default_ro_conns()
#
# Revision 1.17  2004/04/28 03:25:01  ihaywood
# ensure sane timezone
#
# Revision 1.16  2004/04/27 22:43:28  ncq
# - with PG versions that support it, failing queries now log the PG settings if --debug
#
# Revision 1.15  2004/04/27 22:03:27  ncq
# - we now set the datestyle to ISO on a hard connect()
#
# Revision 1.14  2004/04/26 21:59:46  ncq
# - add_housekeeping_todo()
#
# Revision 1.13  2004/04/24 13:17:02  ncq
# - logininfo() needs host= in request_login_params_tui()
#
# Revision 1.12  2004/04/22 13:14:38  ncq
# - cleanup
#
# Revision 1.11  2004/04/21 14:27:15  ihaywood
# bug preventing backendlistener working on local socket connections
#
# Revision 1.10  2004/04/19 12:46:24  ncq
# - much improved docs on run_commit()
# - use noop() in run_commit()
# - fix rollback/commit behaviour in run_commit() - I wonder why it ever worked !?!
#
# Revision 1.9  2004/04/16 16:18:37  ncq
# - correctly check for returned rows in run_commit()
#
# Revision 1.8  2004/04/16 00:21:22  ncq
# - fix access to "data" in run_commit
#
# Revision 1.7  2004/04/15 23:38:07  ncq
# - debug odd rowcount vs description behaviour in row-returning commits
#
# Revision 1.6  2004/04/11 10:13:32  ncq
# - document run_ro_query API
# - streamline run_ro_query link_obj handling via noop()
# - __-ize prompted_input, req*tui, req*gui, run_not*debugger
#
# Revision 1.5  2004/04/08 23:42:13  ncq
# - set time zone during connect
#
# Revision 1.4  2004/03/27 21:40:01  ncq
# - upon first connect log PG version services run on
#
# Revision 1.3  2004/03/03 14:49:22  ncq
# - need to commit() before curs.close() in run_commit()
# - micro-optimize when to commit() [eg, link_obj not a cursor]
#
# Revision 1.2  2004/03/03 05:24:01  ihaywood
# patient photograph support
#
# Revision 1.1  2004/02/25 09:30:13  ncq
# - moved here from python-common
#
# Revision 1.92  2004/02/18 13:43:33  ncq
# - fail with consistent return struct in run_commit()
#
# Revision 1.91  2004/01/22 23:41:06  ncq
# - add commented out query timing code
#
# Revision 1.90  2004/01/18 21:48:42  ncq
# - some important comments on what to do and not to do where
# - StopListeners()
# - remove dead code, cleanup
#
# Revision 1.89  2004/01/12 13:12:07  ncq
# - remove unhelpful phrases from PG < 7.4 error messages
#
# Revision 1.88  2004/01/09 23:50:25  ncq
# - run_commit() now returns the database level error
#   message if return_err_msg is true, default false
#
# Revision 1.87  2004/01/06 10:03:44  ncq
# - don't log use of RO conns anymore
#
# Revision 1.86  2003/12/29 16:31:10  uid66147
# - better logging, cleanup, better encoding handling
# - run_commit/ro_query() now accept either cursor, connection or service name
# - run_ro_query() now sanity checks if the query returned rows before calling fetchall()
#
# Revision 1.85  2003/11/20 00:48:45  ncq
# - re-added run_commit() returning rows if last DML returned rows
#
# Revision 1.84  2003/11/17 20:22:59  ncq
# - remove print()
#
# Revision 1.83  2003/11/17 10:56:36  sjtan
#
# synced and committing.
#
# Revision 1.83
# uses gmDispatcher to send new currentPatient objects to toplevel gmGP_ widgets. Proposal to use
# yaml serializer to store editarea data in narrative text field of clin_root_item until
# clin_root_item schema stabilizes.
#
# manual edit areas modelled after r.terry's specs.
#
# Revision 1.82  2003/11/07 20:34:04  ncq
# - more logging yet
#
# Revision 1.81  2003/11/04 00:19:24  ncq
# - GetConnection now toggles query printing via extra_verbose if dbapi=pyPgSql
#
# Revision 1.80  2003/10/26 15:07:47  ncq
# - in run_commit() if the last command returned rows (e.g. was a SELECT) return those rows to the caller
#
# Revision 1.79  2003/10/19 12:13:24  ncq
# - add table_exists() helper
#
# Revision 1.78  2003/09/30 19:08:31  ncq
# - add helper get_fkey_defs()
#
# Revision 1.77  2003/09/23 14:40:30  ncq
# - just some comments
#
# Revision 1.76  2003/09/23 12:09:27  ihaywood
# Karsten, we've been tripping over each other again
#
# Revision 1.75  2003/09/23 11:30:32  ncq
# - make run_ro_query return either a tuple or just the data depending on
#   the value of get_col_idx as per Ian's suggestion
#
# Revision 1.74  2003/09/23 06:43:45  ihaywood
# merging changes
#
# Revision 1.73  2003/09/23 06:41:27  ihaywood
# merging overlapped changes
#
# Revision 1.72  2003/09/22 23:31:44  ncq
# - remove some duplicate code
# - new style run_ro_query() use
#
# Revision 1.71  2003/09/21 12:47:48  ncq
# - iron out bugs
#
# Revision 1.70  2003/09/21 11:23:10  ncq
# - add run_ro_query() helper as suggested by Ian
#
# Revision 1.69  2003/09/16 22:41:11  ncq
# - get_pkey -> get_pkey_name
#
# Revision 1.68  2003/08/17 18:02:33  ncq
# - don't handle service "config" differently from the others
# - add helper get_pkey()
#
# Revision 1.67  2003/08/13 14:07:43  ncq
# - removed some dead code
#
# Revision 1.66  2003/07/21 20:55:39  ncq
# - add helper set_default_client_encoding()
#
# Revision 1.65  2003/07/21 19:21:22  ncq
# - remove esc(), correct quoting needs to be left to DB-API module
# - set client_encoding on connections
# - consolidate GetConnection()/GetConnectionUnchecked()
#
# Revision 1.64  2003/07/09 15:44:31  ncq
# - our RO connections need to be READ COMMITTED so they can
#   see concurrent committed writes
#
# Revision 1.63  2003/07/05 12:55:58  ncq
# - improved exception reporting on failing queries
#
# Revision 1.62  2003/06/27 16:05:22  ncq
# - get_col_indices() helper to be used after a select
#
# Revision 1.61  2003/06/26 21:37:00  ncq
# - fatal->verbose, curs(cmd, arg) style
#
# Revision 1.60  2003/06/26 04:18:40  ihaywood
# Fixes to gmCfg for commas
#
# Revision 1.59  2003/06/25 22:24:55  ncq
# - improve logging in run_query() depending on --debug (yuck !)
#
# Revision 1.58  2003/06/23 21:21:55  ncq
# - missing "return None" in run_query added
#
# Revision 1.57  2003/06/23 14:25:40  ncq
# - let DB-API do the quoting
#
# Revision 1.56  2003/06/21 10:53:03  ncq
# - correctly handle failing connections to cfg db
#
# Revision 1.55  2003/06/14 22:41:51  ncq
# - remove dead code
#
# Revision 1.54  2003/06/10 08:48:12  ncq
# - on-demand import of gmBackendListener so we can use gmPG generically
#   without having to have pyPgSQL available (as long as we don't use
#   notifications)
#
# Revision 1.53  2003/06/03 13:59:20  ncq
# - rewrite the liveness check to look much cleaner
#
# Revision 1.52  2003/06/03 13:46:52  ncq
# - some more fixes to Syan's connection liveness check in GetConnection()
#
# Revision 1.51  2003/06/01 13:20:32  sjtan
#
# logging to data stream for debugging. Adding DEBUG tags when I work out how to use vi
# with regular expression groups (maybe never).
#
# Revision 1.50  2003/06/01 12:55:58  sjtan
#
# SQL commit may cause PortalClose, whilst connection.commit() doesn't?
#
# Revision 1.49  2003/06/01 12:21:25  ncq
# - re-enable listening to async backend notifies
# - what do you mean "reactivate when needed" ?! this is used *already*
#
# Revision 1.48  2003/06/01 01:47:32  sjtan
#
# starting allergy connections.
#
# Revision 1.47  2003/05/17 17:29:28  ncq
# - teach it new-style ro/rw connection handling, mainly __pgconnect()
#
# Revision 1.46  2003/05/17 09:49:10  ncq
# - set default transaction isolation level to serializable
# - prepare for 7.4 read-only/read-write support on connections
#
# Revision 1.45  2003/05/05 15:23:39  ncq
# - close cursor as early as possible in GetLoginInfoFor()
#
# Revision 1.44  2003/05/05 14:08:19  hinnef
# bug fixes in cursorIndex and getLoginInfo
#
# Revision 1.43  2003/05/03 14:15:31  ncq
# - sync and stop threads in __del__
#
# Revision 1.42  2003/05/01 15:01:10  ncq
# - port must be int in backend.listener()
# - remove printk()s
#
# Revision 1.41  2003/04/28 13:23:53  ncq
# - make backend listener shell work by committing after notifying
#
# Revision 1.40  2003/04/27 11:52:26  ncq
# - added notifications debugger shell in test environment
#
# Revision 1.39  2003/04/27 11:37:46  ncq
# - heaps of cleanup, __service_mapping -> __service2db_map, cdb -> cfg_db
# - merge _ListenTo and _StartListeningThread into Listen()
# - add Unlisten()
#
# Revision 1.38  2003/04/25 13:02:10  ncq
# - cleanup and adaptation to cleaned up backend listener code
#
# Revision 1.37  2003/04/08 08:58:00  ncq
# - added comment
#
# Revision 1.36  2003/04/07 00:40:45  ncq
# - now finally also support running on the console (not within a terminal window inside X)
#
# Revision 1.35  2003/03/27 21:11:26  ncq
# - audit for connection object leaks
#
# Revision 1.34  2003/02/24 23:17:32  ncq
# - moved some comments out of the way
# - convenience function run_query()
#
# Revision 1.33  2003/02/19 23:41:23  ncq
# - removed excessive printk's
#
# Revision 1.32  2003/02/07 14:23:48  ncq
# - == None -> is None
#
# Revision 1.31  2003/01/16 14:45:04  ncq
# - debianized
#
# Revision 1.30  2003/01/06 14:35:02  ncq
# - fail gracefully on not being able to connect RW
#
# Revision 1.29  2003/01/05 09:58:19  ncq
# - explicitly use service=default on empty Get/ReleaseConnection()
#
# Revision 1.28  2002/10/29 23:12:25  ncq
# - a bit of cleanup
#
# Revision 1.27  2002/10/26 16:17:13  ncq
# - more explicit error reporting
#
# Revision 1.26  2002/10/26 02:45:52  hherb
# fixed error in name mangling for writeable connections (a "_" prepended to the user name persisted when the connection was reused)
#
# Revision 1.25  2002/10/25 13:02:35  hherb
# FetchReturnsList now default on connection creation
#
# Revision 1.24  2002/10/20 16:10:46  ncq
# - a few bits here and there
# - cleaner logging
# - raise ImportError on failing to import a database adapter instead of dying immediately
#
# Revision 1.23  2002/09/30 16:20:30  ncq
# - wrap printk()s in <DEBUG>
#
# Revision 1.22  2002/09/30 15:48:16  ncq
# - fix dumb bug regarding assignment of local variable logininfo
#
# Revision 1.21  2002/09/30 08:26:57  ncq
# - a bit saner logging
#
# Revision 1.20  2002/09/29 14:39:44  ncq
# - cleanup, clarification
#
# Revision 1.19  2002/09/26 13:14:59  ncq
# - log version
#
# Revision 1.18  2002/09/19 18:07:48  hinnef
# fixed two bugs that prevented distributed services from working (HB)
#
# Revision 1.17  2002/09/15 13:20:17  hherb
# option to return results as list instead of result set objects added
#
# Revision 1.16  2002/09/10 07:44:29  ncq
# - added changelog keyword
#
# @change log:
# 25.10.2001 hherb first draft, untested
# 29.10.2001 hherb crude functionality achieved (works! (sort of))
# 30.10.2001 hherb reference counting to prevent disconnection of active connections
# ==========================================================================
# significant version change!
# ==========================================================================
# 08.02.2002 hherb made DB API 2.0 compatible.
# 01.09.2002 hherb pyPgSQL preferred adapter
# 01.09.2002 hherb writeable connections, start work on asynchronous part