1 # -*- coding: utf-8 -*-
2
3
4 __doc__ = """GNUmed general tools."""
5
6 #===========================================================================
7 __author__ = "K. Hilbert <Karsten.Hilbert@gmx.net>"
8 __license__ = "GPL v2 or later (details at http://www.gnu.org)"
9
10 # std libs
11 import sys
12 import os
13 import os.path
14 import csv
15 import tempfile
16 import logging
17 import hashlib
18 import platform
19 import subprocess
20 import decimal
21 import getpass
22 import io
23 import functools
24 import json
25 import shutil
26 import zipfile
27 import datetime as pydt
28 import re as regex
29 import xml.sax.saxutils as xml_tools
30 # old:
31 import pickle, zlib
32 # docutils
33 du_core = None
34
35
36 # GNUmed libs
37 if __name__ == '__main__':
38 sys.path.insert(0, '../../')
39 from Gnumed.pycommon import gmBorg
40
41
42 _log = logging.getLogger('gm.tools')
43
44 # CAPitalization modes:
45 ( CAPS_NONE, # don't touch it
46 CAPS_FIRST, # CAP first char, leave rest as is
47 CAPS_ALLCAPS, # CAP all chars
48 CAPS_WORDS, # CAP first char of every word
49 CAPS_NAMES, # CAP in a way suitable for names (tries to be smart)
50 CAPS_FIRST_ONLY # CAP first char, lowercase the rest
51 ) = range(6)
52
53
54 u_currency_pound = '\u00A3' # Pound sign
55 u_currency_sign = '\u00A4' # generic currency sign
56 u_currency_yen = '\u00A5' # Yen sign
57 u_right_double_angle_quote = '\u00AB' # <<
58 u_registered_trademark = '\u00AE'
59 u_plus_minus = '\u00B1'
60 u_superscript_one = '\u00B9' # ^1
61 u_left_double_angle_quote = '\u00BB' # >>
62 u_one_quarter = '\u00BC'
63 u_one_half = '\u00BD'
64 u_three_quarters = '\u00BE'
65 u_multiply = '\u00D7' # x
66 u_greek_ALPHA = '\u0391'
67 u_greek_alpha = '\u03b1'
68 u_greek_OMEGA = '\u03A9'
69 u_greek_omega = '\u03c9'
70 u_dagger = '\u2020'
71 u_triangular_bullet = '\u2023' # triangular bullet (>)
72 u_ellipsis = '\u2026' # ...
73 u_euro = '\u20AC' # EURO sign
74 u_numero = '\u2116' # No. / # sign
75 u_down_left_arrow = '\u21B5' # <-'
76 u_left_arrow = '\u2190' # <--
77 u_up_arrow = '\u2191'
78 u_arrow2right = '\u2192' # -->
79 u_down_arrow = '\u2193'
80 u_left_arrow_with_tail = '\u21a2' # <--<
81 u_arrow2right_from_bar = '\u21a6' # |->
82 u_arrow2right_until_vertical_bar = '\u21e5' # -->|
83 u_sum = '\u2211' # sigma
84 u_almost_equal_to = '\u2248' # approximately / nearly / roughly
85 u_corresponds_to = '\u2258'
86 u_infinity = '\u221E'
87 u_arrow2right_until_vertical_bar2 = '\u2b72' # -->|
88
89 u_diameter = '\u2300'
90 u_checkmark_crossed_out = '\u237B'
91 u_box_horiz_high = '\u23ba'
92 u_box_vert_left = '\u23b8'
93 u_box_vert_right = '\u23b9'
94
95 u_space_as_open_box = '\u2423'
96
97 u_box_horiz_single = '\u2500' # -
98 u_box_vert_light = '\u2502'
99 u_box_horiz_light_3dashes = '\u2504' # ...
100 u_box_vert_light_4dashes = '\u2506'
101 u_box_horiz_4dashes = '\u2508' # ....
102 u_box_T_right = '\u251c' # |-
103 u_box_T_left = '\u2524' # -|
104 u_box_T_down = '\u252c'
105 u_box_T_up = '\u2534'
106 u_box_plus = '\u253c'
107 u_box_top_double = '\u2550'
108 u_box_top_left_double_single = '\u2552'
109 u_box_top_right_double_single = '\u2555'
110 u_box_top_left_arc = '\u256d'
111 u_box_top_right_arc = '\u256e'
112 u_box_bottom_right_arc = '\u256f'
113 u_box_bottom_left_arc = '\u2570'
114 u_box_horiz_light_heavy = '\u257c'
115 u_box_horiz_heavy_light = '\u257e'
116
117 u_skull_and_crossbones = '\u2620'
118 u_caduceus = '\u2624'
119 u_frowning_face = '\u2639'
120 u_smiling_face = '\u263a'
121 u_black_heart = '\u2665'
122 u_female = '\u2640'
123 u_male = '\u2642'
124 u_male_female = '\u26a5'
125 u_chain = '\u26d3'
126
127 u_checkmark_thin = '\u2713'
128 u_checkmark_thick = '\u2714'
129 u_heavy_greek_cross = '\u271a'
130 u_arrow2right_thick = '\u2794'
131 u_writing_hand = '\u270d'
132 u_pencil_1 = '\u270e'
133 u_pencil_2 = '\u270f'
134 u_pencil_3 = '\u2710'
135 u_latin_cross = '\u271d'
136
137 u_arrow2right_until_black_diamond = '\u291e' # ->*
138
139 u_kanji_yen = '\u5186' # Yen kanji
140 u_replacement_character = '\ufffd'
141 u_link_symbol = '\U0001F517' # U+1F517 LINK SYMBOL; a \u escape takes exactly 4 hex digits, so '\u1f517' would not produce this character
142
143
144 _kB = 1024
145 _MB = 1024 * _kB
146 _GB = 1024 * _MB
147 _TB = 1024 * _GB
148 _PB = 1024 * _TB
149
150
151 _client_version = None
152
153
154 _GM_TITLE_PREFIX = 'GMd'
155
156 #===========================================================================
158
159 print(".========================================================")
160 print("| Unhandled exception caught !")
161 print("| Type :", t)
162 print("| Value:", v)
163 print("`========================================================")
164 _log.critical('unhandled exception caught', exc_info = (t,v,tb))
165 sys.__excepthook__(t,v,tb)
166
167 #===========================================================================
168 # path level operations
169 #---------------------------------------------------------------------------
171 """Create directory.
172
173 - creates parent dirs if necessary
174 - does not fail if directory exists
175 <mode>: numeric, say 0o0700 for "-rwx------"
176 """
177 if os.path.isdir(directory):
178 if mode is None:
179 return True
180
181 changed = False
182 old_umask = os.umask(0)
183 try:
184 # does not WORK !
185 #os.chmod(directory, mode, follow_symlinks = (os.chmod in os.supports_follow_symlinks)) # can't do better
186 os.chmod(directory, mode)
187 changed = True
188 finally:
189 os.umask(old_umask)
190 return changed
191
192 if mode is None:
193 os.makedirs(directory)
194 return True
195
196 old_umask = os.umask(0)
197 try:
198 os.makedirs(directory, mode)
199 finally:
200 os.umask(old_umask)
201 return True
202
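# Usage sketch. The def line is not part of this listing; the keyword names
# follow the body above (directory, mode), so treat the exact signature as an assumption:
#
#   mkdir(directory = '/tmp/my-app/cache', mode = 0o0700)   # creates parent dirs as needed, returns True
#   mkdir(directory = '/tmp/my-app/cache')                  # already exists -> returns True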
203 #---------------------------------------------------------------------------
205 assert (directory is not None), '<directory> must not be None'
206
207 README_fname = '.00-README.GNUmed' + coalesce(suffix, '.dir')
208 README_path = os.path.abspath(os.path.expanduser(os.path.join(directory, README_fname)))
209 _log.debug('%s', README_path)
210 if readme is None:
211 _log.debug('no README text, boilerplate only')
212 try:
213 README = open(README_path, mode = 'wt', encoding = 'utf8')
214 except Exception:
215 return False
216
217 line = 'GNUmed v%s -- %s' % (_client_version, pydt.datetime.now().strftime('%c'))
218 len_sep = len(line)
219 README.write(line)
220 README.write('\n')
221 line = README_path
222 len_sep = max(len_sep, len(line))
223 README.write(line)
224 README.write('\n')
225 README.write('-' * len_sep)
226 README.write('\n')
227 README.write('\n')
228 README.write(coalesce(readme, '')) # <readme> may be None (boilerplate only)
229 README.write('\n')
230 README.close()
231 return True
232
233 #---------------------------------------------------------------------------
235 #-------------------------------
236 def _on_rm_error(func, path, exc):
237 _log.error('error while shutil.rmtree(%s)', path, exc_info=exc)
238 return True
239
240 #-------------------------------
241 error_count = 0
242 try:
243 shutil.rmtree(directory, False, _on_rm_error)
244 except Exception:
245 _log.exception('cannot shutil.rmtree(%s)', directory)
246 error_count += 1
247 return error_count
248
249 #---------------------------------------------------------------------------
251 _log.debug('cleaning out [%s]', directory)
252 try:
253 items = os.listdir(directory)
254 except OSError:
255 return False
256 for item in items:
257 # attempt file/link removal and ignore (but log) errors
258 full_item = os.path.join(directory, item)
259 try:
260 os.remove(full_item)
261 except OSError: # as per the docs, this is a directory
262 _log.debug('[%s] seems to be a subdirectory', full_item)
263 errors = rmdir(full_item)
264 if errors > 0:
265 return False
266 except Exception:
267 _log.exception('cannot os.remove(%s) [a file or a link]', full_item)
268 return False
269
270 return True
271
272 #---------------------------------------------------------------------------
274 if base_dir is None:
275 base_dir = gmPaths().tmp_dir
276 else:
277 if not os.path.isdir(base_dir):
278 mkdir(base_dir, mode = 0o0700) # (invoking user only)
279 if prefix is None:
280 prefix = 'sndbx-'
281 return tempfile.mkdtemp(prefix = prefix, suffix = '', dir = base_dir)
282
283 #---------------------------------------------------------------------------
286
287 #---------------------------------------------------------------------------
289 # /home/user/dir/ -> dir
290 # /home/user/dir -> dir
291 return os.path.basename(os.path.normpath(directory)) # normpath removes trailing slashes if any
292
293 #---------------------------------------------------------------------------
295 try:
296 empty = (len(os.listdir(directory)) == 0)
297 except OSError as exc:
298 if exc.errno != 2: # no such file
299 raise
300 empty = None
301 return empty
302
303 #---------------------------------------------------------------------------
305 """Copy the *content* of <directory> *into* <target_directory>
306 which is created if need be.
307 """
308 assert (directory is not None), 'source <directory> should not be None'
309 _log.debug('copying content of [%s] into [%s]', directory, target_directory)
310 try:
311 items = os.listdir(directory)
312 except OSError:
313 return None
314
315 for item in items:
316 full_item = os.path.join(directory, item)
317 if os.path.isdir(full_item):
318 target_subdir = os.path.join(target_directory, item)
319 try:
320 shutil.copytree(full_item, target_subdir)
321 except Exception:
322 _log.exception('cannot copy subdir [%s]', full_item)
323 return None
324 else:
325 try:
326 shutil.copy2(full_item, target_directory)
327 except Exception:
328 _log.exception('cannot copy file [%s]', full_item)
329 return None
330
331 return target_directory
332
333 #---------------------------------------------------------------------------
334 #---------------------------------------------------------------------------
336 """This class provides the following paths:
337
338 .home_dir user home
339 .local_base_dir script installation dir
340 .working_dir current dir
341 .user_config_dir
342 .system_config_dir
343 .system_app_data_dir (not writable)
344 .tmp_dir instance-local
345 .user_tmp_dir user-local (NOT per instance)
346 .bytea_cache_dir caches downloaded BYTEA data
347 """
349 """Setup pathes.
350
351 <app_name> will default to (name of the script - .py)
352 """
353 try:
354 self.already_inited
355 return
356 except AttributeError:
357 pass
358
359 self.init_paths(app_name=app_name, wx=wx)
360 self.already_inited = True
361
362 #--------------------------------------
363 # public API
364 #--------------------------------------
366
367 if wx is None:
368 _log.debug('wxPython not available')
369 _log.debug('detecting paths directly')
370
371 if app_name is None:
372 app_name, ext = os.path.splitext(os.path.basename(sys.argv[0]))
373 _log.info('app name detected as [%s]', app_name)
374 else:
375 _log.info('app name passed in as [%s]', app_name)
376
377 # the user home, doesn't work in Wine so work around that
378 self.__home_dir = None
379
380 # where the main script (the "binary") is installed
381 if getattr(sys, 'frozen', False):
382 _log.info('frozen app, installed into temporary path')
383 # this would find the path of *THIS* file
384 #self.local_base_dir = os.path.dirname(__file__)
385 # while this is documented on the web, the ${_MEIPASS2} does not exist
386 #self.local_base_dir = os.environ.get('_MEIPASS2')
387 # this is what Martin Zibricky <mzibr.public@gmail.com> told us to use
388 # when asking about this on pyinstaller@googlegroups.com
389 #self.local_base_dir = sys._MEIPASS
390 # however, we are --onedir, so we should look at sys.executable
391 # as per the pyinstaller manual
392 self.local_base_dir = os.path.dirname(sys.executable)
393 else:
394 self.local_base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
395
396 # the current working dir at the OS
397 self.working_dir = os.path.abspath(os.curdir)
398
399 # user-specific config dir, usually below the home dir
400 mkdir(os.path.join(self.home_dir, '.%s' % app_name))
401 self.user_config_dir = os.path.join(self.home_dir, '.%s' % app_name)
402
403 # system-wide config dir, usually below /etc/ under UN*X
404 try:
405 self.system_config_dir = os.path.join('/etc', app_name)
406 except ValueError:
407 #self.system_config_dir = self.local_base_dir
408 self.system_config_dir = self.user_config_dir
409
410 # system-wide application data dir
411 try:
412 self.system_app_data_dir = os.path.join(sys.prefix, 'share', app_name)
413 except ValueError:
414 self.system_app_data_dir = self.local_base_dir
415
416 # temporary directory
417 try:
418 self.__tmp_dir_already_set
419 _log.debug('temp dir already set')
420 except AttributeError:
421 _log.info('temp file prefix: %s', tempfile.gettempprefix())
422 _log.info('initial (user level) temp dir: %s', tempfile.gettempdir())
423 bytes_free = shutil.disk_usage(tempfile.gettempdir()).free
424 _log.info('free disk space for temp dir: %s (%s bytes)', size2str(size = bytes_free), bytes_free)
425 # $TMP/gnumed-$USER/
426 self.user_tmp_dir = os.path.join(tempfile.gettempdir(), '%s-%s' % (app_name, getpass.getuser()))
427 mkdir(self.user_tmp_dir, 0o700)
428 _log.info('intermediate (app+user level) temp dir: %s', self.user_tmp_dir)
429 # $TMP/gnumed-$USER/g-$UNIQUE/
430 tempfile.tempdir = self.user_tmp_dir # tell mkdtemp about intermediate dir
431 self.tmp_dir = tempfile.mkdtemp(prefix = 'g-') # will set tempfile.tempdir as side effect
432 _log.info('final (app instance level) temp dir: %s', tempfile.gettempdir())
433 create_directory_description_file(directory = self.tmp_dir, readme = 'client instance tmp dir')
434
435 # BYTEA cache dir
436 cache_dir = os.path.join(self.user_tmp_dir, '.bytea_cache')
437 try:
438 stat = os.stat(cache_dir)
439 _log.warning('reusing BYTEA cache dir: %s', cache_dir)
440 _log.debug(stat)
441 except FileNotFoundError:
442 mkdir(cache_dir, mode = 0o0700)
443 self.bytea_cache_dir = cache_dir
444 create_directory_description_file(directory = self.bytea_cache_dir, readme = 'cache dir for BYTEA data')
445
446 self.__log_paths()
447 if wx is None:
448 return True
449
450 # retry with wxPython
451 _log.debug('re-detecting paths with wxPython')
452
453 std_paths = wx.StandardPaths.Get()
454 _log.info('wxPython app name is [%s]', wx.GetApp().GetAppName())
455
456 # user-specific config dir, usually below the home dir
457 mkdir(os.path.join(std_paths.GetUserConfigDir(), '.%s' % app_name))
458 self.user_config_dir = os.path.join(std_paths.GetUserConfigDir(), '.%s' % app_name)
459
460 # system-wide config dir, usually below /etc/ under UN*X
461 try:
462 tmp = std_paths.GetConfigDir()
463 if not tmp.endswith(app_name):
464 tmp = os.path.join(tmp, app_name)
465 self.system_config_dir = tmp
466 except ValueError:
467 # leave it at what it was from direct detection
468 pass
469
470 # system-wide application data dir
471 # Robin attests that the following doesn't always
472 # give sane values on Windows, so IFDEF it
473 if 'wxMSW' in wx.PlatformInfo:
474 _log.warning('this platform (wxMSW) sometimes returns a broken value for the system-wide application data dir')
475 else:
476 try:
477 self.system_app_data_dir = std_paths.GetDataDir()
478 except ValueError:
479 pass
480
481 self.__log_paths()
482 return True
483
484 #--------------------------------------
486 _log.debug('sys.argv[0]: %s', sys.argv[0])
487 _log.debug('sys.executable: %s', sys.executable)
488 _log.debug('sys._MEIPASS: %s', getattr(sys, '_MEIPASS', '<not found>'))
489 _log.debug('os.environ["_MEIPASS2"]: %s', os.environ.get('_MEIPASS2', '<not found>'))
490 _log.debug('__file__ : %s', __file__)
491 _log.debug('local application base dir: %s', self.local_base_dir)
492 _log.debug('current working dir: %s', self.working_dir)
493 _log.debug('user home dir: %s', self.home_dir)
494 _log.debug('user-specific config dir: %s', self.user_config_dir)
495 _log.debug('system-wide config dir: %s', self.system_config_dir)
496 _log.debug('system-wide application data dir: %s', self.system_app_data_dir)
497 _log.debug('temporary dir (user): %s', self.user_tmp_dir)
498 _log.debug('temporary dir (instance): %s', self.tmp_dir)
499 _log.debug('temporary dir (tempfile.tempdir): %s', tempfile.tempdir)
500 _log.debug('temporary dir (tempfile.gettempdir()): %s', tempfile.gettempdir())
501 _log.debug('BYTEA cache dir: %s', self.bytea_cache_dir)
502
503 #--------------------------------------
504 # properties
505 #--------------------------------------
507 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
508 msg = '[%s:user_config_dir]: invalid path [%s]' % (self.__class__.__name__, path)
509 _log.error(msg)
510 raise ValueError(msg)
511 self.__user_config_dir = path
512
515
516 user_config_dir = property(_get_user_config_dir, _set_user_config_dir)
517 #--------------------------------------
519 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
520 msg = '[%s:system_config_dir]: invalid path [%s]' % (self.__class__.__name__, path)
521 _log.error(msg)
522 raise ValueError(msg)
523 self.__system_config_dir = path
524
527
528 system_config_dir = property(_get_system_config_dir, _set_system_config_dir)
529 #--------------------------------------
531 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
532 msg = '[%s:system_app_data_dir]: invalid path [%s]' % (self.__class__.__name__, path)
533 _log.error(msg)
534 raise ValueError(msg)
535 self.__system_app_data_dir = path
536
539
540 system_app_data_dir = property(_get_system_app_data_dir, _set_system_app_data_dir)
541 #--------------------------------------
544
546 if self.__home_dir is not None:
547 return self.__home_dir
548
549 tmp = os.path.expanduser('~')
550 if tmp == '~':
551 _log.error('this platform does not expand ~ properly')
552 try:
553 tmp = os.environ['USERPROFILE']
554 except KeyError:
555 _log.error('cannot access $USERPROFILE in environment')
556
557 if not (
558 os.access(tmp, os.R_OK)
559 and
560 os.access(tmp, os.X_OK)
561 and
562 os.access(tmp, os.W_OK)
563 ):
564 msg = '[%s:home_dir]: invalid path [%s]' % (self.__class__.__name__, tmp)
565 _log.error(msg)
566 raise ValueError(msg)
567
568 self.__home_dir = tmp
569 return self.__home_dir
570
571 home_dir = property(_get_home_dir, _set_home_dir)
572
573 #--------------------------------------
575 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
576 msg = '[%s:tmp_dir]: invalid path [%s]' % (self.__class__.__name__, path)
577 _log.error(msg)
578 raise ValueError(msg)
579 _log.debug('previous temp dir: %s', tempfile.gettempdir())
580 self.__tmp_dir = path
581 tempfile.tempdir = self.__tmp_dir
582 _log.debug('new temp dir: %s', tempfile.gettempdir())
583 self.__tmp_dir_already_set = True
584
587
588 tmp_dir = property(_get_tmp_dir, _set_tmp_dir)
589
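# Usage sketch. The class and __init__ lines are not part of this listing; judging
# by the gmBorg import and the already_inited guard, gmPaths is presumably a Borg
# (shared state across instances), with <app_name> and <wx> as init parameters:
#
#   paths = gmPaths(app_name = 'gnumed')
#   print(paths.home_dir)          # user home
#   print(paths.user_config_dir)   # e.g. ~/.gnumed
#   print(paths.tmp_dir)           # per-instance temp dir below $TMP/gnumed-$USER/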
590 #===========================================================================
591 # file related tools
592 #---------------------------------------------------------------------------
593 -def recode_file(source_file=None, target_file=None, source_encoding='utf8', target_encoding=None, base_dir=None, error_mode='replace'):
594 if target_encoding is None:
595 return source_file
596
597 if target_encoding == source_encoding:
598 return source_file
599
600 if target_file is None:
601 target_file = get_unique_filename (
602 prefix = '%s-%s_%s-' % (fname_stem(source_file), source_encoding, target_encoding),
603 suffix = fname_extension(source_file, '.txt'),
604 tmp_dir = base_dir
605 )
606 _log.debug('[%s] -> [%s] (%s -> %s)', source_encoding, target_encoding, source_file, target_file)
607 in_file = io.open(source_file, mode = 'rt', encoding = source_encoding)
608 out_file = io.open(target_file, mode = 'wt', encoding = target_encoding, errors = error_mode)
609 for line in in_file:
610 out_file.write(line)
611 out_file.close()
612 in_file.close()
613 return target_file
614
615 #---------------------------------------------------------------------------
617 _log.debug('unzipping [%s] -> [%s]', archive_name, target_dir)
618 success = False
619 try:
620 with zipfile.ZipFile(archive_name) as archive:
621 archive.extractall(target_dir)
622 success = True
623 except Exception:
624 _log.exception('cannot unzip')
625 return False
626 if remove_archive:
627 remove_file(archive_name)
628 return success
629
630 #---------------------------------------------------------------------------
632 if not os.path.lexists(filename):
633 return True
634
635 # attempt file removal and ignore (but log) errors
636 try:
637 os.remove(filename)
638 return True
639
640 except Exception:
641 if log_error:
642 _log.exception('cannot os.remove(%s)', filename)
643
644 if force:
645 tmp_name = get_unique_filename(tmp_dir = fname_dir(filename))
646 _log.debug('attempting os.replace(%s -> %s)', filename, tmp_name)
647 try:
648 os.replace(filename, tmp_name)
649 return True
650
651 except Exception:
652 if log_error:
653 _log.exception('cannot os.replace(%s)', filename)
654
655 return False
656
657 #---------------------------------------------------------------------------
659 blocksize = 2**10 * 128 # 128 kB, a multiple of MD5's 64-byte (512-bit) block size
660 _log.debug('md5(%s): <%s> byte blocks', filename, blocksize)
661 f = io.open(filename, mode = 'rb')
662 md5 = hashlib.md5()
663 while True:
664 data = f.read(blocksize)
665 if not data:
666 break
667 md5.update(data)
668 f.close()
669 _log.debug('md5(%s): %s', filename, md5.hexdigest())
670 if return_hex:
671 return md5.hexdigest()
672
673 return md5.digest()
674
675 #---------------------------------------------------------------------------
677 _log.debug('chunked_md5(%s, chunk_size=%s bytes)', filename, chunk_size)
678 md5_concat = ''
679 f = open(filename, 'rb')
680 while True:
681 md5 = hashlib.md5()
682 data = f.read(chunk_size)
683 if not data:
684 break
685 md5.update(data)
686 md5_concat += md5.hexdigest()
687 f.close()
688 md5 = hashlib.md5()
689 md5.update(md5_concat.encode('utf8')) # hashlib needs bytes; md5_concat is a str of hex digests
690 hex_digest = md5.hexdigest()
691 _log.debug('md5("%s"): %s', md5_concat, hex_digest)
692 return hex_digest
693
694 #---------------------------------------------------------------------------
695 default_csv_reader_rest_key = 'list_of_values_of_unknown_fields'
696
698 try:
699 is_dict_reader = kwargs['dict']
700 del kwargs['dict']
701 except KeyError:
702 is_dict_reader = False
703
704 if is_dict_reader:
705 kwargs['restkey'] = default_csv_reader_rest_key
706 return csv.DictReader(unicode_csv_data, dialect=dialect, **kwargs)
707 return csv.reader(unicode_csv_data, dialect=dialect, **kwargs)
708
709
710
711
715
716 #def utf_8_encoder(unicode_csv_data):
717 # for line in unicode_csv_data:
718 # yield line.encode('utf-8')
719
721
722 # csv.py doesn't do Unicode; encode temporarily as UTF-8:
723 try:
724 is_dict_reader = kwargs['dict']
725 del kwargs['dict']
726 if is_dict_reader is not True:
727 raise KeyError
728 kwargs['restkey'] = default_csv_reader_rest_key
729 csv_reader = csv.DictReader(unicode2charset_encoder(unicode_csv_data), dialect=dialect, **kwargs)
730 except KeyError:
731 is_dict_reader = False
732 csv_reader = csv.reader(unicode2charset_encoder(unicode_csv_data), dialect=dialect, **kwargs)
733
734 for row in csv_reader:
735 # decode ENCODING back to Unicode, cell by cell:
736 if is_dict_reader:
737 for key in row.keys():
738 if key == default_csv_reader_rest_key:
739 old_data = row[key]
740 new_data = []
741 for val in old_data:
742 new_data.append(str(val, encoding))
743 row[key] = new_data
744 if default_csv_reader_rest_key not in csv_reader.fieldnames:
745 csv_reader.fieldnames.append(default_csv_reader_rest_key)
746 else:
747 row[key] = str(row[key], encoding)
748 yield row
749 else:
750 yield [ str(cell, encoding) for cell in row ]
751 #yield [str(cell, 'utf-8') for cell in row]
752
753 #---------------------------------------------------------------------------
755 """Normalizes unicode, removes non-alpha characters, converts spaces to underscores."""
756
757 dir_part, name_part = os.path.split(filename)
758 if name_part == '':
759 return filename
760
761 import unicodedata
762 name_part = unicodedata.normalize('NFKD', name_part)
763 # remove everything not in group []
764 name_part = regex.sub (
765 r'[^.\w\s[\]()%§+-]',
766 '',
767 name_part,
768 flags = regex.UNICODE
769 ).strip()
770 # translate whitespace to underscore
771 name_part = regex.sub (
772 r'\s+',
773 '_',
774 name_part,
775 flags = regex.UNICODE
776 )
777 return os.path.join(dir_part, name_part)
778
779 #---------------------------------------------------------------------------
781 """/home/user/dir/filename.ext -> filename"""
782 return os.path.splitext(os.path.basename(filename))[0]
783
784 #---------------------------------------------------------------------------
786 """/home/user/dir/filename.ext -> /home/user/dir/filename"""
787 return os.path.splitext(filename)[0]
788
789 #---------------------------------------------------------------------------
791 """ /home/user/dir/filename.ext -> .ext
792 '' or '.' -> fallback if any else ''
793 """
794 ext = os.path.splitext(filename)[1]
795 if ext.strip() not in ['.', '']:
796 return ext
797 if fallback is None:
798 return ''
799 return fallback
800
801 #---------------------------------------------------------------------------
805
806 #---------------------------------------------------------------------------
810
811 #---------------------------------------------------------------------------
813 """This function has a race condition between
814 its file.close()
815 and actually
816 using the filename in callers.
817
818 The file will NOT exist after calling this function.
819 """
820 if tmp_dir is None:
821 gmPaths() # setup tmp dir if necessary
822 else:
823 if (
824 not os.access(tmp_dir, os.F_OK)
825 or
826 not os.access(tmp_dir, os.X_OK | os.W_OK)
827 ):
828 _log.warning('cannot os.access() temporary dir [%s], using system default', tmp_dir)
829 tmp_dir = None
830
831 if include_timestamp:
832 ts = pydt.datetime.now().strftime('%m%d-%H%M%S-')
833 else:
834 ts = ''
835
836 kwargs = {
837 'dir': tmp_dir,
838 # make sure file gets deleted as soon as
839 # .close()d so we can "safely" open it again
840 'delete': True
841 }
842
843 if prefix is None:
844 kwargs['prefix'] = 'gm-%s' % ts
845 else:
846 kwargs['prefix'] = prefix + ts
847
848 if suffix in [None, '']:
849 kwargs['suffix'] = '.tmp'
850 else:
851 if not suffix.startswith('.'):
852 suffix = '.' + suffix
853 kwargs['suffix'] = suffix
854
855 f = tempfile.NamedTemporaryFile(**kwargs)
856 filename = f.name
857 f.close()
858
859 return filename
860
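# Usage sketch (keyword names taken from the body above; the def line is not shown).
# Remember the race condition documented in the docstring: the file does NOT exist
# after the call, its name was only briefly reserved:
#
#   fname = get_unique_filename(prefix = 'export-', suffix = '.pdf')
#   # -> something like <tmp dir>/export-<timestamp>-<random>.pdf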
861 #---------------------------------------------------------------------------
863 import ctypes
864 #windows_create_symlink = ctypes.windll.kernel32.CreateSymbolicLinkW
865 kernel32 = ctypes.WinDLL('kernel32', use_last_error = True)
866 windows_create_symlink = kernel32.CreateSymbolicLinkW
867 windows_create_symlink.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
868 windows_create_symlink.restype = ctypes.c_ubyte
869 if os.path.isdir(physical_name):
870 flags = 1
871 else:
872 flags = 0
873 ret_code = windows_create_symlink(link_name, physical_name.replace('/', '\\'), flags)
874 _log.debug('ctypes.windll.kernel32.CreateSymbolicLinkW() [%s] exit code: %s', windows_create_symlink, ret_code)
875 if ret_code == 0:
876 raise ctypes.WinError()
877 return ret_code
878
879 #---------------------------------------------------------------------------
881
882 _log.debug('creating symlink (overwrite = %s):', overwrite)
883 _log.debug('link [%s] =>', link_name)
884 _log.debug('=> physical [%s]', physical_name)
885
886 if os.path.exists(link_name):
887 _log.debug('link exists')
888 if overwrite:
889 return True
890 return False
891
892 try:
893 os.symlink(physical_name, link_name)
894 except (AttributeError, NotImplementedError):
895 _log.debug('this Python does not have os.symlink(), trying via ctypes')
896 __make_symlink_on_windows(physical_name, link_name)
897 except PermissionError:
898 _log.exception('cannot create link')
899 return False
900 #except OSError:
901 # unprivileged on Windows
902 return True
903
904 #===========================================================================
905 -def import_module_from_directory(module_path=None, module_name=None, always_remove_path=False):
906 """Import a module from any location."""
907
908 _log.debug('CWD: %s', os.getcwd())
909
910 remove_path = always_remove_path or False
911 if module_path not in sys.path:
912 _log.info('appending to sys.path: [%s]' % module_path)
913 sys.path.append(module_path)
914 remove_path = True
915
916 _log.debug('will remove import path: %s', remove_path)
917
918 if module_name.endswith('.py'):
919 module_name = module_name[:-3]
920
921 try:
922 module = __import__(module_name)
923 except Exception:
924 _log.exception('cannot __import__() module [%s] from [%s]' % (module_name, module_path))
925 while module_path in sys.path:
926 sys.path.remove(module_path)
927 raise
928
929 _log.info('imported module [%s] as [%s]' % (module_name, module))
930 if remove_path:
931 while module_path in sys.path:
932 sys.path.remove(module_path)
933
934 return module
935
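# Usage sketch (the path and module name below are made up for illustration):
#
#   mod = import_module_from_directory(module_path = '/path/to/plugins', module_name = 'my_plugin.py')
#   # <module_path> is added to sys.path only as long as needed, '.py' is stripped from the name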
936 #===========================================================================
937 # text related tools
938 #---------------------------------------------------------------------------
940 if size == 1:
941 return template % _('1 Byte')
942 if size < 10 * _kB:
943 return template % _('%s Bytes') % size
944 if size < _MB:
945 return template % '%.1f kB' % (float(size) / _kB)
946 if size < _GB:
947 return template % '%.1f MB' % (float(size) / _MB)
948 if size < _TB:
949 return template % '%.1f GB' % (float(size) / _GB)
950 if size < _PB:
951 return template % '%.1f TB' % (float(size) / _TB)
952 return template % '%.1f PB' % (float(size) / _PB)
953
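# Usage sketch. The def line is not shown; these examples assume the default
# template is a plain '%s' and an English locale for the _() translations:
#
#   size2str(1)        -> '1 Byte'
#   size2str(2048)     -> '2048 Bytes'    (anything below 10 kB stays in Bytes)
#   size2str(524288)   -> '512.0 kB'
#   size2str(5 * _GB)  -> '5.0 GB'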
954 #---------------------------------------------------------------------------
956 if boolean is None:
957 return none_return
958 if boolean:
959 return true_return
960 if not boolean:
961 return false_return
962 raise ValueError('bool2subst(): <boolean> arg must be either of True, False, None')
963
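# Usage sketch (keyword names taken from the body above):
#
#   bool2subst(boolean = True, true_return = 'yes', false_return = 'no', none_return = '?')   -> 'yes'
#   bool2subst(boolean = None, true_return = 'yes', false_return = 'no', none_return = '?')   -> '?'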
964 #---------------------------------------------------------------------------
966 return bool2subst (
967 boolean = bool(boolean),
968 true_return = true_str,
969 false_return = false_str
970 )
971
972 #---------------------------------------------------------------------------
974 """Modelled after the SQL NULLIF function."""
975 if value is None:
976 return None
977
978 if strip_string:
979 stripped = value.strip()
980 else:
981 stripped = value
982 if stripped == none_equivalent:
983 return None
984
985 return value
986
987 #---------------------------------------------------------------------------
988 -def coalesce(value2test=None, return_instead=None, template4value=None, template4instead=None, none_equivalents=None, function4value=None, value2return=None):
989 """Modelled after the SQL coalesce function.
990
991 To be used to simplify constructs like:
992
993 if value2test is None (or in none_equivalents):
994 value = (template4instead % return_instead) or return_instead
995 else:
996 value = (template4value % value2test) or value2test
997 print value
998
999 @param value2test: the value to be tested for <None>
1000
1001 @param return_instead: the value to be returned if <value2test> *is* None
1002
1003 @param template4value: if <value2test> is returned, it is inserted into this template, which must contain one <%s>
1004
1005 @param template4instead: if <return_instead> is returned, it is inserted into this template, which must contain one <%s>
1006
1007 @param value2return: a *value* to return if <value2test> is NOT None, AND there's no <template4value>
1008
1009 example:
1010 function4value = ('strftime', '%Y-%m-%d')
1011
1012 Ideas:
1013 - list of return_insteads: initial, [return_instead, template], [return_instead, template], [return_instead, template], template4value, ...
1014 """
1015 if none_equivalents is None:
1016 none_equivalents = [None]
1017
1018 if value2test in none_equivalents:
1019 if template4instead is None:
1020 return return_instead
1021 return template4instead % return_instead
1022
1023 # at this point, value2test was not equivalent to None
1024
1025 # 1) explicit value to return supplied ?
1026 if value2return is not None:
1027 return value2return
1028
1029 value2return = value2test
1030 # 2) function supplied to be applied to the value ?
1031 if function4value is not None:
1032 funcname, args = function4value
1033 func = getattr(value2test, funcname)
1034 value2return = func(args)
1035
1036 # 3) template supplied to be applied to the value ?
1037 if template4value is None:
1038 return value2return
1039
1040 try:
1041 return template4value % value2return
1042 except TypeError:
1043 # except (TypeError, ValueError):
1044 # this should go, actually, only needed because "old" calls
1045 # to coalesce will still abuse template4value as explicit value2return,
1046 # relying on the replacement to above to fail
1047 if hasattr(_log, 'log_stack_trace'):
1048 _log.log_stack_trace(message = 'deprecated use of <template4value> for <value2return>')
1049 else:
1050 _log.error('deprecated use of <template4value> for <value2return>')
1051 _log.error(locals())
1052 return template4value
1053
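# Usage sketch:
#
#   coalesce(None, 'fallback')                                  -> 'fallback'
#   coalesce('value', 'fallback')                               -> 'value'
#   coalesce('value', 'fallback', template4value = 'got: %s')   -> 'got: value'
#   coalesce(pydt.datetime(2020, 1, 31), '?', function4value = ('strftime', '%Y-%m-%d'))   -> '2020-01-31'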
1054 #---------------------------------------------------------------------------
1056 val = match_obj.group(0).lower()
1057 if val in ['von', 'van', 'de', 'la', 'l', 'der', 'den']: # FIXME: this needs to expand, configurable ?
1058 return val
1059 buf = list(val)
1060 buf[0] = buf[0].upper()
1061 for part in ['mac', 'mc', 'de', 'la']:
1062 if len(val) > len(part) and val[:len(part)] == part:
1063 buf[len(part)] = buf[len(part)].upper()
1064 return ''.join(buf)
1065
1066 #---------------------------------------------------------------------------
1068 """Capitalize the first character but leave the rest alone.
1069
1070 Note that this may have locale-related issues.
1071 For UTF strings, however, it should just work.
1072 """
1073 if (mode is None) or (mode == CAPS_NONE):
1074 return text
1075
1076 if len(text) == 0:
1077 return text
1078
1079 if mode == CAPS_FIRST:
1080 if len(text) == 1:
1081 return text[0].upper()
1082 return text[0].upper() + text[1:]
1083
1084 if mode == CAPS_ALLCAPS:
1085 return text.upper()
1086
1087 if mode == CAPS_FIRST_ONLY:
1088 # if len(text) == 1:
1089 # return text[0].upper()
1090 return text[0].upper() + text[1:].lower()
1091
1092 if mode == CAPS_WORDS:
1093 #return regex.sub(ur'(\w)(\w+)', lambda x: x.group(1).upper() + x.group(2).lower(), text)
1094 return regex.sub(r'(\w)(\w+)', lambda x: x.group(1).upper() + x.group(2).lower(), text)
1095
1096 if mode == CAPS_NAMES:
1097 #return regex.sub(r'\w+', __cap_name, text)
1098 return capitalize(text=text, mode=CAPS_FIRST) # until fixed
1099
1100 print("ERROR: invalid capitalization mode: [%s], leaving input as is" % mode)
1101 return text
1102
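# Usage sketch (keyword names taken from the body above):
#
#   capitalize(text = 'hello world', mode = CAPS_FIRST)        -> 'Hello world'
#   capitalize(text = 'hello WORLD', mode = CAPS_FIRST_ONLY)   -> 'Hello world'
#   capitalize(text = 'hello world', mode = CAPS_WORDS)        -> 'Hello World'
#   capitalize(text = 'hello world', mode = CAPS_ALLCAPS)      -> 'HELLO WORLD'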
1103 #---------------------------------------------------------------------------
1105
1106 if isinstance(initial, decimal.Decimal):
1107 return True, initial
1108
1109 val = initial
1110
1111 # float ? -> to string first
1112 if type(val) == type(float(1.4)):
1113 val = str(val)
1114
1115 # string ? -> "," to "."
1116 if isinstance(val, str):
1117 val = val.replace(',', '.', 1)
1118 val = val.strip()
1119
1120 try:
1121 d = decimal.Decimal(val)
1122 return True, d
1123 except (TypeError, decimal.InvalidOperation):
1124 return False, val
1125
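# Usage sketch (keyword name taken from the body above); returns a
# (success, value) tuple and accepts "," as the decimal separator:
#
#   input2decimal(initial = '1,25')   -> (True, Decimal('1.25'))
#   input2decimal(initial = 1.5)      -> (True, Decimal('1.5'))
#   input2decimal(initial = 'abc')    -> (False, 'abc')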
1126 #---------------------------------------------------------------------------
1128
1129 val = initial
1130
1131 # string ? -> "," to "."
1132 if isinstance(val, str):
1133 val = val.replace(',', '.', 1)
1134 val = val.strip()
1135
1136 try:
1137 int_val = int(val)
1138 except (TypeError, ValueError):
1139 _log.exception('int(%s) failed', val)
1140 return False, initial
1141
1142 if minval is not None:
1143 if int_val < minval:
1144 _log.debug('%s < min (%s)', val, minval)
1145 return False, initial
1146 if maxval is not None:
1147 if int_val > maxval:
1148 _log.debug('%s > max (%s)', val, maxval)
1149 return False, initial
1150
1151 return True, int_val
1152
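# Usage sketch (keyword names taken from the body above); returns a (success, value) tuple:
#
#   input2int(initial = '42')                            -> (True, 42)
#   input2int(initial = '42', minval = 0, maxval = 10)   -> (False, '42')
#   input2int(initial = 'abc')                            -> (False, 'abc')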
1153 #---------------------------------------------------------------------------
1155 if remove_whitespace:
1156 text = text.lstrip()
1157 if not text.startswith(prefix):
1158 return text
1159
1160 text = text.replace(prefix, '', 1)
1161 if not remove_repeats:
1162 if remove_whitespace:
1163 return text.lstrip()
1164 return text
1165
1166 return strip_prefix(text, prefix, remove_repeats = True, remove_whitespace = remove_whitespace)
1167
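# Usage sketch (argument order inferred from the recursive call above; the def line is not shown):
#
#   strip_prefix('--verbose', '--')                              -> 'verbose'
#   strip_prefix('  xx-data', 'xx-', remove_whitespace = True)   -> 'data'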
1168 #---------------------------------------------------------------------------
1170 suffix_len = len(suffix)
1171 if remove_repeats:
1172 if remove_whitespace:
1173 while text.rstrip().endswith(suffix):
1174 text = text.rstrip()[:-suffix_len].rstrip()
1175 return text
1176 while text.endswith(suffix):
1177 text = text[:-suffix_len]
1178 return text
1179 if remove_whitespace:
1180 return text.rstrip()[:-suffix_len].rstrip()
1181 return text[:-suffix_len]
1182
1183 #---------------------------------------------------------------------------
1185 if lines is None:
1186 lines = text.split(eol)
1187
1188 while True:
1189 if lines[0].strip(eol).strip() != '':
1190 break
1191 lines = lines[1:]
1192
1193 if return_list:
1194 return lines
1195
1196 return eol.join(lines)
1197
1198 #---------------------------------------------------------------------------
1200 if lines is None:
1201 lines = text.split(eol)
1202
1203 while True:
1204 if lines[-1].strip(eol).strip() != '':
1205 break
1206 lines = lines[:-1]
1207
1208 if return_list:
1209 return lines
1210
1211 return eol.join(lines)
1212
1213 #---------------------------------------------------------------------------
1215 return strip_trailing_empty_lines (
1216 lines = strip_leading_empty_lines(lines = lines, text = text, eol = eol, return_list = True),
1217 text = None,
1218 eol = eol,
1219 return_list = return_list
1220 )
1221
1222 #---------------------------------------------------------------------------
1223 -def list2text(lines, initial_indent='', subsequent_indent='', eol='\n', strip_leading_empty_lines=True, strip_trailing_empty_lines=True, strip_trailing_whitespace=True, max_line_width=None):
1224
1225 if len(lines) == 0:
1226 return ''
1227
1228 if strip_leading_empty_lines:
1229 lines = globals()['strip_leading_empty_lines'](lines = lines, eol = eol, return_list = True) # the boolean argument shadows the module-level helper of the same name
1230
1231 if strip_trailing_empty_lines:
1232 lines = globals()['strip_trailing_empty_lines'](lines = lines, eol = eol, return_list = True) # the boolean argument shadows the module-level helper of the same name
1233
1234 if strip_trailing_whitespace:
1235 lines = [ l.rstrip() for l in lines ]
1236
1237 if max_line_width is not None:
1238 wrapped_lines = []
1239 for l in lines:
1240 wrapped_lines.extend(wrap(l, max_line_width).split('\n'))
1241 lines = wrapped_lines
1242
1243 indented_lines = [initial_indent + lines[0]]
1244 indented_lines.extend([ subsequent_indent + l for l in lines[1:] ])
1245
1246 return eol.join(indented_lines)
1247
1248 #---------------------------------------------------------------------------
1250 """A word-wrap function that preserves existing line breaks
1251 and most spaces in the text. Expects that existing line
1252 breaks are posix newlines (\n).
1253 """
1254 if width is None:
1255 return text
1256
1257 wrapped = initial_indent + functools.reduce (
1258 lambda line, word, width=width: '%s%s%s' % (
1259 line,
1260 ' \n'[(len(line) - line.rfind('\n') - 1 + len(word.split('\n',1)[0]) >= width)],
1261 word
1262 ),
1263 text.split(' ')
1264 )
1265 if subsequent_indent != '':
1266 wrapped = ('\n%s' % subsequent_indent).join(wrapped.split('\n'))
1267 if eol != '\n':
1268 wrapped = wrapped.replace('\n', eol)
1269 return wrapped
1270
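# Usage sketch (the def line is not shown; this assumes initial_indent and
# subsequent_indent default to '' and eol to '\n'):
#
#   wrap(text = 'one two three four', width = 10)
#   # -> 'one two\nthree four'
#   wrap(text = 'one two three four', width = 10, subsequent_indent = '  ')
#   # -> 'one two\n  three four'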
1271 #---------------------------------------------------------------------------
1272 -def unwrap(text=None, max_length=None, strip_whitespace=True, remove_empty_lines=True, line_separator = ' // '):
1273
1274 text = text.replace('\r', '')
1275 lines = text.split('\n')
1276 text = ''
1277 for line in lines:
1278
1279 if strip_whitespace:
1280 line = line.strip().strip('\t').strip()
1281
1282 if remove_empty_lines:
1283 if line == '':
1284 continue
1285
1286 text += ('%s%s' % (line, line_separator))
1287
1288 text = text.rstrip(line_separator)
1289
1290 if max_length is not None:
1291 text = text[:max_length]
1292
1293 text = text.rstrip(line_separator)
1294
1295 return text
1296
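# Usage sketch:
#
#   unwrap(text = 'first line\n\n  second line  \nthird')
#   # -> 'first line // second line // third'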
1297 #---------------------------------------------------------------------------
1299
1300 if len(text) <= max_length:
1301 return text
1302
1303 return text[:max_length-1] + u_ellipsis
1304
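# Usage sketch (argument order inferred from callers further down in this module):
#
#   shorten_text('a rather long line of text', 10)   -> 'a rather …'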
1305 #---------------------------------------------------------------------------
1306 -def shorten_words_in_line(text=None, max_length=None, min_word_length=None, ignore_numbers=True, ellipsis=u_ellipsis):
1307 if text is None:
1308 return None
1309 if max_length is None:
1310 max_length = len(text)
1311 else:
1312 if len(text) <= max_length:
1313 return text
1314 old_words = regex.split(r'\s+', text, flags = regex.UNICODE)
1315 no_old_words = len(old_words)
1316 max_word_length = max(min_word_length or 0, (max_length // no_old_words)) # min_word_length may be None (the default)
1317 words = []
1318 for word in old_words:
1319 if len(word) <= max_word_length:
1320 words.append(word)
1321 continue
1322 if ignore_numbers:
1323 tmp = word.replace('-', '').replace('+', '').replace('.', '').replace(',', '').replace('/', '').replace('&', '').replace('*', '')
1324 if tmp.isdigit():
1325 words.append(word)
1326 continue
1327 words.append(word[:max_word_length] + ellipsis)
1328 return ' '.join(words)
1329
1330 #---------------------------------------------------------------------------
1334
1335 #---------------------------------------------------------------------------
1336 -def tex_escape_string(text=None, replace_known_unicode=True, replace_eol=False, keep_visual_eol=False):
1337 """Check for special TeX characters and transform them.
1338
1339 replace_eol:
1340 replaces "\n" with "\\newline"
1341 keep_visual_eol:
1342 replaces "\n" with "\\newline \n" so that
1343 LaTeX knows to place a line break at this
1344 point while the visual formatting is also
1345 preserved in the LaTeX source (think
1346 multi-row table cells)
1347 """
1348 text = text.replace('\\', '\\textbackslash ') # trailing space keeps following text from merging into the command name; requires \usepackage{textcomp} in LaTeX source
1349 text = text.replace('^', '\\textasciicircum ')
1350 text = text.replace('~', '\\textasciitilde ')
1351
1352 text = text.replace('{', '\\{')
1353 text = text.replace('}', '\\}')
1354 text = text.replace('%', '\\%')
1355 text = text.replace('&', '\\&')
1356 text = text.replace('#', '\\#')
1357 text = text.replace('$', '\\$')
1358 text = text.replace('_', '\\_')
1359 if replace_eol:
1360 if keep_visual_eol:
1361 text = text.replace('\n', '\\newline \n')
1362 else:
1363 text = text.replace('\n', '\\newline ')
1364
1365 if replace_known_unicode:
1366 # this should NOT be replaced for Xe(La)Tex
1367 text = text.replace(u_euro, '\\EUR') # requires \usepackage{textcomp} in LaTeX source
1368 text = text.replace(u_sum, '$\\Sigma$')
1369
1370 return text
1371
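# Usage sketch:
#
#   tex_escape_string(text = 'rate: 5% of $100 & more')
#   # -> 'rate: 5\% of \$100 \& more'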
1372 #---------------------------------------------------------------------------
1374 global du_core
1375 if du_core is None:
1376 try:
1377 from docutils import core as du_core
1378 except ImportError:
1379 _log.warning('cannot turn ReST into LaTeX: docutils not installed')
1380 return tex_escape_string(text = rst_text)
1381
1382 parts = du_core.publish_parts (
1383 source = rst_text.replace('\\', '\\\\'),
1384 source_path = '<internal>',
1385 writer_name = 'latex',
1386 #destination_path = '/path/to/LaTeX-template/for/calculating/relative/links/template.tex',
1387 settings_overrides = {
1388 'input_encoding': 'unicode' # un-encoded unicode
1389 },
1390 enable_exit_status = True # how to use ?
1391 )
1392 return parts['body']
1393
1394 #---------------------------------------------------------------------------
1396 global du_core
1397 if du_core is None:
1398 try:
1399 from docutils import core as du_core
1400 except ImportError:
1401 _log.warning('cannot turn ReST into HTML: docutils not installed')
1402 return html_escape_string(text = rst_text, replace_eol=False, keep_visual_eol=False)
1403
1404 parts = du_core.publish_parts (
1405 source = rst_text.replace('\\', '\\\\'),
1406 source_path = '<internal>',
1407 writer_name = 'html',
1408 #destination_path = '/path/to/LaTeX-template/for/calculating/relative/links/template.tex',
1409 settings_overrides = {
1410 'input_encoding': 'unicode' # un-encoded unicode
1411 },
1412 enable_exit_status = True # how to use ?
1413 )
1414 return parts['body']
1415
1416 #---------------------------------------------------------------------------
1418 # a web search did not reveal anything else for Xe(La)Tex
1419 # as opposed to LaTeX, except true unicode chars
1420 return tex_escape_string(text = text, replace_known_unicode = False)
1421
1422 #---------------------------------------------------------------------------
1423 __html_escape_table = {
1424 "&": "&",
1425 '"': """,
1426 "'": "'",
1427 ">": ">",
1428 "<": "<",
1429 }
1430
1432 text = ''.join(__html_escape_table.get(char, char) for char in text)
1433 if replace_eol:
1434 if keep_visual_eol:
1435 text = text.replace('\n', '<br>\n')
1436 else:
1437 text = text.replace('\n', '<br>')
1438 return text
1439
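# Usage sketch (keyword names taken from the rst2html fallback call above; the
# def line itself is not shown in this listing):
#
#   html_escape_string(text = 'x < 10 & y > 2', replace_eol = False)
#   # -> 'x &lt; 10 &amp; y &gt; 2'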
1440 #---------------------------------------------------------------------------
1443
1444 #---------------------------------------------------------------------------
1446 if isinstance(obj, pydt.datetime):
1447 return obj.isoformat()
1448 raise TypeError('cannot json_serialize(%s)' % type(obj))
1449
1450 #---------------------------------------------------------------------------
1451 #---------------------------------------------------------------------------
1453 _log.info('comparing dict-likes: %s[%s] vs %s[%s]', coalesce(title1, '', '"%s" '), type(d1), coalesce(title2, '', '"%s" '), type(d2))
1454 try:
1455 d1 = dict(d1)
1456 except TypeError:
1457 pass
1458 try:
1459 d2 = dict(d2)
1460 except TypeError:
1461 pass
1462 keys_d1 = frozenset(d1.keys())
1463 keys_d2 = frozenset(d2.keys())
1464 different = False
1465 if len(keys_d1) != len(keys_d2):
1466 _log.info('different number of keys: %s vs %s', len(keys_d1), len(keys_d2))
1467 different = True
1468 for key in keys_d1:
1469 if key in keys_d2:
1470 if type(d1[key]) != type(d2[key]):
1471 _log.info('%25.25s: type(dict1) = %s = >>>%s<<<' % (key, type(d1[key]), d1[key]))
1472 _log.info('%25.25s type(dict2) = %s = >>>%s<<<' % ('', type(d2[key]), d2[key]))
1473 different = True
1474 continue
1475 if d1[key] == d2[key]:
1476 _log.info('%25.25s: both = >>>%s<<<' % (key, d1[key]))
1477 else:
1478 _log.info('%25.25s: dict1 = >>>%s<<<' % (key, d1[key]))
1479 _log.info('%25.25s dict2 = >>>%s<<<' % ('', d2[key]))
1480 different = True
1481 else:
1482 _log.info('%25.25s: %50.50s | <MISSING>' % (key, '>>>%s<<<' % d1[key]))
1483 different = True
1484 for key in keys_d2:
1485 if key in keys_d1:
1486 continue
1487 _log.info('%25.25s: %50.50s | %.50s' % (key, '<MISSING>', '>>>%s<<<' % d2[key]))
1488 different = True
1489 if different:
1490 _log.info('dict-likes appear to be different from each other')
1491 return False
1492 _log.info('dict-likes appear equal to each other')
1493 return True
1494
1495 #---------------------------------------------------------------------------
1496 -def format_dict_likes_comparison(d1, d2, title_left=None, title_right=None, left_margin=0, key_delim=' || ', data_delim=' | ', missing_string='=/=', difference_indicator='! ', ignore_diff_in_keys=None):
1497
1498 _log.info('comparing dict-likes: %s[%s] vs %s[%s]', coalesce(title_left, '', '"%s" '), type(d1), coalesce(title_right, '', '"%s" '), type(d2))
1499 append_type = False
1500 if None not in [title_left, title_right]:
1501 append_type = True
1502 type_left = type(d1)
1503 type_right = type(d2)
1504 if title_left is None:
1505 title_left = '%s' % type_left
1506 if title_right is None:
1507 title_right = '%s' % type_right
1508
1509 try: d1 = dict(d1)
1510 except TypeError: pass
1511 try: d2 = dict(d2)
1512 except TypeError: pass
1513 keys_d1 = d1.keys()
1514 keys_d2 = d2.keys()
1515 data = {}
1516 for key in keys_d1:
1517 data[key] = [d1[key], ' ']
1518 if key in d2:
1519 data[key][1] = d2[key]
1520 for key in keys_d2:
1521 if key in keys_d1:
1522 continue
1523 data[key] = [' ', d2[key]]
1524 max1 = max([ len('%s' % k) for k in keys_d1 ])
1525 max2 = max([ len('%s' % k) for k in keys_d2 ])
1526 max_len = max(max1, max2, len(_('<type>')))
1527 max_key_len_str = '%' + '%s.%s' % (max_len, max_len) + 's'
1528 max1 = max([ len('%s' % d1[k]) for k in keys_d1 ])
1529 max2 = max([ len('%s' % d2[k]) for k in keys_d2 ])
1530 max_data_len = min(max(max1, max2), 100)
1531 max_data_len_str = '%' + '%s.%s' % (max_data_len, max_data_len) + 's'
1532 diff_indicator_len_str = '%' + '%s.%s' % (len(difference_indicator), len(difference_indicator)) + 's'
1533 line_template = (' ' * left_margin) + diff_indicator_len_str + max_key_len_str + key_delim + max_data_len_str + data_delim + '%s'
1534
1535 lines = []
1536 # debugging:
1537 #lines.append(u' (40 regular spaces)')
1538 #lines.append((u' ' * 40) + u"(u' ' * 40)")
1539 #lines.append((u'%40.40s' % u'') + u"(u'%40.40s' % u'')")
1540 #lines.append((u'%40.40s' % u' ') + u"(u'%40.40s' % u' ')")
1541 #lines.append((u'%40.40s' % u'.') + u"(u'%40.40s' % u'.')")
1542 #lines.append(line_template)
1543 lines.append(line_template % ('', '', title_left, title_right))
1544 if append_type:
1545 lines.append(line_template % ('', _('<type>'), type_left, type_right))
1546
1547 if ignore_diff_in_keys is None:
1548 ignore_diff_in_keys = []
1549
1550 for key in keys_d1:
1551 append_type = False
1552 txt_left_col = '%s' % d1[key]
1553 try:
1554 txt_right_col = '%s' % d2[key]
1555 if type(d1[key]) != type(d2[key]):
1556 append_type = True
1557 except KeyError:
1558 txt_right_col = missing_string
1559 lines.append(line_template % (
1560 bool2subst (
1561 ((txt_left_col == txt_right_col) or (key in ignore_diff_in_keys)),
1562 '',
1563 difference_indicator
1564 ),
1565 key,
1566 shorten_text(txt_left_col, max_data_len),
1567 shorten_text(txt_right_col, max_data_len)
1568 ))
1569 if append_type:
1570 lines.append(line_template % (
1571 '',
1572 _('<type>'),
1573 shorten_text('%s' % type(d1[key]), max_data_len),
1574 shorten_text('%s' % type(d2[key]), max_data_len)
1575 ))
1576
1577 for key in keys_d2:
1578 if key in keys_d1:
1579 continue
1580 lines.append(line_template % (
1581 bool2subst((key in ignore_diff_in_keys), '', difference_indicator),
1582 key,
1583 shorten_text(missing_string, max_data_len),
1584 shorten_text('%s' % d2[key], max_data_len)
1585 ))
1586
1587 return lines
1588
1589 #---------------------------------------------------------------------------
1590 -def format_dict_like(d, relevant_keys=None, template=None, missing_key_template='<[%(key)s] MISSING>', left_margin=0, tabular=False, value_delimiters=('>>>', '<<<'), eol='\n', values2ignore=None):
1591 if values2ignore is None:
1592 values2ignore = []
1593 if template is not None:
1594 # all keys in template better exist in d
1595 try:
1596 return template % d
1597 except KeyError:
1598 # or else
1599 _log.exception('template contains %%()s key(s) which do not exist in data dict')
1600 # try to extend dict <d> to contain all required keys,
1601 # for that to work <relevant_keys> better list all
1602 # keys used in <template>
1603 if relevant_keys is not None:
1604 for key in relevant_keys:
1605 try:
1606 d[key]
1607 except KeyError:
1608 d[key] = missing_key_template % {'key': key}
1609 return template % d
1610
1611 if relevant_keys is None:
1612 relevant_keys = list(d.keys())
1613 lines = []
1614 if value_delimiters is None:
1615 delim_left = ''
1616 delim_right = ''
1617 else:
1618 delim_left, delim_right = value_delimiters
1619 if tabular:
1620 max_len = max([ len('%s' % k) for k in relevant_keys ])
1621 max_len_str = '%s.%s' % (max_len, max_len)
1622 line_template = (' ' * left_margin) + '%' + max_len_str + ('s: %s%%s%s' % (delim_left, delim_right))
1623 else:
1624 line_template = (' ' * left_margin) + '%%s: %s%%s%s' % (delim_left, delim_right)
1625 for key in relevant_keys:
1626 try:
1627 val = d[key]
1628 except KeyError:
1629 continue
1630 if val not in values2ignore:
1631 lines.append(line_template % (key, val))
1632 if eol is None:
1633 return lines
1634 return eol.join(lines)
1635
1636 #---------------------------------------------------------------------------
1637 -def dicts2table(dict_list, left_margin=0, eol='\n', keys2ignore=None, column_labels=None, show_only_changes=False, equality_value='<=>', date_format=None): #, relevant_keys=None, template=None
1638 """Each dict in <dict_list> becomes a column.
1639
1640 - each key of dict becomes a row label, unless in keys2ignore
1641
1642 - each entry in the <column_labels> list becomes a column title
1643 """
1644 keys2show = []
1645 col_max_width = {}
1646 max_width_of_row_label_col = 0
1647 col_label_key = '__________#header#__________'
1648 if keys2ignore is None:
1649 keys2ignore = []
1650 if column_labels is not None:
1651 keys2ignore.append(col_label_key)
1652
1653 # extract keys from all dicts and calculate column sizes
1654 for dict_idx in range(len(dict_list)):
1655 # convert potentially dict-*like* into dict
1656 d = dict(dict_list[dict_idx])
1657 # add max-len column label row from <column_labels> list, if available
1658 if column_labels is not None:
1659 d[col_label_key] = max(column_labels[dict_idx].split('\n'), key = len)
1660 field_lengths = []
1661 # loop over all keys in this dict
1662 for key in d.keys():
1663 # ignore this key
1664 if key in keys2ignore:
1665 continue
1666 # remember length of value when displayed
1667 if isinstance(d[key], pydt.datetime):
1668 if date_format is None:
1669 field_lengths.append(len('%s' % d[key]))
1670 else:
1671 field_lengths.append(len(d[key].strftime(date_format)))
1672 else:
1673 field_lengths.append(len('%s' % d[key]))
1674 if key in keys2show:
1675 continue
1676 keys2show.append(key)
1677 max_width_of_row_label_col = max(max_width_of_row_label_col, len('%s' % key))
1678 col_max_width[dict_idx] = max(field_lengths)
1679
1680 # pivot data into dict of lists per line
1681 lines = { k: [] for k in keys2show }
1682 prev_vals = {}
1683 for dict_idx in range(len(dict_list)):
1684 max_width_this_col = max(col_max_width[dict_idx], len(equality_value)) if show_only_changes else col_max_width[dict_idx]
1685 max_len_str = '%s.%s' % (max_width_this_col, max_width_this_col)
1686 field_template = ' %' + max_len_str + 's'
1687 d = dict_list[dict_idx]
1688 for key in keys2show:
1689 try:
1690 val = d[key]
1691 except KeyError:
1692 lines[key].append(field_template % _('<missing>'))
1693 continue
1694 if isinstance(val, pydt.datetime):
1695 if date_format is not None:
1696 val = val.strftime(date_format)
1697 lines[key].append(field_template % val)
1698 if show_only_changes:
1699 if key not in prev_vals:
1700 prev_vals[key] = '%s' % lines[key][-1]
1701 continue
1702 if lines[key][-1] != prev_vals[key]:
1703 prev_vals[key] = '%s' % lines[key][-1]
1704 continue
1705 lines[key][-1] = field_template % equality_value
1706
1707 # format data into table
1708 table_lines = []
1709 max_len_str = '%s.%s' % (max_width_of_row_label_col, max_width_of_row_label_col)
1710 row_label_template = '%' + max_len_str + 's'
1711 for key in lines:
1712 # row label (= key) into first column
1713 line = (' ' * left_margin) + row_label_template % key + '|'
1714 # append list values as subsequent columns
1715 line += '|'.join(lines[key])
1716 table_lines.append(line)
1717
1718 # insert lines with column labels (column headers) if any
1719 if column_labels is not None:
1720 # first column contains row labels, so no column label needed
1721 table_header_line_w_col_labels = (' ' * left_margin) + row_label_template % ''
1722 # second table header line: horizontal separator
1723 table_header_line_w_separator = (' ' * left_margin) + u_box_horiz_single * (max_width_of_row_label_col)
1724 max_col_label_widths = [ max(col_max_width[dict_idx], len(equality_value)) for dict_idx in range(len(dict_list)) ]
1725 for col_idx in range(len(column_labels)):
1726 max_len_str = '%s.%s' % (max_col_label_widths[col_idx], max_col_label_widths[col_idx])
1727 col_label_template = '%' + max_len_str + 's'
1728 table_header_line_w_col_labels += '| '
1729 table_header_line_w_col_labels += col_label_template % column_labels[col_idx]
1730 table_header_line_w_separator += '%s%s' % (u_box_plus, u_box_horiz_single)
1731 table_header_line_w_separator += u_box_horiz_single * max_col_label_widths[col_idx]
1732 table_lines.insert(0, table_header_line_w_separator)
1733 table_lines.insert(0, table_header_line_w_col_labels)
1734
1735 if eol is None:
1736 return table_lines
1737
1738 return ('|' + eol).join(table_lines) + '|' + eol
1739
1740 #---------------------------------------------------------------------------
1742 for key in required_keys:
1743 try:
1744 d[key]
1745 except KeyError:
1746 if missing_key_template is None:
1747 d[key] = None
1748 else:
1749 d[key] = missing_key_template % {'key': key}
1750 return d
1751
1752 #---------------------------------------------------------------------------
1753 def enumerate_removable_partitions():
1754 try:
1755 import pyudev
1756 import psutil
1757 except ImportError:
1758 _log.error('pyudev and/or psutil not installed')
1759 return {}
1760
1761 removable_partitions = {}
1762 ctxt = pyudev.Context()
1763 removable_devices = [ dev for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk') if dev.attributes.get('removable') == b'1' ]
1764 all_mounted_partitions = { part.device: part for part in psutil.disk_partitions() }
1765 for device in removable_devices:
1766 _log.debug('removable device: %s', device.properties['ID_MODEL'])
1767 partitions_on_removable_device = {
1768 part.device_node: {
1769 'type': device.properties['ID_TYPE'],
1770 'bus': device.properties['ID_BUS'],
1771 'device': device.properties['DEVNAME'],
1772 'partition': part.properties['DEVNAME'],
1773 'vendor': part.properties['ID_VENDOR'],
1774 'model': part.properties['ID_MODEL'],
1775 'fs_label': part.properties['ID_FS_LABEL'],
1776 'is_mounted': False,
1777 'mountpoint': None,
1778 'fs_type': None,
1779 'size_in_bytes': -1,
1780 'bytes_free': 0
1781 } for part in ctxt.list_devices(subsystem='block', DEVTYPE='partition', parent=device)
1782 }
1783 for part in partitions_on_removable_device:
1784 try:
1785 partitions_on_removable_device[part]['mountpoint'] = all_mounted_partitions[part].mountpoint
1786 partitions_on_removable_device[part]['is_mounted'] = True
1787 partitions_on_removable_device[part]['fs_type'] = all_mounted_partitions[part].fstype
1788 du = shutil.disk_usage(all_mounted_partitions[part].mountpoint)
1789 partitions_on_removable_device[part]['size_in_bytes'] = du.total
1790 partitions_on_removable_device[part]['bytes_free'] = du.free
1791 except KeyError:
1792 pass # not mounted
1793 removable_partitions.update(partitions_on_removable_device)
1794 return removable_partitions
1795
1796 # debugging:
1797 #ctxt = pyudev.Context()
1798 #for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk'):# if dev.attributes.get('removable') == b'1':
1799 # for a in dev.attributes.available_attributes:
1800 # print(a, dev.attributes.get(a))
1801 # for key, value in dev.items():
1802 # print('{key}={value}'.format(key=key, value=value))
1803 # print('---------------------------')
1804
1805 #---------------------------------------------------------------------------
1806 def enumerate_optical_writers():
1807 try:
1808 import pyudev
1809 except ImportError:
1810 _log.error('pyudev not installed')
1811 return []
1812
1813 optical_writers = []
1814 ctxt = pyudev.Context()
1815 for dev in [ dev for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk') if dev.properties.get('ID_CDROM_CD_RW', None) == '1' ]:
1816 optical_writers.append ({
1817 'type': dev.properties['ID_TYPE'],
1818 'bus': dev.properties['ID_BUS'],
1819 'device': dev.properties['DEVNAME'],
1820 'model': dev.properties['ID_MODEL']
1821 })
1822 return optical_writers
1823
1824 #---------------------------------------------------------------------------
1825 #---------------------------------------------------------------------------
1827 """Obtains entry from standard input.
1828
1829 prompt: Prompt text to display in standard output
1830 default: Default value (for user to press enter only)
1831 CTRL-C: aborts and returns None
1832 """
1833 if prompt is None:
1834 msg = '(CTRL-C aborts)'
1835 else:
1836 msg = '%s (CTRL-C aborts)' % prompt
1837
1838 if default is None:
1839 msg = msg + ': '
1840 else:
1841 msg = '%s [%s]: ' % (msg, default)
1842
1843 try:
1844 usr_input = input(msg)
1845 except KeyboardInterrupt:
1846 return None
1847
1848 if usr_input == '':
1849 return default
1850
1851 return usr_input
1852
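# usage sketch (hedged): the def line of this console helper is missing from
# this listing; the name prompted_input() and its keyword arguments are an
# assumption based on the parameter names used above:
#answer = prompted_input(prompt = 'Delete file', default = 'no')
#if answer is None:
#	print('aborted (CTRL-C)')
#else:
#	print('user said:', answer)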
1853 #===========================================================================
1854 # image handling tools
1855 #---------------------------------------------------------------------------
1856 # builtin (ugly but tried and true) fallback icon
1857 __icon_serpent = \
1858 """x\xdae\x8f\xb1\x0e\x83 \x10\x86w\x9f\xe2\x92\x1blb\xf2\x07\x96\xeaH:0\xd6\
1859 \xc1\x85\xd5\x98N5\xa5\xef?\xf5N\xd0\x8a\xdcA\xc2\xf7qw\x84\xdb\xfa\xb5\xcd\
1860 \xd4\xda;\xc9\x1a\xc8\xb6\xcd<\xb5\xa0\x85\x1e\xeb\xbc\xbc7b!\xf6\xdeHl\x1c\
1861 \x94\x073\xec<*\xf7\xbe\xf7\x99\x9d\xb21~\xe7.\xf5\x1f\x1c\xd3\xbdVlL\xc2\
1862 \xcf\xf8ye\xd0\x00\x90\x0etH \x84\x80B\xaa\x8a\x88\x85\xc4(U\x9d$\xfeR;\xc5J\
1863 \xa6\x01\xbbt9\xceR\xc8\x81e_$\x98\xb9\x9c\xa9\x8d,y\xa9t\xc8\xcf\x152\xe0x\
1864 \xe9$\xf5\x07\x95\x0cD\x95t:\xb1\x92\xae\x9cI\xa8~\x84\x1f\xe0\xa3ec"""
1865
1867
1868 paths = gmPaths(app_name = 'gnumed', wx = wx)
1869
1870 candidates = [
1871 os.path.join(paths.system_app_data_dir, 'bitmaps', 'gm_icon-serpent_and_gnu.png'),
1872 os.path.join(paths.local_base_dir, 'bitmaps', 'gm_icon-serpent_and_gnu.png'),
1873 os.path.join(paths.system_app_data_dir, 'bitmaps', 'serpent.png'),
1874 os.path.join(paths.local_base_dir, 'bitmaps', 'serpent.png')
1875 ]
1876
1877 found_as = None
1878 for candidate in candidates:
1879 try:
1880 open(candidate, 'r').close()
1881 found_as = candidate
1882 break
1883 except IOError:
1884 _log.debug('icon not found in [%s]', candidate)
1885
1886 if found_as is None:
1887 _log.warning('no icon file found, falling back to builtin (ugly) icon')
1888 icon = wx.Icon()
1889 icon.CopyFromBitmap(wx.BitmapFromXPMData(pickle.loads(zlib.decompress(__icon_serpent))))
1890 else:
1891 _log.debug('icon found in [%s]', found_as)
1892 icon = wx.Icon()
1893 try:
1894 icon.LoadFile(found_as, wx.BITMAP_TYPE_ANY) #_PNG
1895 except AttributeError:
1896 _log.exception("this platform doesn't support wx.Icon().LoadFile()")
1897
1898 return icon
1899
1900 #---------------------------------------------------------------------------
1902 assert (not ((text is None) and (filename is None))), 'either <text> or <filename> must be specified'
1903
1904 try:
1905 import pyqrcode
1906 except ImportError:
1907 _log.exception('cannot import <pyqrcode>')
1908 return None
1909 if text is None:
1910 with io.open(filename, mode = 'rt', encoding = 'utf8') as input_file:
1911 text = input_file.read()
1912 if qr_filename is None:
1913 if filename is None:
1914 qr_filename = get_unique_filename(prefix = 'gm-qr-', suffix = '.png')
1915 else:
1916 qr_filename = get_unique_filename (
1917 prefix = fname_stem(filename) + '-',
1918 suffix = fname_extension(filename) + '.png'
1919 )
1920 _log.debug('[%s] -> [%s]', filename, qr_filename)
1921 qr = pyqrcode.create(text, encoding = 'utf8')
1922 if verbose:
1923 print('input file:', filename)
1924 print('output file:', qr_filename)
1925 print('text to encode:', text)
1926 print(qr.terminal())
1927 qr.png(qr_filename, quiet_zone = 1)
1928 return qr_filename
1929
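# usage sketch (hedged): assumes the code above is the create_qrcode() helper
# exercised by test_create_qrcode() in the test section below (its def line is
# missing from this listing); encodes a short text into a PNG via pyqrcode and
# returns the generated filename:
#png_name = create_qrcode(text = 'GNUmed', verbose = False)
#print('QR code written to:', png_name)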
1930 #===========================================================================
1931 # main
1932 #---------------------------------------------------------------------------
1933 if __name__ == '__main__':
1934
1935 if len(sys.argv) < 2:
1936 sys.exit()
1937
1938 if sys.argv[1] != 'test':
1939 sys.exit()
1940
1941 # for testing:
1942 logging.basicConfig(level = logging.DEBUG)
1943 from Gnumed.pycommon import gmI18N
1944 gmI18N.activate_locale()
1945 gmI18N.install_domain()
1946
1947 #-----------------------------------------------------------------------
1949
1950 tests = [
1951 [None, False],
1952
1953 ['', False],
1954 [' 0 ', True, 0],
1955
1956 [0, True, 0],
1957 [0.0, True, 0],
1958 [.0, True, 0],
1959 ['0', True, 0],
1960 ['0.0', True, 0],
1961 ['0,0', True, 0],
1962 ['00.0', True, 0],
1963 ['.0', True, 0],
1964 [',0', True, 0],
1965
1966 [0.1, True, decimal.Decimal('0.1')],
1967 [.01, True, decimal.Decimal('0.01')],
1968 ['0.1', True, decimal.Decimal('0.1')],
1969 ['0,1', True, decimal.Decimal('0.1')],
1970 ['00.1', True, decimal.Decimal('0.1')],
1971 ['.1', True, decimal.Decimal('0.1')],
1972 [',1', True, decimal.Decimal('0.1')],
1973
1974 [1, True, 1],
1975 [1.0, True, 1],
1976 ['1', True, 1],
1977 ['1.', True, 1],
1978 ['1,', True, 1],
1979 ['1.0', True, 1],
1980 ['1,0', True, 1],
1981 ['01.0', True, 1],
1982 ['01,0', True, 1],
1983 [' 01, ', True, 1],
1984
1985 [decimal.Decimal('1.1'), True, decimal.Decimal('1.1')]
1986 ]
1987 for test in tests:
1988 conversion_worked, result = input2decimal(initial = test[0])
1989
1990 expected2work = test[1]
1991
1992 if conversion_worked:
1993 if expected2work:
1994 if result == test[2]:
1995 continue
1996 else:
1997 print("ERROR (conversion result wrong): >%s<, expected >%s<, got >%s<" % (test[0], test[2], result))
1998 else:
1999 print("ERROR (conversion worked but was expected to fail): >%s<, got >%s<" % (test[0], result))
2000 else:
2001 if not expected2work:
2002 continue
2003 else:
2004 print("ERROR (conversion failed but was expected to work): >%s<, expected >%s<" % (test[0], test[2]))
2005 #-----------------------------------------------------------------------
2010 #-----------------------------------------------------------------------
2012
2013 val = None
2014 print(val, coalesce(val, 'is None', 'is not None'))
2015 val = 1
2016 print(val, coalesce(val, 'is None', 'is not None'))
2017 return
2018
2019 import datetime as dt
2020 print(coalesce(value2test = dt.datetime.now(), template4value = '-- %s --', function4value = ('strftime', '%Y-%m-%d')))
2021
2022 print('testing coalesce()')
2023 print("------------------")
2024 tests = [
2025 [None, 'something other than <None>', None, None, 'something other than <None>'],
2026 ['Captain', 'Mr.', '%.4s.', 'Mr.', 'Capt.'],
2027 ['value to test', 'test 3 failed', 'template with "%s" included', None, 'template with "value to test" included'],
2028 ['value to test', 'test 4 failed', 'template with value not included', None, 'template with value not included'],
2029 [None, 'initial value was None', 'template4value: %s', None, 'initial value was None'],
2030 [None, 'initial value was None', 'template4value: %%(abc)s', None, 'initial value was None']
2031 ]
2032 passed = True
2033 for test in tests:
2034 result = coalesce (
2035 value2test = test[0],
2036 return_instead = test[1],
2037 template4value = test[2],
2038 template4instead = test[3]
2039 )
2040 if result != test[4]:
2041 print("ERROR")
2042 print("coalesce: (%s, %s, %s, %s)" % (test[0], test[1], test[2], test[3]))
2043 print("expected:", test[4])
2044 print("received:", result)
2045 passed = False
2046
2047 if passed:
2048 print("passed")
2049 else:
2050 print("failed")
2051 return passed
2052 #-----------------------------------------------------------------------
2054 print('testing capitalize() ...')
2055 success = True
2056 pairs = [
2057 # [original, expected result, CAPS mode]
2058 ['Boot', 'Boot', CAPS_FIRST_ONLY],
2059 ['boot', 'Boot', CAPS_FIRST_ONLY],
2060 ['booT', 'Boot', CAPS_FIRST_ONLY],
2061 ['BoOt', 'Boot', CAPS_FIRST_ONLY],
2062 ['boots-Schau', 'Boots-Schau', CAPS_WORDS],
2063 ['boots-sChau', 'Boots-Schau', CAPS_WORDS],
2064 ['boot camp', 'Boot Camp', CAPS_WORDS],
2065 ['fahrner-Kampe', 'Fahrner-Kampe', CAPS_NAMES],
2066 ['häkkönen', 'Häkkönen', CAPS_NAMES],
2067 ['McBurney', 'McBurney', CAPS_NAMES],
2068 ['mcBurney', 'McBurney', CAPS_NAMES],
2069 ['blumberg', 'Blumberg', CAPS_NAMES],
2070 ['roVsing', 'RoVsing', CAPS_NAMES],
2071 ['Özdemir', 'Özdemir', CAPS_NAMES],
2072 ['özdemir', 'Özdemir', CAPS_NAMES],
2073 ]
2074 for pair in pairs:
2075 result = capitalize(pair[0], pair[2])
2076 if result != pair[1]:
2077 success = False
2078 print('ERROR (caps mode %s): "%s" -> "%s", expected "%s"' % (pair[2], pair[0], result, pair[1]))
2079
2080 if success:
2081 print("... SUCCESS")
2082
2083 return success
2084 #-----------------------------------------------------------------------
2086 print("testing import_module_from_directory()")
2087 path = sys.argv[1]
2088 name = sys.argv[2]
2089 try:
2090 mod = import_module_from_directory(module_path = path, module_name = name)
2091 except Exception:
2092 print("module import failed, see log")
2093 return False
2094
2095 print("module import succeeded", mod)
2096 print(dir(mod))
2097 return True
2098 #-----------------------------------------------------------------------
2102 #-----------------------------------------------------------------------
2104 print("testing gmPaths()")
2105 print("-----------------")
2106 paths = gmPaths(wx=None, app_name='gnumed')
2107 print("user config dir:", paths.user_config_dir)
2108 print("system config dir:", paths.system_config_dir)
2109 print("local base dir:", paths.local_base_dir)
2110 print("system app data dir:", paths.system_app_data_dir)
2111 print("working directory :", paths.working_dir)
2112 print("temp directory :", paths.tmp_dir)
2113 #-----------------------------------------------------------------------
2115 print("testing none_if()")
2116 print("-----------------")
2117 tests = [
2118 [None, None, None],
2119 ['a', 'a', None],
2120 ['a', 'b', 'a'],
2121 ['a', None, 'a'],
2122 [None, 'a', None],
2123 [1, 1, None],
2124 [1, 2, 1],
2125 [1, None, 1],
2126 [None, 1, None]
2127 ]
2128
2129 for test in tests:
2130 if none_if(value = test[0], none_equivalent = test[1]) != test[2]:
2131 print('ERROR: none_if(%s) returned [%s], expected [%s]' % (test[0], none_if(test[0], test[1]), test[2]))
2132
2133 return True
2134 #-----------------------------------------------------------------------
2136 tests = [
2137 [True, 'Yes', 'Yes', 'Yes'],
2138 [False, 'OK', 'not OK', 'not OK']
2139 ]
2140 for test in tests:
2141 if bool2str(test[0], test[1], test[2]) != test[3]:
2142 print('ERROR: bool2str(%s, %s, %s) returned [%s], expected [%s]' % (test[0], test[1], test[2], bool2str(test[0], test[1], test[2]), test[3]))
2143
2144 return True
2145 #-----------------------------------------------------------------------
2147
2148 print(bool2subst(True, 'True', 'False', 'is None'))
2149 print(bool2subst(False, 'True', 'False', 'is None'))
2150 print(bool2subst(None, 'True', 'False', 'is None'))
2151 #-----------------------------------------------------------------------
2153 print(get_unique_filename())
2154 print(get_unique_filename(prefix='test-'))
2155 print(get_unique_filename(suffix='tst'))
2156 print(get_unique_filename(prefix='test-', suffix='tst'))
2157 print(get_unique_filename(tmp_dir='/home/ncq/Archiv/'))
2158 #-----------------------------------------------------------------------
2160 print("testing size2str()")
2161 print("------------------")
2162 tests = [0, 1, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000]
2163 for test in tests:
2164 print(size2str(test))
2165 #-----------------------------------------------------------------------
2167
2168 test = """
2169 second line\n
2170 3rd starts with tab \n
2171 4th with a space \n
2172
2173 6th
2174
2175 """
2176 print(unwrap(text = test, max_length = 25))
2177 #-----------------------------------------------------------------------
2179 test = 'line 1\nline 2\nline 3'
2180
2181 print("wrap 5-6-7 initial 0, subsequent 0")
2182 print(wrap(test, 5))
2183 print()
2184 print(wrap(test, 6))
2185 print()
2186 print(wrap(test, 7))
2187 print("-------")
2188 input()
2189 print("wrap 5 initial 1-1-3, subsequent 1-3-1")
2190 print(wrap(test, 5, ' ', ' '))
2191 print()
2192 print(wrap(test, 5, ' ', '   '))
2193 print()
2194 print(wrap(test, 5, '   ', ' '))
2195 print("-------")
2196 input()
2197 print("wrap 6 initial 1-1-3, subsequent 1-3-1")
2198 print(wrap(test, 6, ' ', ' '))
2199 print()
2200 print(wrap(test, 6, ' ', '   '))
2201 print()
2202 print(wrap(test, 6, '   ', ' '))
2203 print("-------")
2204 input()
2205 print("wrap 7 initial 1-1-3, subsequent 1-3-1")
2206 print(wrap(test, 7, ' ', ' '))
2207 print()
2208 print(wrap(test, 7, ' ', '   '))
2209 print()
2210 print(wrap(test, 7, '   ', ' '))
2211 #-----------------------------------------------------------------------
2213 print('md5 %s: %s' % (sys.argv[2], file2md5(sys.argv[2])))
2214 print('chunked md5 %s: %s' % (sys.argv[2], file2chunked_md5(sys.argv[2])))
2215 #-----------------------------------------------------------------------
2217 print(u_link_symbol * 10)
2218 #-----------------------------------------------------------------------
2220 print(xml_escape_string('<'))
2221 print(xml_escape_string('>'))
2222 print(xml_escape_string('&'))
2223 #-----------------------------------------------------------------------
2225 tests = ['\\', '^', '~', '{', '}', '%', '&', '#', '$', '_', u_euro, 'abc\ndef\n\n1234']
2226 tests.append(' '.join(tests))
2227 for test in tests:
2228 print('%s:' % test, tex_escape_string(test))
2229
2230 #-----------------------------------------------------------------------
2232 tests = ['\\', '^', '~', '{', '}', '%', '&', '#', '$', '_', u_euro, 'abc\ndef\n\n1234']
2233 tests.append(' '.join(tests))
2234 tests.append(r'C:\Windows\Programme\System 32\lala.txt')
2235 tests.extend([
2236 'should be identical',
2237 'text *some text* text',
2238 """A List
2239 ======
2240
2241 1. 1
2242 2. 2
2243
2244 3. ist-list
2245 1. more
2246 2. noch was ü
2247 #. nummer x"""
2248 ])
2249 for test in tests:
2250 print('==================================================')
2251 print('raw:')
2252 print(test)
2253 print('---------')
2254 print('ReST 2 LaTeX:')
2255 latex = rst2latex_snippet(test)
2256 print(latex)
2257 if latex.strip() == test.strip():
2258 print('=> identical')
2259 print('---------')
2260 print('tex_escape_string:')
2261 print(tex_escape_string(test))
2262 input()
2263
2264 #-----------------------------------------------------------------------
2266 tests = [
2267 'one line, no embedded line breaks ',
2268 'one line\nwith embedded\nline\nbreaks\n '
2269 ]
2270 for test in tests:
2271 print('as list:')
2272 print(strip_trailing_empty_lines(text = test, eol='\n', return_list = True))
2273 print('as string:')
2274 print('>>>%s<<<' % strip_trailing_empty_lines(text = test, eol='\n', return_list = False))
2275 tests = [
2276 ['list', 'without', 'empty', 'trailing', 'lines'],
2277 ['list', 'with', 'empty', 'trailing', 'lines', '', ' ', '']
2278 ]
2279 for test in tests:
2280 print('as list:')
2281 print(strip_trailing_empty_lines(lines = test, eol = '\n', return_list = True))
2282 print('as string:')
2283 print(strip_trailing_empty_lines(lines = test, eol = '\n', return_list = False))
2284 #-----------------------------------------------------------------------
2286 tests = [
2287 r'abc.exe',
2288 r'\abc.exe',
2289 r'c:\abc.exe',
2290 r'c:\d\abc.exe',
2291 r'/home/ncq/tmp.txt',
2292 r'~/tmp.txt',
2293 r'./tmp.txt',
2294 r'./.././tmp.txt',
2295 r'tmp.txt'
2296 ]
2297 for t in tests:
2298 print("[%s] -> [%s]" % (t, fname_stem(t)))
2299 #-----------------------------------------------------------------------
2301 print(sys.argv[2], 'empty:', dir_is_empty(sys.argv[2]))
2302
2303 #-----------------------------------------------------------------------
2305 d1 = {}
2306 d2 = {}
2307 d1[1] = 1
2308 d1[2] = 2
2309 d1[3] = 3
2310 # 4
2311 d1[5] = 5
2312
2313 d2[1] = 1
2314 d2[2] = None
2315 # 3
2316 d2[4] = 4
2317
2318 #compare_dict_likes(d1, d2)
2319
2320 d1 = {1: 1, 2: 2}
2321 d2 = {1: 1, 2: 2}
2322
2323 #compare_dict_likes(d1, d2, 'same1', 'same2')
2324 print(format_dict_like(d1, tabular = False))
2325 print(format_dict_like(d1, tabular = True))
2326 #print(format_dict_like(d2))
2327
2328 #-----------------------------------------------------------------------
2330 d1 = {}
2331 d2 = {}
2332 d1[1] = 1
2333 d1[2] = 2
2334 d1[3] = 3
2335 # 4
2336 d1[5] = 5
2337
2338 d2[1] = 1
2339 d2[2] = None
2340 # 3
2341 d2[4] = 4
2342
2343 print('\n'.join(format_dict_likes_comparison(d1, d2, 'd1', 'd2')))
2344
2345 d1 = {1: 1, 2: 2}
2346 d2 = {1: 1, 2: 2}
2347
2348 print('\n'.join(format_dict_likes_comparison(d1, d2, 'd1', 'd2')))
2349
2350 #-----------------------------------------------------------------------
2352 rmdir(r'cx:\windows\system3__2xxxxxxxxxxxxx')
2353
2354 #-----------------------------------------------------------------------
2356 #print(rm_dir_content('cx:\windows\system3__2xxxxxxxxxxxxx'))
2357 print(rm_dir_content('/tmp/user/1000/tmp'))
2358
2359 #-----------------------------------------------------------------------
2361 tests = [
2362 ('', '', ''),
2363 ('a', 'a', ''),
2364 ('GMd: a window title', _GM_TITLE_PREFIX + ':', 'a window title'),
2365 ('\.br\MICROCYTES+1\.br\SPHEROCYTES present\.br\POLYCHROMASIAmoderate\.br\\', '\.br\\', 'MICROCYTES+1\.br\SPHEROCYTES present\.br\POLYCHROMASIAmoderate\.br\\')
2366 ]
2367 for test in tests:
2368 text, prefix, expect = test
2369 result = strip_prefix(text, prefix, remove_whitespace = True)
2370 if result == expect:
2371 continue
2372 print('test failed:', test)
2373 print('result:', result)
2374
2375 #-----------------------------------------------------------------------
2377 tst = [
2378 ('123', 1),
2379 ('123', 2),
2380 ('123', 3),
2381 ('123', 4),
2382 ('', 1),
2383 ('1', 1),
2384 ('12', 1),
2385 ('', 2),
2386 ('1', 2),
2387 ('12', 2),
2388 ('123', 2)
2389 ]
2390 for txt, lng in tst:
2391 print('max', lng, 'of', txt, '=', shorten_text(txt, lng))
2392 #-----------------------------------------------------------------------
2394 tests = [
2395 '/tmp/test.txt',
2396 '/tmp/ test.txt',
2397 '/tmp/ tes\\t.txt',
2398 'test'
2399 ]
2400 for test in tests:
2401 print (test, fname_sanitize(test))
2402
2403 #-----------------------------------------------------------------------
2406
2407 #-----------------------------------------------------------------------
2409 parts = enumerate_removable_partitions()
2410 for part_name in parts:
2411 part = parts[part_name]
2412 print(part['device'])
2413 print(part['partition'])
2414 if part['is_mounted']:
2415 print('%s@%s: %s on %s by %s @ %s (FS=%s: %s free of %s total)' % (
2416 part['type'],
2417 part['bus'],
2418 part['fs_label'],
2419 part['model'],
2420 part['vendor'],
2421 part['mountpoint'],
2422 part['fs_type'],
2423 part['bytes_free'],
2424 part['size_in_bytes']
2425 ))
2426 else:
2427 print('%s@%s: %s on %s by %s (not mounted)' % (
2428 part['type'],
2429 part['bus'],
2430 part['fs_label'],
2431 part['model'],
2432 part['vendor']
2433 ))
2434
2435 #-----------------------------------------------------------------------
2437 for writer in enumerate_optical_writers():
2438 print('%s@%s: %s @ %s' % (
2439 writer['type'],
2440 writer['bus'],
2441 writer['model'],
2442 writer['device']
2443 ))
2444
2445 #-----------------------------------------------------------------------
2449
2450 #-----------------------------------------------------------------------
2452 print(mk_sandbox_dir(base_dir = '/tmp/abcd/efg/h'))
2453
2454 #-----------------------------------------------------------------------
2456 dicts = [
2457 {'pkey': 1, 'value': 'a122'},
2458 {'pkey': 2, 'value': 'b23'},
2459 {'pkey': 3, 'value': 'c3'},
2460 {'pkey': 4, 'value': 'd4ssssssssssss'},
2461 {'pkey': 5, 'value': 'd4 asdfas '},
2462 {'pkey': 5, 'value': 'c5---'},
2463 ]
2464 with open('x.txt', 'w', encoding = 'utf8') as f:
2465 f.write(dicts2table(dicts, left_margin=2, eol='\n', keys2ignore=None, show_only_changes=True, column_labels = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6']))
2466 #print(dicts2table(dicts, left_margin=2, eol='\n', keys2ignore=None, show_only_changes=True, column_labels = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6']))
2467
2468 #-----------------------------------------------------------------------
2470 global _client_version
2471 _client_version = 'dev.test'
2472 print(create_directory_description_file (
2473 directory = './',
2474 readme = 'test\ntest2\nsome more text',
2475 suffix = None
2476 ))
2477
2478 #-----------------------------------------------------------------------
2479 #test_coalesce()
2480 #test_capitalize()
2481 #test_import_module()
2482 #test_mkdir()
2483 #test_gmPaths()
2484 #test_none_if()
2485 #test_bool2str()
2486 #test_bool2subst()
2487 #test_get_unique_filename()
2488 #test_size2str()
2489 #test_wrap()
2490 #test_input2decimal()
2491 #test_input2int()
2492 #test_unwrap()
2493 #test_md5()
2494 #test_unicode()
2495 #test_xml_escape()
2496 #test_strip_trailing_empty_lines()
2497 #test_fname_stem()
2498 #test_tex_escape()
2499 #test_rst2latex_snippet()
2500 #test_dir_is_empty()
2501 #test_compare_dicts()
2502 #test_rm_dir()
2503 #test_rm_dir_content()
2504 #test_strip_prefix()
2505 #test_shorten_text()
2506 #test_format_compare_dicts()
2507 #test_fname_sanitize()
2508 #test_create_qrcode()
2509 #test_enumerate_removable_partitions()
2510 #test_enumerate_optical_writers()
2511 #test_copy_tree_content()
2512 #test_mk_sandbox_dir()
2513 #test_make_table_from_dicts()
2514 test_create_dir_desc_file()
2515
2516 #===========================================================================
2517