1 """This module encapsulates a document stored in a GNUmed database."""
2
3 __author__ = "Karsten Hilbert <Karsten.Hilbert@gmx.net>"
4 __license__ = "GPL v2 or later"
5
6 import sys, os, shutil, os.path, types, time, logging
7
8
9 if __name__ == '__main__':
10 sys.path.insert(0, '../../')
11 from Gnumed.pycommon import gmExceptions
12 from Gnumed.pycommon import gmBusinessDBObject
13 from Gnumed.pycommon import gmPG2
14 from Gnumed.pycommon import gmTools
15 from Gnumed.pycommon import gmMimeLib
16 from Gnumed.pycommon import gmDateTime
17
18
19 _log = logging.getLogger('gm.docs')
20
21 MUGSHOT=26
22 DOCUMENT_TYPE_VISUAL_PROGRESS_NOTE = u'visual progress note'
23 DOCUMENT_TYPE_PRESCRIPTION = u'prescription'
24
26 """Represents a folder with medical documents for a single patient."""
27
"""Initialize the document folder for one patient.

Fails if the patient referenced by <aPKey> does not exist.

Raises:
    gmExceptions.ConstructorError -- no patient with PK <aPKey>
    exists in dem.identity
"""
self.pk_patient = aPKey
if not self._pkey_exists():
    # use the call-form raise: valid in Python 2 *and* 3,
    # unlike the old "raise X, msg" statement form used before
    raise gmExceptions.ConstructorError("No patient with PK [%s] in database." % aPKey)

_log.debug('instantiated document folder for patient [%s]' % self.pk_patient)
44
47
48
49
"""Check whether self.pk_patient exists in dem.identity.

Returns:
    True if the patient row exists, None (falsy) otherwise --
    callers only test truthiness.
"""
rows, idx = gmPG2.run_ro_queries(queries = [
{'cmd': u"select exists(select pk from dem.identity where pk = %s)", 'args': [self.pk_patient]}
])
if not rows[0][0]:
_log.error("patient [%s] not in demographic database" % self.pk_patient)
return None
return True
63
64
65
# find the most recent prescription document imported from
# FreeDiams for this patient; returns a cDocument or None
cmd = u"""
SELECT pk_doc
FROM blobs.v_doc_med
WHERE
pk_patient = %(pat)s
AND
type = %(typ)s
AND
ext_ref = %(ref)s
ORDER BY
clin_when DESC
LIMIT 1
"""
args = {
'pat': self.pk_patient,
'typ': DOCUMENT_TYPE_PRESCRIPTION,
# FreeDiams tags the prescriptions it creates with this ext_ref
'ref': u'FreeDiams'
}
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
if len(rows) == 0:
_log.info('no FreeDiams prescription available for patient [%s]' % self.pk_patient)
return None
prescription = cDocument(aPK_obj = rows[0][0])
return prescription
91
# return the most recent patient photograph ("mugshot") as a
# cDocumentPart instance, or None if there is none on file
cmd = u"SELECT pk_obj FROM blobs.v_latest_mugshot WHERE pk_patient = %s"
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
if len(rows) == 0:
_log.info('no mugshots available for patient [%s]' % self.pk_patient)
return None
return cDocumentPart(aPK_obj = rows[0][0])

# write via the lambda is a silent no-op (read-mostly property)
latest_mugshot = property(get_latest_mugshot, lambda x:x)
101
103 if latest_only:
104 cmd = u"select pk_doc, pk_obj from blobs.v_latest_mugshot where pk_patient=%s"
105 else:
106 cmd = u"""
107 select
108 vdm.pk_doc as pk_doc,
109 dobj.pk as pk_obj
110 from
111 blobs.v_doc_med vdm
112 blobs.doc_obj dobj
113 where
114 vdm.pk_type = (select pk from blobs.doc_type where name = 'patient photograph')
115 and vdm.pk_patient = %s
116 and dobj.fk_doc = vdm.pk_doc
117 """
118 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
119 return rows
120
122 """return flat list of document IDs"""
123
124 args = {
125 'ID': self.pk_patient,
126 'TYP': doc_type
127 }
128
129 cmd = u"""
130 select vdm.pk_doc
131 from blobs.v_doc_med vdm
132 where
133 vdm.pk_patient = %%(ID)s
134 %s
135 order by vdm.clin_when"""
136
137 if doc_type is None:
138 cmd = cmd % u''
139 else:
140 try:
141 int(doc_type)
142 cmd = cmd % u'and vdm.pk_type = %(TYP)s'
143 except (TypeError, ValueError):
144 cmd = cmd % u'and vdm.pk_type = (select pk from blobs.doc_type where name = %(TYP)s)'
145
146 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
147 doc_ids = []
148 for row in rows:
149 doc_ids.append(row[0])
150 return doc_ids
151
158
# documents of this patient having at least one part that has not
# been reviewed yet, most recent first; returns cDocument instances
args = {'pat': self.pk_patient}
cmd = _sql_fetch_document_fields % u"""
pk_doc IN (
SELECT DISTINCT ON (b_vo.pk_doc) b_vo.pk_doc
FROM blobs.v_obj4doc_no_data b_vo
WHERE
pk_patient = %(pat)s
AND
reviewed IS FALSE
)
ORDER BY clin_when DESC"""
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]
173
def get_documents(self, doc_type=None, episodes=None, encounter=None, order_by=None, exclude_unsigned=False):
    """Return the patient's documents as a list of cDocument instances.

    doc_type -- a document type PK or a document type name
    episodes -- restrict to documents belonging to these episode PKs
    encounter -- restrict to documents created in this encounter
    order_by -- complete ORDER BY clause; defaults to clin_when
    exclude_unsigned -- if True, only documents with reviewed parts
    """
    args = {'pat': self.pk_patient, 'type': doc_type, 'enc': encounter}
    conditions = [u'pk_patient = %(pat)s']

    if doc_type is not None:
        try:
            int(doc_type)
        except (TypeError, ValueError):
            # not a PK -> resolve the document type by name
            conditions.append(u'pk_type = (SELECT pk FROM blobs.doc_type WHERE name = %(type)s)')
        else:
            conditions.append(u'pk_type = %(type)s')

    if episodes is not None:
        if len(episodes) > 0:
            # psycopg2 adapts the tuple for the IN clause
            conditions.append(u'pk_episode IN %(epi)s')
            args['epi'] = tuple(episodes)

    if encounter is not None:
        conditions.append(u'pk_encounter = %(enc)s')

    if exclude_unsigned:
        conditions.append(u'pk_doc IN (SELECT b_vo.pk_doc FROM blobs.v_obj4doc_no_data b_vo WHERE b_vo.pk_patient = %(pat)s AND b_vo.reviewed IS TRUE)')

    if order_by is None:
        order_by = u'ORDER BY clin_when'

    cmd = u"%s\n%s" % (_sql_fetch_document_fields % u' AND '.join(conditions), order_by)
    rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
    return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]

documents = property(get_documents, lambda x:x)
210
def add_document(self, document_type=None, encounter=None, episode=None):
    # convenience wrapper around the module-level create_document()
    return create_document(document_type = document_type, encounter = encounter, episode = episode)
213
# SQL template for fetching part metadata (no BLOB payload);
# the %s slot receives a WHERE condition
_sql_fetch_document_part_fields = u"select * from blobs.v_obj4doc_no_data where %s"

"""Represents one part of a medical document."""

_cmd_fetch_payload = _sql_fetch_document_part_fields % u"pk_obj = %s"
# optimistic locking: the UPDATE only succeeds while xmin is
# unchanged; RETURNING refreshes xmin for the business object
_cmds_store_payload = [
u"""UPDATE blobs.doc_obj SET
seq_idx = %(seq_idx)s,
comment = gm.nullify_empty_string(%(obj_comment)s),
filename = gm.nullify_empty_string(%(filename)s),
fk_intended_reviewer = %(pk_intended_reviewer)s,
fk_doc = %(pk_doc)s
WHERE
pk = %(pk_obj)s
AND
xmin = %(xmin_doc_obj)s
RETURNING
xmin AS xmin_doc_obj"""
]
# fields writable via item assignment on instances
_updatable_fields = [
'seq_idx',
'obj_comment',
'pk_intended_reviewer',
'filename',
'pk_doc'
]
241
242
243
def export_to_file(self, aChunkSize=0, filename=None, target_mime=None, target_extension=None, ignore_conversion_problems=False):
    """Export this part's binary data into a file, optionally converting it.

    aChunkSize -- chunk size used when streaming the bytea column
    filename -- target file; auto-generated (reusing the original
        file extension, if known) when None
    target_mime -- if given, attempt conversion to this MIME type
    target_extension -- extension for the converted file, guessed
        from target_mime when None
    ignore_conversion_problems -- if True, return the unconverted
        file instead of None when conversion fails

    Returns the name of the written file, or None on failure.
    """
    if self._payload[self._idx['size']] == 0:
        # nothing stored for this part
        return None

    if filename is None:
        # try to reuse the extension of the originally imported file
        suffix = None
        if self._payload[self._idx['filename']] is not None:
            name, suffix = os.path.splitext(self._payload[self._idx['filename']])
            suffix = suffix.strip()
            if suffix == u'':
                suffix = None
        filename = gmTools.get_unique_filename (
            prefix = 'gm-doc_obj-page_%s-' % self._payload[self._idx['seq_idx']],
            suffix = suffix
        )

    # NOTE(review): 'start'/'size' placeholders of the data query are
    # presumably filled in by gmPG2.bytea2file() while chunking --
    # only 'pk' is supplied here; confirm against gmPG2
    success = gmPG2.bytea2file (
        data_query = {
            'cmd': u'SELECT substring(data from %(start)s for %(size)s) FROM blobs.doc_obj WHERE pk=%(pk)s',
            'args': {'pk': self.pk_obj}
        },
        filename = filename,
        chunk_size = aChunkSize,
        data_size = self._payload[self._idx['size']]
    )

    if not success:
        return None

    if target_mime is None:
        # no conversion requested
        return filename

    if target_extension is None:
        target_extension = gmMimeLib.guess_ext_by_mimetype(mimetype = target_mime)

    target_fname = gmTools.get_unique_filename (
        prefix = 'gm-doc_obj-page_%s-converted-' % self._payload[self._idx['seq_idx']],
        suffix = target_extension
    )
    _log.debug('attempting conversion: [%s] -> [<%s>:%s]', filename, target_mime, target_fname)
    if gmMimeLib.convert_file (
        filename = filename,
        target_mime = target_mime,
        target_filename = target_fname
    ):
        return target_fname

    _log.warning('conversion failed')
    if not ignore_conversion_problems:
        return None

    _log.warning('programmed to ignore conversion problems, hoping receiver can handle [%s]', filename)
    return filename
300
# return all review records for this part, the current user's own
# review first, then the responsible reviewer's, then by recency
cmd = u"""
select
reviewer,
reviewed_when,
is_technically_abnormal,
clinically_relevant,
is_review_by_responsible_reviewer,
is_your_review,
coalesce(comment, '')
from blobs.v_reviewed_doc_objects
where pk_doc_obj = %s
order by
is_your_review desc,
is_review_by_responsible_reviewer desc,
reviewed_when desc
"""
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
return rows
320
# the document this part belongs to
return cDocument(aPK_obj = self._payload[self._idx['pk_doc']])
323
324
325
327
328 if not (os.access(fname, os.R_OK) and os.path.isfile(fname)):
329 _log.error('[%s] is not a readable file' % fname)
330 return False
331
332 if not gmPG2.file2bytea (
333 query = u"UPDATE blobs.doc_obj SET data = %(data)s::bytea WHERE pk = %(pk)s RETURNING md5(data) AS md5",
334 filename = fname,
335 args = {'pk': self.pk_obj},
336 file_md5 = gmTools.file2md5(filename = fname, return_hex = True)
337 ):
338 return False
339
340
341 self.refetch_payload()
342 return True
343
def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
    """Record or update the current user's review of this part.

    INSERTs a review row if the current staff member has none
    for this part yet, otherwise UPDATEs the existing row.
    Always returns True.
    """
    # does this user already have a review row for this part ?
    cmd = u"""
select pk
from blobs.reviewed_doc_objs
where
fk_reviewed_row = %s and
fk_reviewer = (select pk from dem.staff where db_user = current_user)"""
    rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])

    # no review row yet -> build an INSERT
    if len(rows) == 0:
        cols = [
            u"fk_reviewer",
            u"fk_reviewed_row",
            u"is_technically_abnormal",
            u"clinically_relevant"
        ]
        # fk_reviewer comes from the sub-SELECT inside VALUES
        # below, hence only three placeholders here
        vals = [
            u'%(fk_row)s',
            u'%(abnormal)s',
            u'%(relevant)s'
        ]
        args = {
            'fk_row': self.pk_obj,
            'abnormal': technically_abnormal,
            'relevant': clinically_relevant
        }
        cmd = u"""
insert into blobs.reviewed_doc_objs (
%s
) values (
(select pk from dem.staff where db_user=current_user),
%s
)""" % (', '.join(cols), ', '.join(vals))

    # review row exists -> build an UPDATE
    if len(rows) == 1:
        pk_row = rows[0][0]
        args = {
            'abnormal': technically_abnormal,
            'relevant': clinically_relevant,
            'pk_row': pk_row
        }
        cmd = u"""
update blobs.reviewed_doc_objs set
is_technically_abnormal = %(abnormal)s,
clinically_relevant = %(relevant)s
where
pk=%(pk_row)s"""

    # run whichever statement was built above
    rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])

    return True
397
# make this part the "current" patient photograph by assigning it
# the highest seq_idx among its sibling parts
if self._payload[self._idx['type']] != u'patient photograph':
    return False

# find the next free sequence index for this document
rows, idx = gmPG2.run_ro_queries (
    queries = [{
        'cmd': u'select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s',
        'args': {'doc_id': self._payload[self._idx['pk_doc']]}
    }]
)
self._payload[self._idx['seq_idx']] = rows[0][0]
self._is_modified = True
self.save_payload()
411
413
# export this part to a file and hand it to the configured viewer
# for its MIME type; returns (success, error message)
fname = self.export_to_file(aChunkSize = chunksize)
if fname is None:
    return False, ''

success, msg = gmMimeLib.call_viewer_on_file(fname, block = block)
if not success:
    return False, msg

return True, ''
423
440
473
# remove one document part via the server-side function, which
# audits the deletion against <encounter_pk>
cmd = u"select blobs.delete_document_part(%(pk)s, %(enc)s)"
args = {'pk': part_pk, 'enc': encounter_pk}
rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
return
479
# SQL template for fetching document rows plus the aggregated list
# of their part sequence indices; the %s slot receives a WHERE
# condition (used by cDocument and the various search helpers)
_sql_fetch_document_fields = u"""
SELECT
*,
COALESCE (
(SELECT array_agg(seq_idx) FROM blobs.doc_obj b_do WHERE b_do.fk_doc = b_vdm.pk_doc),
ARRAY[]::integer[]
)
AS seq_idx_list
FROM
blobs.v_doc_med b_vdm
WHERE
%s
"""
493
class cDocument(gmBusinessDBObject.cBusinessDBObject):
    """Represents one medical document."""

    _cmd_fetch_payload = _sql_fetch_document_fields % u"pk_doc = %s"
    # optimistic locking: the UPDATE only hits while xmin is
    # unchanged; the follow-up SELECT refreshes xmin afterwards
    _cmds_store_payload = [
        u"""update blobs.doc_med set
fk_type = %(pk_type)s,
fk_episode = %(pk_episode)s,
fk_encounter = %(pk_encounter)s,
clin_when = %(clin_when)s,
comment = gm.nullify_empty_string(%(comment)s),
ext_ref = gm.nullify_empty_string(%(ext_ref)s)
where
pk = %(pk_doc)s and
xmin = %(xmin_doc_med)s""",
        u"""select xmin_doc_med from blobs.v_doc_med where pk_doc = %(pk_doc)s"""
    ]

    # fields writable via item assignment on instances
    _updatable_fields = [
        'pk_type',
        'comment',
        'clin_when',
        'ext_ref',
        'pk_episode',
        'pk_encounter'
    ]
520
# invalidate the cached unreviewed-parts flag (set lazily in
# _get_has_unreviewed_parts(), stored name-mangled as
# _cDocument__has_unreviewed_parts) before re-reading the row
try: del self.__has_unreviewed_parts
except AttributeError: pass

return super(cDocument, self).refetch_payload(ignore_changes = ignore_changes)
526
528 """Get document descriptions.
529
530 - will return a list of rows
531 """
532 if max_lng is None:
533 cmd = u"SELECT pk, text FROM blobs.doc_desc WHERE fk_doc = %s"
534 else:
535 cmd = u"SELECT pk, substring(text from 1 for %s) FROM blobs.doc_desc WHERE fk_doc=%%s" % max_lng
536 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
537 return rows
538
543
# update one description (row <pk>) of this document
cmd = u"update blobs.doc_desc set text = %(desc)s where fk_doc = %(doc)s and pk = %(pk_desc)s"
gmPG2.run_rw_queries(queries = [
    {'cmd': cmd, 'args': {'doc': self.pk_obj, 'pk_desc': pk, 'desc': description}}
])
return True
550
# delete one description row (<pk>) belonging to this document
cmd = u"delete from blobs.doc_desc where fk_doc = %(doc)s and pk = %(desc)s"
gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': {'doc': self.pk_obj, 'desc': pk}}])
return True
555
560
561 parts = property(_get_parts, lambda x:x)
562
"""Add a part to the document.

Creates an empty doc_obj row with the next free seq_idx, then
streams the content of <file> into it; on import failure the
empty row is removed again.

Returns the new cDocumentPart, or None on failure.
"""
# NOTE: the parameter is named "file", shadowing the builtin --
# kept as-is since the signature is public API
cmd = u"""
insert into blobs.doc_obj (
fk_doc, data, seq_idx
) VALUES (
%(doc_id)s,
''::bytea,
(select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s)
)"""
rows, idx = gmPG2.run_rw_queries (
    queries = [
        {'cmd': cmd, 'args': {'doc_id': self.pk_obj}},
        # fetch the PK of the row just inserted
        {'cmd': u"select currval('blobs.doc_obj_pk_seq')"}
    ],
    return_data = True
)

pk_part = rows[0][0]
new_part = cDocumentPart(aPK_obj = pk_part)
if not new_part.update_data_from_file(fname=file):
    _log.error('cannot import binary data from [%s] into document part' % file)
    # roll the empty placeholder row back out
    gmPG2.run_rw_queries (
        queries = [
            {'cmd': u"delete from blobs.doc_obj where pk = %s", 'args': [pk_part]}
        ]
    )
    return None
new_part['filename'] = file
new_part.save_payload()

return new_part
596
598
# import each file in <files> as a new part of this document;
# returns (False, error message, offending filename) on failure,
# (True, '', list of new cDocumentPart instances) on success
new_parts = []

for filename in files:
    new_part = self.add_part(file = filename)
    if new_part is None:
        msg = 'cannot instantiate document part object'
        _log.error(msg)
        return (False, msg, filename)
    new_parts.append(new_part)

    # assumes the reviewer is set on each part as it is added
    # (per-part, inside this loop) -- the flattened indentation
    # of the source is ambiguous here; TODO confirm upstream
    if reviewer is not None:
        new_part['pk_intended_reviewer'] = reviewer
        success, data = new_part.save_payload()
        if not success:
            msg = 'cannot set reviewer to [%s]' % reviewer
            _log.error(msg)
            _log.error(str(data))
            return (False, msg, filename)

return (True, '', new_parts)
619
621 fnames = []
622 for part in self.parts:
623 fname = part.export_to_file(aChunkSize = chunksize)
624 if export_dir is not None:
625 shutil.move(fname, export_dir)
626 fname = os.path.join(export_dir, os.path.split(fname)[1])
627 fnames.append(fname)
628 return fnames
629
# lazily computed, cached flag: does this document have any part
# that is not reviewed yet ? (cache dropped in refetch_payload())
try:
    # served from cache if already computed
    return self.__has_unreviewed_parts
except AttributeError:
    pass

cmd = u"SELECT EXISTS(SELECT 1 FROM blobs.v_obj4doc_no_data WHERE pk_doc = %(pk)s AND reviewed IS FALSE)"
args = {'pk': self.pk_obj}
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
self.__has_unreviewed_parts = rows[0][0]

return self.__has_unreviewed_parts

# write via the lambda is a silent no-op (read-mostly property)
has_unreviewed_parts = property(_get_has_unreviewed_parts, lambda x:x)
644
def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
    """Apply the given review status to every part of this document.

    Stops at, and reports failure for, the first part that cannot
    be updated; returns True when all parts were marked reviewed.
    """
    return all (
        part.set_reviewed(technically_abnormal, clinically_relevant)
        for part in self.parts
    )
651
# set the intended reviewer on every part of this document;
# stops and returns False at the first part that fails to save
for part in self.parts:
    part['pk_intended_reviewer'] = reviewer
    success, data = part.save_payload()
    if not success:
        _log.error('cannot set reviewer to [%s]' % reviewer)
        _log.error(str(data))
        return False
return True
661
689
691 """Returns new document instance or raises an exception.
692 """
693 cmd = u"""INSERT INTO blobs.doc_med (fk_type, fk_encounter, fk_episode) VALUES (%(type)s, %(enc)s, %(epi)s) RETURNING pk"""
694 try:
695 int(document_type)
696 except ValueError:
697 cmd = u"""
698 INSERT INTO blobs.doc_med (
699 fk_type,
700 fk_encounter,
701 fk_episode
702 ) VALUES (
703 coalesce (
704 (SELECT pk from blobs.doc_type bdt where bdt.name = %(type)s),
705 (SELECT pk from blobs.doc_type bdt where _(bdt.name) = %(type)s)
706 ),
707 %(enc)s,
708 %(epi)s
709 ) RETURNING pk"""
710
711 args = {'type': document_type, 'enc': encounter, 'epi': episode}
712 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
713 doc = cDocument(aPK_obj = rows[0][0])
714 return doc
715
"""Searches for documents with the given patient and type ID.

patient_id -- mandatory patient PK
type_id -- optional document type PK filter
external_reference -- optional ext_ref filter

Returns a list of cDocument instances.
"""
if patient_id is None:
    raise ValueError('need patient id to search for document')

args = {'pat_id': patient_id, 'type_id': type_id, 'ref': external_reference}
where_parts = [u'pk_patient = %(pat_id)s']

if type_id is not None:
    where_parts.append(u'pk_type = %(type_id)s')

if external_reference is not None:
    where_parts.append(u'ext_ref = %(ref)s')

cmd = _sql_fetch_document_fields % u' AND '.join(where_parts)
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
return [ cDocument(row = {'data': r, 'idx': idx, 'pk_field': 'pk_doc'}) for r in rows ]
733
735
# delete a document via the server-side function (which also
# handles its parts), audited against <encounter_id>; returns
# True/False depending on the function's result
cmd = u"SELECT blobs.delete_document(%(pk)s, %(enc)s)"
args = {'pk': document_id, 'enc': encounter_id}
rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
if not rows[0][0]:
    _log.error('cannot delete document [%s]', document_id)
    return False
return True
743
745
# move ALL documents of type <original_type> over to <target_type>
_log.debug('reclassifying documents by type')
_log.debug('original: %s', original_type)
_log.debug('target: %s', target_type)

if target_type['pk_doc_type'] == original_type['pk_doc_type']:
    # same type -> nothing to do
    return True

cmd = u"""
update blobs.doc_med set
fk_type = %(new_type)s
where
fk_type = %(old_type)s
"""
args = {u'new_type': target_type['pk_doc_type'], u'old_type': original_type['pk_doc_type']}

gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])

return True
764
765
"""Represents a document type."""
_cmd_fetch_payload = u"""select * from blobs.v_doc_type where pk_doc_type=%s"""
# optimistic locking via xmin, same pattern as the other
# business objects in this module
_cmds_store_payload = [
u"""update blobs.doc_type set
name = %(type)s
where
pk=%(pk_obj)s and
xmin=%(xmin_doc_type)s""",
u"""select xmin_doc_type from blobs.v_doc_type where pk_doc_type = %(pk_obj)s"""
]
# only the (untranslated) type name can be written
_updatable_fields = ['type']
778
780
# store <translation> as the current-language translation of this
# document type's name; returns True on success
if translation.strip() == '':
    return False

if translation.strip() == self._payload[self._idx['l10n_type']].strip():
    # unchanged -> nothing to do
    return True

rows, idx = gmPG2.run_rw_queries (
    queries = [
        # register the original string for translation ...
        {'cmd': u'select i18n.i18n(%s)', 'args': [self._payload[self._idx['type']]]},
        # ... then attach the translation for the current language
        {'cmd': u'select i18n.upd_tx((select i18n.get_curr_lang()), %(orig)s, %(tx)s)',
         'args': {
            'orig': self._payload[self._idx['type']],
            'tx': translation
         }
        }
    ],
    return_data = True
)
if not rows[0][0]:
    _log.error('cannot set translation to [%s]' % translation)
    return False

# reload so l10n_type reflects the new translation
return self.refetch_payload()
804
806 rows, idx = gmPG2.run_ro_queries (
807 queries = [{'cmd': u"SELECT * FROM blobs.v_doc_type"}],
808 get_col_idx = True
809 )
810 doc_types = []
811 for row in rows:
812 row_def = {'pk_field': 'pk_doc_type', 'idx': idx, 'data': row}
813 doc_types.append(cDocumentType(row = row_def))
814 return doc_types
815
# map a document type name to its PK: exact (untranslated) name
# first, then the localized name; returns None when unknown
args = {'typ': document_type.strip()}

cmd = u'SELECT pk FROM blobs.doc_type WHERE name = %(typ)s'
rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
if len(rows) == 0:
    # no match on the base name -> try the translated name
    cmd = u'SELECT pk FROM blobs.doc_type WHERE _(name) = %(typ)s'
    rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)

if len(rows) == 0:
    return None

return rows[0]['pk']
829
831
# get-or-create a document type by its (untranslated) name and
# return it as a cDocumentType instance
cmd = u'select pk from blobs.doc_type where name = %s'
rows, idx = gmPG2.run_ro_queries (
    queries = [{'cmd': cmd, 'args': [document_type]}]
)
if len(rows) == 0:
    # not found -> create it
    cmd1 = u"INSERT INTO blobs.doc_type (name) VALUES (%s) RETURNING pk"
    rows, idx = gmPG2.run_rw_queries (
        queries = [{'cmd': cmd1, 'args': [document_type]}],
        return_data = True
    )
return cDocumentType(aPK_obj = rows[0][0])
843
# delete a document type unless documents still reference it;
# returns False when the type is in use, True after deletion
if document_type['is_in_use']:
    return False
gmPG2.run_rw_queries (
    queries = [{
        'cmd': u'delete from blobs.doc_type where pk=%s',
        'args': [document_type['pk_doc_type']]
    }]
)
return True
854
864
865
866
# self-test scaffolding: only executed when run as a script with
# the single argument "test"
if __name__ == '__main__':

    if len(sys.argv) < 2:
        sys.exit()

    if sys.argv[1] != u'test':
        sys.exit()
874
875
877
878 print "----------------------"
879 print "listing document types"
880 print "----------------------"
881
882 for dt in get_document_types():
883 print dt
884
885 print "------------------------------"
886 print "testing document type handling"
887 print "------------------------------"
888
889 dt = create_document_type(document_type = 'dummy doc type for unit test 1')
890 print "created:", dt
891
892 dt['type'] = 'dummy doc type for unit test 2'
893 dt.save_payload()
894 print "changed base name:", dt
895
896 dt.set_translation(translation = 'Dummy-Dokumenten-Typ fuer Unit-Test')
897 print "translated:", dt
898
899 print "deleted:", delete_document_type(document_type = dt)
900
901 return
902
904
905 print "-----------------------"
906 print "testing document import"
907 print "-----------------------"
908
909 docs = search_for_documents(patient_id=12)
910 doc = docs[0]
911 print "adding to doc:", doc
912
913 fname = sys.argv[1]
914 print "adding from file:", fname
915 part = doc.add_part(file=fname)
916 print "new part:", part
917
918 return
919
930
931
932 from Gnumed.pycommon import gmI18N
933 gmI18N.activate_locale()
934 gmI18N.install_domain()
935
936
937
938 test_get_documents()
939
940
941
942
943