1 """This module encapsulates a document stored in a GNUmed database.
2
3 @copyright: GPL v2 or later
4 """
5
6 __version__ = "$Revision: 1.118 $"
7 __author__ = "Karsten Hilbert <Karsten.Hilbert@gmx.net>"
8
9 import sys, os, shutil, os.path, types, time, logging
10
11
12 if __name__ == '__main__':
13 sys.path.insert(0, '../../')
14 from Gnumed.pycommon import gmExceptions
15 from Gnumed.pycommon import gmBusinessDBObject
16 from Gnumed.pycommon import gmPG2
17 from Gnumed.pycommon import gmTools
18 from Gnumed.pycommon import gmMimeLib
19 from Gnumed.pycommon import gmDateTime
20
21
22 _log = logging.getLogger('gm.docs')
23 _log.info(__version__)
24
25 MUGSHOT=26
26 DOCUMENT_TYPE_VISUAL_PROGRESS_NOTE = u'visual progress note'
27 DOCUMENT_TYPE_PRESCRIPTION = u'prescription'

class cDocumentFolder:
	"""Represents a folder with medical documents for a single patient."""

	def __init__(self, aPKey = None):
		"""Fails if

		- patient referenced by aPKey does not exist
		"""
		self.pk_patient = aPKey
		if not self._pkey_exists():
			raise gmExceptions.ConstructorError, "No patient with PK [%s] in database." % aPKey

		_log.debug('instantiated document folder for patient [%s]' % self.pk_patient)

	def _pkey_exists(self):
		"""Does this primary key exist ?

		- true/false/None
		"""
		rows, idx = gmPG2.run_ro_queries(queries = [
			{'cmd': u"select exists(select pk from dem.identity where pk = %s)", 'args': [self.pk_patient]}
		])
		if not rows[0][0]:
			_log.error("patient [%s] not in demographic database" % self.pk_patient)
			return None
		return True

	def get_latest_freediams_prescription(self):
		cmd = u"""
			SELECT pk_doc
			FROM blobs.v_doc_med
			WHERE
				pk_patient = %(pat)s
					AND
				type = %(typ)s
					AND
				ext_ref = %(ref)s
			ORDER BY
				clin_when DESC
			LIMIT 1
		"""
		args = {
			'pat': self.pk_patient,
			'typ': DOCUMENT_TYPE_PRESCRIPTION,
			'ref': u'FreeDiams'
		}
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
		if len(rows) == 0:
			_log.info('no FreeDiams prescription available for patient [%s]' % self.pk_patient)
			return None
		prescription = cDocument(aPK_obj = rows[0][0])
		return prescription

	def get_latest_mugshot(self):
		cmd = u"SELECT pk_obj FROM blobs.v_latest_mugshot WHERE pk_patient = %s"
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
		if len(rows) == 0:
			_log.info('no mugshots available for patient [%s]' % self.pk_patient)
			return None
		return cDocumentPart(aPK_obj = rows[0][0])

	latest_mugshot = property(get_latest_mugshot, lambda x:x)

	def get_mugshot_list(self, latest_only=True):
		if latest_only:
			cmd = u"select pk_doc, pk_obj from blobs.v_latest_mugshot where pk_patient=%s"
		else:
			cmd = u"""
				select
					vdm.pk_doc as pk_doc,
					dobj.pk as pk_obj
				from
					blobs.v_doc_med vdm,
					blobs.doc_obj dobj
				where
					vdm.pk_type = (select pk from blobs.doc_type where name = 'patient photograph')
					and vdm.pk_patient = %s
					and dobj.fk_doc = vdm.pk_doc
			"""
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
		return rows

	def get_doc_list(self, doc_type=None):
		"""return flat list of document IDs"""
		args = {
			'ID': self.pk_patient,
			'TYP': doc_type
		}

		cmd = u"""
			select vdm.pk_doc
			from blobs.v_doc_med vdm
			where
				vdm.pk_patient = %%(ID)s
				%s
			order by vdm.clin_when"""

		if doc_type is None:
			cmd = cmd % u''
		else:
			try:
				int(doc_type)
				cmd = cmd % u'and vdm.pk_type = %(TYP)s'
			except (TypeError, ValueError):
				cmd = cmd % u'and vdm.pk_type = (select pk from blobs.doc_type where name = %(TYP)s)'

		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
		doc_ids = []
		for row in rows:
			doc_ids.append(row[0])
		return doc_ids

	def get_unsigned_documents(self):
		args = {'pat': self.pk_patient}
		cmd = _sql_fetch_document_fields % u"""
			pk_doc IN (
				SELECT DISTINCT ON (b_vo.pk_doc) b_vo.pk_doc
				FROM blobs.v_obj4doc_no_data b_vo
				WHERE
					pk_patient = %(pat)s
						AND
					reviewed IS FALSE
			)
			ORDER BY clin_when DESC"""
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
		return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]

	def get_documents(self, doc_type=None, episodes=None, encounter=None, order_by=None, exclude_unsigned=False):
		"""Return list of documents."""
		args = {
			'pat': self.pk_patient,
			'type': doc_type,
			'enc': encounter
		}
		where_parts = [u'pk_patient = %(pat)s']

		if doc_type is not None:
			try:
				int(doc_type)
				where_parts.append(u'pk_type = %(type)s')
			except (TypeError, ValueError):
				where_parts.append(u'pk_type = (SELECT pk FROM blobs.doc_type WHERE name = %(type)s)')

		if (episodes is not None) and (len(episodes) > 0):
			where_parts.append(u'pk_episode IN %(epi)s')
			args['epi'] = tuple(episodes)

		if encounter is not None:
			where_parts.append(u'pk_encounter = %(enc)s')

		if exclude_unsigned:
			where_parts.append(u'pk_doc IN (SELECT b_vo.pk_doc FROM blobs.v_obj4doc_no_data b_vo WHERE b_vo.pk_patient = %(pat)s AND b_vo.reviewed IS TRUE)')

		if order_by is None:
			order_by = u'ORDER BY clin_when'

		cmd = u"%s\n%s" % (_sql_fetch_document_fields % u' AND '.join(where_parts), order_by)
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)

		return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]

	documents = property(get_documents, lambda x:x)

	def add_document(self, document_type=None, encounter=None, episode=None):
		return create_document(document_type = document_type, encounter = encounter, episode = episode)

_sql_fetch_document_part_fields = u"select * from blobs.v_obj4doc_no_data where %s"

class cDocumentPart(gmBusinessDBObject.cBusinessDBObject):
	"""Represents one part of a medical document."""

	_cmd_fetch_payload = _sql_fetch_document_part_fields % u"pk_obj = %s"
	_cmds_store_payload = [
		u"""UPDATE blobs.doc_obj SET
			seq_idx = %(seq_idx)s,
			comment = gm.nullify_empty_string(%(obj_comment)s),
			filename = gm.nullify_empty_string(%(filename)s),
			fk_intended_reviewer = %(pk_intended_reviewer)s,
			fk_doc = %(pk_doc)s
		WHERE
			pk = %(pk_obj)s
				AND
			xmin = %(xmin_doc_obj)s
		RETURNING
			xmin AS xmin_doc_obj"""
	]
	_updatable_fields = [
		'seq_idx',
		'obj_comment',
		'pk_intended_reviewer',
		'filename',
		'pk_doc'
	]

	def export_to_file(self, aChunkSize=0, filename=None, target_mime=None, target_extension=None, ignore_conversion_problems=False):

		if self._payload[self._idx['size']] == 0:
			return None

		if filename is None:
			suffix = None
			if self._payload[self._idx['filename']] is not None:
				name, suffix = os.path.splitext(self._payload[self._idx['filename']])
				suffix = suffix.strip()
				if suffix == u'':
					suffix = None
			filename = gmTools.get_unique_filename (
				prefix = 'gm-doc_obj-page_%s-' % self._payload[self._idx['seq_idx']],
				suffix = suffix
			)

		success = gmPG2.bytea2file (
			data_query = {
				'cmd': u'SELECT substring(data from %(start)s for %(size)s) FROM blobs.doc_obj WHERE pk=%(pk)s',
				'args': {'pk': self.pk_obj}
			},
			filename = filename,
			chunk_size = aChunkSize,
			data_size = self._payload[self._idx['size']]
		)

		if not success:
			return None

		if target_mime is None:
			return filename

		if target_extension is None:
			target_extension = gmMimeLib.guess_ext_by_mimetype(mimetype = target_mime)

		target_fname = gmTools.get_unique_filename (
			prefix = 'gm-doc_obj-page_%s-converted-' % self._payload[self._idx['seq_idx']],
			suffix = target_extension
		)
		_log.debug('attempting conversion: [%s] -> [<%s>:%s]', filename, target_mime, target_fname)
		if gmMimeLib.convert_file (
			filename = filename,
			target_mime = target_mime,
			target_filename = target_fname
		):
			return target_fname

		_log.warning('conversion failed')
		if not ignore_conversion_problems:
			return None

		_log.warning('programmed to ignore conversion problems, hoping receiver can handle [%s]', filename)
		return filename

	def get_reviews(self):
		cmd = u"""
			select
				reviewer,
				reviewed_when,
				is_technically_abnormal,
				clinically_relevant,
				is_review_by_responsible_reviewer,
				is_your_review,
				coalesce(comment, '')
			from blobs.v_reviewed_doc_objects
			where pk_doc_obj = %s
			order by
				is_your_review desc,
				is_review_by_responsible_reviewer desc,
				reviewed_when desc
		"""
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
		return rows

	def get_containing_document(self):
		return cDocument(aPK_obj = self._payload[self._idx['pk_doc']])

	def update_data_from_file(self, fname=None):
		# sanity check
		if not (os.access(fname, os.R_OK) and os.path.isfile(fname)):
			_log.error('[%s] is not a readable file' % fname)
			return False

		gmPG2.file2bytea (
			query = u"UPDATE blobs.doc_obj SET data = %(data)s::bytea WHERE pk = %(pk)s",
			filename = fname,
			args = {'pk': self.pk_obj}
		)

		# refresh payload (including xmin) after the out-of-band update
		self.refetch_payload()
		return True

	def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
		# already reviewed by the current user ?
		cmd = u"""
			select pk
			from blobs.reviewed_doc_objs
			where
				fk_reviewed_row = %s and
				fk_reviewer = (select pk from dem.staff where db_user = current_user)"""
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])

		# INSERT needed
		if len(rows) == 0:
			cols = [
				u"fk_reviewer",
				u"fk_reviewed_row",
				u"is_technically_abnormal",
				u"clinically_relevant"
			]
			vals = [
				u'%(fk_row)s',
				u'%(abnormal)s',
				u'%(relevant)s'
			]
			args = {
				'fk_row': self.pk_obj,
				'abnormal': technically_abnormal,
				'relevant': clinically_relevant
			}
			cmd = u"""
				insert into blobs.reviewed_doc_objs (
					%s
				) values (
					(select pk from dem.staff where db_user=current_user),
					%s
				)""" % (', '.join(cols), ', '.join(vals))

		# UPDATE needed
		if len(rows) == 1:
			pk_row = rows[0][0]
			args = {
				'abnormal': technically_abnormal,
				'relevant': clinically_relevant,
				'pk_row': pk_row
			}
			cmd = u"""
				update blobs.reviewed_doc_objs set
					is_technically_abnormal = %(abnormal)s,
					clinically_relevant = %(relevant)s
				where
					pk=%(pk_row)s"""

		rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])

		return True

	def set_as_active_photograph(self):
		if self._payload[self._idx['type']] != u'patient photograph':
			return False
		# make this part the most recent mugshot by bumping its sequence index
		rows, idx = gmPG2.run_ro_queries (
			queries = [{
				'cmd': u'select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s',
				'args': {'doc_id': self._payload[self._idx['pk_doc']]}
			}]
		)
		self._payload[self._idx['seq_idx']] = rows[0][0]
		self._is_modified = True
		self.save_payload()

	def display_via_mime(self, chunksize=0, block=None):
		fname = self.export_to_file(aChunkSize = chunksize)
		if fname is None:
			return False, ''

		success, msg = gmMimeLib.call_viewer_on_file(fname, block = block)
		if not success:
			return False, msg

		return True, ''

def delete_document_part(part_pk=None, encounter_pk=None):
	cmd = u"select blobs.delete_document_part(%(pk)s, %(enc)s)"
	args = {'pk': part_pk, 'enc': encounter_pk}
	rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
	return

_sql_fetch_document_fields = u"""
	SELECT
		*,
		COALESCE (
			(SELECT array_agg(seq_idx) FROM blobs.doc_obj b_do WHERE b_do.fk_doc = b_vdm.pk_doc),
			ARRAY[]::integer[]
		)
			AS seq_idx_list
	FROM
		blobs.v_doc_med b_vdm
	WHERE
		%s
"""

class cDocument(gmBusinessDBObject.cBusinessDBObject):
	"""Represents one medical document."""

	_cmd_fetch_payload = _sql_fetch_document_fields % u"pk_doc = %s"
	_cmds_store_payload = [
		u"""update blobs.doc_med set
			fk_type = %(pk_type)s,
			fk_episode = %(pk_episode)s,
			fk_encounter = %(pk_encounter)s,
			clin_when = %(clin_when)s,
			comment = gm.nullify_empty_string(%(comment)s),
			ext_ref = gm.nullify_empty_string(%(ext_ref)s)
		where
			pk = %(pk_doc)s and
			xmin = %(xmin_doc_med)s""",
		u"""select xmin_doc_med from blobs.v_doc_med where pk_doc = %(pk_doc)s"""
	]

	_updatable_fields = [
		'pk_type',
		'comment',
		'clin_when',
		'ext_ref',
		'pk_episode',
		'pk_encounter'
	]

	def refetch_payload(self, ignore_changes=True):
		try: del self.__has_unreviewed_parts
		except AttributeError: pass

		return super(cDocument, self).refetch_payload(ignore_changes = ignore_changes)

	def get_descriptions(self, max_lng=250):
		"""Get document descriptions.

		- will return a list of rows
		"""
		if max_lng is None:
			cmd = u"SELECT pk, text FROM blobs.doc_desc WHERE fk_doc = %s"
		else:
			cmd = u"SELECT pk, substring(text from 1 for %s) FROM blobs.doc_desc WHERE fk_doc=%%s" % max_lng
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
		return rows

	def update_description(self, pk=None, description=None):
		cmd = u"update blobs.doc_desc set text = %(desc)s where fk_doc = %(doc)s and pk = %(pk_desc)s"
		gmPG2.run_rw_queries(queries = [
			{'cmd': cmd, 'args': {'doc': self.pk_obj, 'pk_desc': pk, 'desc': description}}
		])
		return True

	def delete_description(self, pk=None):
		cmd = u"delete from blobs.doc_desc where fk_doc = %(doc)s and pk = %(desc)s"
		gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': {'doc': self.pk_obj, 'desc': pk}}])
		return True

	def _get_parts(self):
		# note: definition reconstructed from its use via the .parts property below
		cmd = _sql_fetch_document_part_fields % u"pk_doc = %(pk)s ORDER BY seq_idx"
		args = {'pk': self.pk_obj}
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
		return [ cDocumentPart(row = {'pk_field': 'pk_obj', 'idx': idx, 'data': r}) for r in rows ]
	parts = property(_get_parts, lambda x:x)

	def add_part(self, file=None):
		"""Add a part to the document."""
		# create dummy part row with the next sequence index
		cmd = u"""
			insert into blobs.doc_obj (
				fk_doc, data, seq_idx
			) VALUES (
				%(doc_id)s,
				''::bytea,
				(select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s)
			)"""
		rows, idx = gmPG2.run_rw_queries (
			queries = [
				{'cmd': cmd, 'args': {'doc_id': self.pk_obj}},
				{'cmd': u"select currval('blobs.doc_obj_pk_seq')"}
			],
			return_data = True
		)
		# fill new part with binary data from the file
		pk_part = rows[0][0]
		new_part = cDocumentPart(aPK_obj = pk_part)
		if not new_part.update_data_from_file(fname=file):
			_log.error('cannot import binary data from [%s] into document part' % file)
			gmPG2.run_rw_queries (
				queries = [
					{'cmd': u"delete from blobs.doc_obj where pk = %s", 'args': [pk_part]}
				]
			)
			return None
		new_part['filename'] = file
		new_part.save_payload()

		return new_part

	def add_parts_from_files(self, files=None, reviewer=None):

		new_parts = []

		for filename in files:
			new_part = self.add_part(file = filename)
			if new_part is None:
				msg = 'cannot instantiate document part object'
				_log.error(msg)
				return (False, msg, filename)
			new_parts.append(new_part)

			if reviewer is not None:
				new_part['pk_intended_reviewer'] = reviewer
				success, data = new_part.save_payload()
				if not success:
					msg = 'cannot set reviewer to [%s]' % reviewer
					_log.error(msg)
					_log.error(str(data))
					return (False, msg, filename)

		return (True, '', new_parts)

	def export_parts_to_files(self, export_dir=None, chunksize=0):
		fnames = []
		for part in self.parts:
			# use the original filename if known, else generate one
			fname = os.path.basename(gmTools.coalesce (
				part['filename'],
				u'%s%s%s_%s' % (part['l10n_type'], gmTools.coalesce(part['ext_ref'], '-', '-%s-'), _('part'), part['seq_idx'])
			))
			if export_dir is not None:
				fname = os.path.join(export_dir, fname)
			fnames.append(part.export_to_file(aChunkSize = chunksize, filename = fname))
		return fnames

	def _get_has_unreviewed_parts(self):
		try:
			return self.__has_unreviewed_parts
		except AttributeError:
			pass

		cmd = u"SELECT EXISTS(SELECT 1 FROM blobs.v_obj4doc_no_data WHERE pk_doc = %(pk)s AND reviewed IS FALSE)"
		args = {'pk': self.pk_obj}
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
		self.__has_unreviewed_parts = rows[0][0]

		return self.__has_unreviewed_parts

	has_unreviewed_parts = property(_get_has_unreviewed_parts, lambda x:x)

	def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):

		for part in self.parts:
			if not part.set_reviewed(technically_abnormal, clinically_relevant):
				return False
		return True

	def set_primary_reviewer(self, reviewer=None):
		for part in self.parts:
			part['pk_intended_reviewer'] = reviewer
			success, data = part.save_payload()
			if not success:
				_log.error('cannot set reviewer to [%s]' % reviewer)
				_log.error(str(data))
				return False
		return True

def create_document(document_type=None, encounter=None, episode=None):
	"""Returns new document instance or raises an exception."""
	cmd = u"""INSERT INTO blobs.doc_med (fk_type, fk_encounter, fk_episode) VALUES (%(type)s, %(enc)s, %(epi)s) RETURNING pk"""
	try:
		int(document_type)
	except ValueError:
		# document type given by name, not by primary key
		cmd = u"""
			INSERT INTO blobs.doc_med (
				fk_type,
				fk_encounter,
				fk_episode
			) VALUES (
				coalesce (
					(SELECT pk from blobs.doc_type bdt where bdt.name = %(type)s),
					(SELECT pk from blobs.doc_type bdt where _(bdt.name) = %(type)s)
				),
				%(enc)s,
				%(epi)s
			) RETURNING pk"""

	args = {'type': document_type, 'enc': encounter, 'epi': episode}
	rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
	doc = cDocument(aPK_obj = rows[0][0])
	return doc
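
# Usage sketch (not part of the original module): how create_document() and
# cDocument.add_part() are typically combined to file a scanned document.
# The document type, encounter/episode keys and the file path are placeholders.
def _example_import_scan():
	doc = create_document (
		document_type = DOCUMENT_TYPE_PRESCRIPTION,		# or a blobs.doc_type primary key
		encounter = 1,									# placeholder encounter PK
		episode = 1										# placeholder episode PK
	)
	part = doc.add_part(file = '/tmp/scan.pdf')			# placeholder file name
	if part is not None:
		part.set_reviewed(technically_abnormal = False, clinically_relevant = True)
	return doc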

def search_for_documents(patient_id=None, type_id=None, external_reference=None):
	"""Searches for documents with the given patient and type ID."""
	if patient_id is None:
		raise ValueError('need patient id to search for document')

	args = {'pat_id': patient_id, 'type_id': type_id, 'ref': external_reference}
	where_parts = [u'pk_patient = %(pat_id)s']

	if type_id is not None:
		where_parts.append(u'pk_type = %(type_id)s')

	if external_reference is not None:
		where_parts.append(u'ext_ref = %(ref)s')

	cmd = _sql_fetch_document_fields % u' AND '.join(where_parts)
	rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
	return [ cDocument(row = {'data': r, 'idx': idx, 'pk_field': 'pk_doc'}) for r in rows ]

def delete_document(document_id=None, encounter_id=None):
	cmd = u"SELECT blobs.delete_document(%(pk)s, %(enc)s)"
	args = {'pk': document_id, 'enc': encounter_id}
	rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
	if not rows[0][0]:
		_log.error('cannot delete document [%s]', document_id)
		return False
	return True

def reclassify_documents_by_type(original_type=None, target_type=None):

	_log.debug('reclassifying documents by type')
	_log.debug('original: %s', original_type)
	_log.debug('target: %s', target_type)

	if target_type['pk_doc_type'] == original_type['pk_doc_type']:
		return True

	cmd = u"""
		update blobs.doc_med set
			fk_type = %(new_type)s
		where
			fk_type = %(old_type)s
	"""
	args = {u'new_type': target_type['pk_doc_type'], u'old_type': original_type['pk_doc_type']}

	gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])

	return True

class cDocumentType(gmBusinessDBObject.cBusinessDBObject):
	"""Represents a document type."""

	_cmd_fetch_payload = u"""select * from blobs.v_doc_type where pk_doc_type=%s"""
	_cmds_store_payload = [
		u"""update blobs.doc_type set
			name = %(type)s
		where
			pk=%(pk_obj)s and
			xmin=%(xmin_doc_type)s""",
		u"""select xmin_doc_type from blobs.v_doc_type where pk_doc_type = %(pk_obj)s"""
	]
	_updatable_fields = ['type']

	def set_translation(self, translation=None):

		if translation.strip() == '':
			return False

		if translation.strip() == self._payload[self._idx['l10n_type']].strip():
			return True

		rows, idx = gmPG2.run_rw_queries (
			queries = [
				{'cmd': u'select i18n.i18n(%s)', 'args': [self._payload[self._idx['type']]]},
				{'cmd': u'select i18n.upd_tx((select i18n.get_curr_lang()), %(orig)s, %(tx)s)',
					'args': {
						'orig': self._payload[self._idx['type']],
						'tx': translation
					}
				}
			],
			return_data = True
		)
		if not rows[0][0]:
			_log.error('cannot set translation to [%s]' % translation)
			return False

		return self.refetch_payload()

def get_document_types():
	rows, idx = gmPG2.run_ro_queries (
		queries = [{'cmd': u"SELECT * FROM blobs.v_doc_type"}],
		get_col_idx = True
	)
	doc_types = []
	for row in rows:
		row_def = {'pk_field': 'pk_doc_type', 'idx': idx, 'data': row}
		doc_types.append(cDocumentType(row = row_def))
	return doc_types

def get_document_type_pk(document_type=None):
	args = {'typ': document_type.strip()}

	cmd = u'SELECT pk FROM blobs.doc_type WHERE name = %(typ)s'
	rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
	if len(rows) == 0:
		# fall back to translated type names
		cmd = u'SELECT pk FROM blobs.doc_type WHERE _(name) = %(typ)s'
		rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)

	if len(rows) == 0:
		return None

	return rows[0]['pk']

def create_document_type(document_type=None):
	# check for potential dupes
	cmd = u'select pk from blobs.doc_type where name = %s'
	rows, idx = gmPG2.run_ro_queries (
		queries = [{'cmd': cmd, 'args': [document_type]}]
	)
	if len(rows) == 0:
		cmd1 = u"INSERT INTO blobs.doc_type (name) VALUES (%s) RETURNING pk"
		rows, idx = gmPG2.run_rw_queries (
			queries = [{'cmd': cmd1, 'args': [document_type]}],
			return_data = True
		)
	return cDocumentType(aPK_obj = rows[0][0])

def delete_document_type(document_type=None):
	if document_type['is_in_use']:
		return False
	gmPG2.run_rw_queries (
		queries = [{
			'cmd': u'delete from blobs.doc_type where pk=%s',
			'args': [document_type['pk_doc_type']]
		}]
	)
	return True

def get_ext_ref():
	"""This needs *considerably* more smarts."""
	# (function name assumed: generates an external-reference style document identifier)
	dirname = gmTools.get_unique_filename (
		prefix = '',
		suffix = time.strftime(".%Y%m%d-%H%M%S", time.localtime())
	)
	# extract name for dir
	path, doc_ID = os.path.split(dirname)
	return doc_ID


if __name__ == '__main__':

	if len(sys.argv) < 2:
		sys.exit()

	if sys.argv[1] != u'test':
		sys.exit()

	def test_doc_types():

		print "----------------------"
		print "listing document types"
		print "----------------------"

		for dt in get_document_types():
			print dt

		print "------------------------------"
		print "testing document type handling"
		print "------------------------------"

		dt = create_document_type(document_type = 'dummy doc type for unit test 1')
		print "created:", dt

		dt['type'] = 'dummy doc type for unit test 2'
		dt.save_payload()
		print "changed base name:", dt

		dt.set_translation(translation = 'Dummy-Dokumenten-Typ fuer Unit-Test')
		print "translated:", dt

		print "deleted:", delete_document_type(document_type = dt)

		return

	def test_adding_doc_part():

		print "-----------------------"
		print "testing document import"
		print "-----------------------"

		docs = search_for_documents(patient_id=12)
		doc = docs[0]
		print "adding to doc:", doc

		fname = sys.argv[1]
		print "adding from file:", fname
		part = doc.add_part(file=fname)
		print "new part:", part

		return
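
	# Minimal sketch of this test (the exact original body is not preserved here):
	# it simply lists the documents of a hard-coded test patient; patient PK 12
	# mirrors the value used in test_adding_doc_part() above.
	def test_get_documents():
		doc_folder = cDocumentFolder(aPKey = 12)
		print "documents on file for patient 12:"
		for doc in doc_folder.get_documents():
			print " ", doc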

	from Gnumed.pycommon import gmI18N
	gmI18N.activate_locale()
	gmI18N.install_domain()

	test_get_documents()