Coverage for application/qaqc/tator/tator_qaqc_processor.py: 8%
395 statements
import datetime
import math
from typing import List

import requests
import sys
import tator

from flask import session
from io import BytesIO
from pptx import Presentation
from pptx.dml.color import RGBColor
from pptx.enum.text import PP_ALIGN
from pptx.util import Inches, Pt

from application.image_review.tator.tator_localization_processor import TatorLocalizationProcessor
from application.util.constants import TERM_NORMAL, TERM_RED
from application.util.tator_localization_type import TatorLocalizationType


class TatorQaqcProcessor(TatorLocalizationProcessor):
    """
    Fetches annotation information from Tator given a project ID and a list of section IDs (deployments).
    Filters and formats the annotations for the various QA/QC checks.
    """
    def __init__(
            self,
            project_id: int,
            section_ids: List[str],
            api: tator.api,
            tator_url: str,
            darc_review_url: str = None,
    ):
        super().__init__(
            project_id=project_id,
            section_ids=section_ids,
            api=api,
            darc_review_url=darc_review_url,
            tator_url=tator_url,
        )

    def check_names_accepted(self):
        """
        Finds records with a scientific name or tentative ID that is not accepted in WoRMS.
        """
        print('Checking for accepted names...')
        sys.stdout.flush()
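        # cache of name -> accepted flag, so each unique name is only checked against WoRMS once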
        checked = {}
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                flag_record = False
                scientific_name = localization['attributes'].get('Scientific Name')
                tentative_id = localization['attributes'].get('Tentative ID')
                if scientific_name not in checked:
                    if scientific_name in self.phylogeny:
                        checked[scientific_name] = True
                    elif self.fetch_worms_phylogeny(scientific_name):
                        checked[scientific_name] = True
                    else:
                        localization['problems'] = 'Scientific Name'
                        checked[scientific_name] = False
                        flag_record = True
                elif not checked[scientific_name]:
                    localization['problems'] = 'Scientific Name'
                    flag_record = True
                if tentative_id:
                    if tentative_id not in checked:
                        if tentative_id in self.phylogeny:
                            checked[tentative_id] = True
                        elif self.fetch_worms_phylogeny(tentative_id):
                            checked[tentative_id] = True
                        else:
                            localization['problems'] = 'Tentative ID' if 'problems' not in localization else 'Scientific Name, Tentative ID'
                            checked[tentative_id] = False
                            flag_record = True
                    elif not checked[tentative_id]:
                        localization['problems'] = 'Tentative ID' if 'problems' not in localization else 'Scientific Name, Tentative ID'
                        flag_record = True
                if flag_record:
                    records_of_interest.append(localization)
            print(f'Found {len(records_of_interest)} localizations with unaccepted names from {section.deployment_name}!')
            section.localizations = records_of_interest
        # don't try to fetch names we already know are unaccepted again
        self.process_records(no_match_records={name for name, accepted in checked.items() if not accepted})

    def check_missing_qualifier(self):
        """
        Finds records that are classified higher than species but don't have a qualifier set (usually '--'). This
        check needs to call process_records first to populate the phylogeny.
        """
        self.process_records()
        actual_final_records = []
        for record in self.final_records:
            if not record.get('species') and record.get('qualifier', '--') == '--':
                record['problems'] = 'Scientific Name, Qualifier'
                actual_final_records.append(record)
        self.final_records = actual_final_records

    def check_stet_reason(self):
        """
        Finds records that have a qualifier of 'stet.' but no reason set.
        """
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                if localization['attributes'].get('Qualifier') == 'stet.' \
                        and localization['attributes'].get('Reason', '--') == '--':
                    localization['problems'] = 'Qualifier, Reason'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def check_attracted_not_attracted(self, attracted_dict: dict):
        """
        Finds all records that are marked as "attracted" but are saved as "not attracted" in the attracted_dict, and
        vice versa. Also flags all records with taxa that are marked as "attracted/not attracted" in the attracted_dict.
        """
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                scientific_name = localization['attributes'].get('Scientific Name')
                if scientific_name not in attracted_dict or attracted_dict[scientific_name] == 2:
                    localization['problems'] = 'Scientific Name, Attracted'
                    records_of_interest.append(localization)
                elif localization['attributes'].get('Attracted') == 'Attracted' and attracted_dict[scientific_name] == 0:
                    localization['problems'] = 'Scientific Name, Attracted'
                    records_of_interest.append(localization)
                elif localization['attributes'].get('Attracted') == 'Not Attracted' and attracted_dict[scientific_name] == 1:
                    localization['problems'] = 'Scientific Name, Attracted'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def check_same_name_qualifier(self):
        """
        Finds records that have the same scientific name/tentative ID combo but a different qualifier.
        """
        scientific_name_qualifiers = {}
        problem_scientific_names = set()
        for section in self.sections:
            # first pass: build dict of scientific name/tentative ID combos and their qualifiers, add to problem set if mismatch
            for localization in section.localizations:
                scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
                if scientific_name not in scientific_name_qualifiers:
                    scientific_name_qualifiers[scientific_name] = localization['attributes'].get('Qualifier')
                elif scientific_name_qualifiers[scientific_name] != localization['attributes'].get('Qualifier'):
                    problem_scientific_names.add(scientific_name)
        for section in self.sections:
            # second pass: add records with problem scientific names to records of interest
            records_of_interest = []
            for localization in section.localizations:
                scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
                if scientific_name in problem_scientific_names:
                    localization['problems'] = 'Scientific Name, Qualifier'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def check_non_target_not_attracted(self):
        """
        Finds records whose reason includes "non-target" but that are not marked as "not attracted".
        """
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                attracted = localization['attributes'].get('Attracted')
                reason = localization['attributes'].get('Reason') or ''  # fall back to '' so the membership check is safe
                if 'Non-target' in reason and attracted != 'Not Attracted':
                    localization['problems'] = 'Attracted, Reason'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def check_exists_in_image_references(self, image_refs: dict):
        """
        Finds records that do not exist in the image references db (combo of scientific name, tentative ID,
        and morphospecies). Also flags records with both tentative ID and morphospecies set.
        """
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                image_ref_key = localization['attributes'].get('Scientific Name')
                tentative_id = localization['attributes'].get('Tentative ID')
                morphospecies = localization['attributes'].get('Morphospecies')
                if tentative_id and morphospecies:
                    localization['problems'] = 'Tentative ID, Morphospecies'
                    records_of_interest.append(localization)
                    continue
                if tentative_id:
                    image_ref_key += f'~tid={tentative_id}'
                if morphospecies:
                    image_ref_key += f'~m={morphospecies}'
                if image_ref_key not in image_refs:
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def get_all_tentative_ids(self):
        """
        Finds every record with a tentative ID. Also checks whether the tentative ID is in the same
        phylogenetic group as the scientific name.
        """
        no_match_records = set()
        records_of_interest = []
        for section in self.sections:
            for localization in section.localizations:
                tentative_id = localization['attributes'].get('Tentative ID')
                if tentative_id and tentative_id not in ['--', '-', '']:
                    localization['problems'] = 'Tentative ID'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()  # process first to make sure phylogeny is populated
        for localization in self.final_records:
            phylogeny_match = False
            if localization['tentative_id'] not in self.phylogeny:
                if localization['tentative_id'] in no_match_records \
                        or not self.fetch_worms_phylogeny(localization['tentative_id']):
                    no_match_records.add(localization['tentative_id'])
                    localization['problems'] += ' phylogeny no match'
                    continue
            for value in self.phylogeny[localization['tentative_id']].values():
                if value == localization['scientific_name']:
                    phylogeny_match = True
                    break
            if not phylogeny_match:
                localization['problems'] += ' phylogeny no match'
        self.save_phylogeny()

    def get_all_notes_and_remarks(self):
        """
        Finds every record with a note or remark.
        """
        records_of_interest = []
        for section in self.sections:
            for localization in section.localizations:
                notes = localization['attributes'].get('Notes')
                id_remarks = localization['attributes'].get('IdentificationRemarks')
                has_note = notes and notes not in ['--', '-', '']
                has_remark = id_remarks and id_remarks not in ['--', '-', '']
                if has_note and has_remark:
                    localization['problems'] = 'Notes, ID Remarks'
                    records_of_interest.append(localization)
                elif has_note:
                    localization['problems'] = 'Notes'
                    records_of_interest.append(localization)
                elif has_remark:
                    localization['problems'] = 'ID Remarks'
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def get_re_examined(self):
        """
        Finds all records that have a reason of "to be re-examined".
        """
        records_of_interest = []
        for section in self.sections:
            for localization in section.localizations:
                if localization['attributes'].get('Reason') == 'To be re-examined':
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()

    def get_unique_taxa(self):
        """
        Finds every unique scientific name, tentative ID, and morphospecies combo along with its box/dot info.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True)
        unique_taxa = {}
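        # each unique taxon is keyed as '<scientific name>:<tentative id>:<morphospecies>'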
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id = record.get('tentative_id', '')
            morphospecies = record.get('morphospecies', '')
            key = f'{scientific_name}:{tentative_id}:{morphospecies}'
            if key not in unique_taxa:
                # add new unique taxa to dict
                unique_taxa[key] = {
                    'scientific_name': scientific_name,
                    'tentative_id': tentative_id,
                    'morphospecies': morphospecies,
                    'box_count': 0,
                    'dot_count': 0,
                    'first_box': '',
                    'first_dot': '',
                }
            for localization in record['all_localizations']:
                # increment box/dot counts, set first box/dot timestamps and URLs
                if localization['type'] == TatorLocalizationType.BOX.value:
                    unique_taxa[key]['box_count'] += 1
                    if not record.get('timestamp'):
                        continue
                    first_box = unique_taxa[key]['first_box']
                    observed_timestamp = datetime.datetime.strptime(record['timestamp'], self.BOTTOM_TIME_FORMAT)
                    if not first_box or observed_timestamp < datetime.datetime.strptime(first_box, self.BOTTOM_TIME_FORMAT):
                        unique_taxa[key]['first_box'] = record['timestamp']
                        unique_taxa[key]['first_box_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
                elif localization['type'] == TatorLocalizationType.DOT.value:
                    unique_taxa[key]['dot_count'] += 1
                    if not record.get('timestamp'):
                        continue
                    first_dot = unique_taxa[key]['first_dot']
                    observed_timestamp = datetime.datetime.strptime(record['timestamp'], self.BOTTOM_TIME_FORMAT)
                    if not first_dot or observed_timestamp < datetime.datetime.strptime(first_dot, self.BOTTOM_TIME_FORMAT):
                        unique_taxa[key]['first_dot'] = record['timestamp']
                        unique_taxa[key]['first_dot_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
        self.final_records = unique_taxa

    def get_max_n(self):
        """
        Finds the highest dot count for each unique scientific name, tentative ID, and morphospecies combo per
        deployment. Ignores non-attracted taxa.
        """
        self.process_records(get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if unique_name not in unique_taxa:
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa:
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'max_n_dict': {},
                }
            if unique_name not in deployment_taxa[record['video_sequence_name']]['max_n_dict']:
                # add new unique taxa to dict
                deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name] = {
                    'max_n': record['count'],
                    'max_n_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            elif record['count'] > deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n']:
                # check for new max N
                deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n'] = record['count']
                deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x.get('phylum') or '',
            x.get('class') or '',
            x.get('order') or '',
            x.get('family') or '',
            x.get('genus') or '',
            x.get('species') or '',
        ))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
        }

    def get_tofa(self):
        """
        Finds the time of first arrival (TOFA) for each unique scientific name, tentative ID, and morphospecies combo
        per deployment. Also builds the species accumulation curve. Ignores non-attracted taxa.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True, get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        unique_taxa_first_seen = {}
        section_id_indices = {section.section_id: index for index, section in enumerate(self.sections)}
        bottom_time = None
        latest_timestamp = datetime.datetime.fromtimestamp(0)  # used to find the duration of the deployment
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            if not record.get('timestamp'):
                continue
            observed_timestamp = datetime.datetime.strptime(record['timestamp'], self.BOTTOM_TIME_FORMAT)
            this_section = self.sections[section_id_indices[record['section_id']]]
            bottom_time = datetime.datetime.strptime(this_section.bottom_time, self.BOTTOM_TIME_FORMAT)
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if observed_timestamp > latest_timestamp:
                latest_timestamp = observed_timestamp
            if unique_name not in unique_taxa_first_seen or observed_timestamp < unique_taxa_first_seen[unique_name]:
                unique_taxa_first_seen[unique_name] = observed_timestamp
            if unique_name not in unique_taxa:
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa:
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'tofa_dict': {},
                }
            if unique_name not in deployment_taxa[record['video_sequence_name']]['tofa_dict']:
                # add new unique taxa to dict (store TOFA as a timedelta; stringified after the loop)
                deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name] = {
                    'tofa': observed_timestamp - bottom_time if observed_timestamp > bottom_time else datetime.timedelta(0),
                    'tofa_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            else:
                # check for new TOFA (compare timedeltas directly; comparing their string forms sorts incorrectly)
                new_tofa = observed_timestamp - bottom_time if observed_timestamp > bottom_time else datetime.timedelta(0)
                if new_tofa < deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa']:
                    deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa'] = new_tofa
                    deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa_url'] = \
                        f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # stringify the TOFA timedeltas for display
        for deployment in deployment_taxa.values():
            for taxa_info in deployment['tofa_dict'].values():
                taxa_info['tofa'] = str(taxa_info['tofa'])
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x.get('phylum') or '',
            x.get('class') or '',
            x.get('order') or '',
            x.get('family') or '',
            x.get('genus') or '',
            x.get('species') or '',
        ))
        if len(unique_taxa_list) == 0:
            print(f'{TERM_RED}ERROR: Unable to calculate TOFA. Missing start times?{TERM_NORMAL}')
            self.final_records = {
                'deployments': deployment_taxa,
                'unique_taxa': [],
                'deployment_time': 0,
                'accumulation_data': [],
            }
            return
        # round the deployment duration up to the nearest hour (total_seconds avoids the 24-hour wrap of timedelta.seconds)
        deployment_hours = math.ceil((latest_timestamp - bottom_time).total_seconds() / 3600)
        accumulation_data = []  # the number of unique taxa seen by the end of each hour
        for hour in range(1, deployment_hours + 1):
            accumulation_data.append(len([
                taxa for taxa in unique_taxa_first_seen.values() if taxa < bottom_time + datetime.timedelta(hours=hour)
            ]))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
            'deployment_time': deployment_hours,
            'accumulation_data': accumulation_data,
        }

    def get_summary(self):
        """
        Builds a summary of the final records (box localizations are excluded).
        """
        self.fetch_start_times()
        for section in self.sections:
            section.localizations = [
                localization for localization in section.localizations if localization['type'] != TatorLocalizationType.BOX.value
            ]
        self.process_records(get_timestamp=True, get_ctd=True, get_substrates=True)

    def download_image_guide(self, app) -> Presentation:
        """
        Finds all records marked as "good" images and saves them to a PowerPoint presentation.
        """
        for section in self.sections:
            records_of_interest = []
            for localization in section.localizations:
                if localization['attributes'].get('Good Image'):
                    records_of_interest.append(localization)
            section.localizations = records_of_interest
        self.process_records()
        pres = Presentation()
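        # slide layout 6 in the default python-pptx template is the blank layout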
        image_slide_layout = pres.slide_layouts[6]

        i = 0
        while i < len(self.final_records):
            slide = pres.slides.add_slide(image_slide_layout)
            current_phylum = self.final_records[i].get('phylum')
            if current_phylum is None:
                current_phylum = 'UNKNOWN PHYLUM'
            phylum_text_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.5), Inches(9), Inches(0.5))
            phylum_text_frame = phylum_text_box.text_frame
            phylum_paragraph = phylum_text_frame.paragraphs[0]
            phylum_paragraph.alignment = PP_ALIGN.CENTER
            phylum_run = phylum_paragraph.add_run()
            phylum_run.text = ' '.join(list(current_phylum.upper()))  # letter-spaced phylum title
            phylum_font = phylum_run.font
            phylum_font.name = 'Arial'
            phylum_font.size = Pt(32)
            phylum_font.color.rgb = RGBColor(0, 0, 0)
            for j in range(4):
                # add up to four images to the slide
                localization = self.final_records[i]
                if localization.get('phylum') != current_phylum and current_phylum != 'UNKNOWN PHYLUM':
                    break  # start a new slide when the phylum changes
                localization_id = localization['all_localizations'][0]['id']
                response = requests.get(f'{app.config.get("LOCAL_APP_URL")}/tator/localization-image/{localization_id}?token={session["tator_token"]}')
                if response.status_code != 200:
                    print(f'Error fetching image for record {localization["observation_uuid"]}')
                    i += 1  # skip this record so a persistent fetch failure can't loop forever
                    if i >= len(self.final_records):
                        break
                    continue
                image_data = BytesIO(response.content)
                top = Inches(1.5 if j < 2 else 4)
                left = Inches(1 if j % 2 == 0 else 5)
                picture = slide.shapes.add_picture(image_data, left, top, height=Inches(2.5))
                line = picture.line
                line.color.rgb = RGBColor(0, 0, 0)
                line.width = Pt(1.5)
                # add text box
                width = Inches(2)
                height = Inches(1)
                text_box = slide.shapes.add_textbox(left, top, width, height)
                text_frame = text_box.text_frame
                paragraph = text_frame.paragraphs[0]
                run = paragraph.add_run()
                run.text = f'{localization["scientific_name"]}{" (" + localization["tentative_id"] + "?)" if localization.get("tentative_id") else ""}'
                font = run.font
                font.name = 'Arial'
                font.size = Pt(18)
                font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                font.italic = True
                if localization['attracted'] == 'Not Attracted':
                    text_frame.add_paragraph()
                    paragraph = text_frame.paragraphs[1]
                    run_2 = paragraph.add_run()
                    run_2.text = 'NOT ATTRACTED'
                    font = run_2.font
                    font.name = 'Arial'
                    font.size = Pt(18)
                    font.color.rgb = RGBColor(0xff, 0x0, 0x0)
                    font.italic = False
                i += 1
                if i >= len(self.final_records):
                    break
        return pres

    def fetch_start_times(self):
        if 'media_timestamps' not in session:
            session['media_timestamps'] = {}
        for section in self.sections:
            print(f'Fetching media start times for deployment "{section.deployment_name}"...', end='')
            sys.stdout.flush()
            res = requests.get(
                url=f'{self.tator_url}/rest/Medias/{self.project_id}?section={section.section_id}',
                headers={
                    'Content-Type': 'application/json',
                    'Authorization': f'Token {session["tator_token"]}',
                },
            )
            for media in res.json():
                # get media start times
                if media['id'] not in session['media_timestamps']:
                    if 'Start Time' in media['attributes']:
                        session['media_timestamps'][media['id']] = media['attributes']['Start Time']
                        session.modified = True
                    else:
                        print(f'{TERM_RED}Warning:{TERM_NORMAL} No start time found for media {media["id"]}')
                        continue
                # get deployment bottom time
                media_arrival_attribute = media['attributes'].get('Arrival')
                if media_arrival_attribute and media_arrival_attribute.strip() != '':
                    video_start_timestamp = datetime.datetime.fromisoformat(media['attributes']['Start Time'])
                    if 'not observed' in media_arrival_attribute.lower():
                        arrival_frame = 0
                    else:
                        try:
                            arrival_frame = int(media_arrival_attribute.strip().split(' ')[0])
                        except ValueError:
                            print(f'\n{TERM_RED}Error:{TERM_NORMAL} Could not parse Arrival value for {media["name"]}')
                            print(f'Arrival value: "{media["attributes"]["Arrival"]}"')
                            raise
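                    # convert the arrival frame number to seconds, assuming 30 fps video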
                    deployment_bottom_time = video_start_timestamp + datetime.timedelta(seconds=arrival_frame / 30)
                    section.bottom_time = deployment_bottom_time.strftime(self.BOTTOM_TIME_FORMAT)
            print('fetched!')
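

# A minimal usage sketch (the host URL, token, and IDs below are placeholders, not values from this
# codebase; checks that touch the Flask session must run inside a request context):
#
#     api = tator.get_api(host='https://cloud.tator.io', token='<your tator token>')
#     processor = TatorQaqcProcessor(
#         project_id=1,
#         section_ids=['123'],
#         api=api,
#         tator_url='https://cloud.tator.io',
#     )
#     processor.check_names_accepted()
#     for record in processor.final_records:
#         print(record.get('scientific_name'), record.get('problems'))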