# Coverage for application/qaqc/tator/tator_qaqc_processor.py: 9% (366 statements; coverage.py v7.10.4, created 2025-08-17 17:51 +0000)

import datetime
import math
import requests
import sys
import tator

from flask import session
from io import BytesIO
from pptx import Presentation
from pptx.dml.color import RGBColor
from pptx.enum.text import PP_ALIGN
from pptx.util import Inches, Pt

from application.image_review.tator.tator_localization_processor import TatorLocalizationProcessor
from application.util.constants import TERM_NORMAL, TERM_RED
from application.util.tator_localization_type import TatorLocalizationType


class TatorQaqcProcessor(TatorLocalizationProcessor):
    """
    Fetches annotation information from Tator given a project ID, section ID, and list of deployments.
    Filters and formats the annotations for the various QA/QC checks.
    """
    def __init__(
        self,
        project_id: int,
        section_id: int,
        api: tator.api,
        deployment_list: list,
        tator_url: str,
        darc_review_url: str = None,
    ):
        super().__init__(
            project_id=project_id,
            section_id=section_id,
            api=api,
            deployment_list=deployment_list,
            darc_review_url=darc_review_url,
            tator_url=tator_url,
        )

    def fetch_start_times(self):
        for deployment in self.deployments:
            print(f'Fetching media start times for deployment "{deployment}"...', end='')
            sys.stdout.flush()
            if 'media_timestamps' not in session.keys():
                session['media_timestamps'] = {}
            res = requests.get(
                url=f'{self.tator_url}/rest/Medias/{self.project_id}?section={self.section_id}&attribute_contains=%24name%3A%3A{deployment}',
                headers={
                    'Content-Type': 'application/json',
                    'Authorization': f'Token {session["tator_token"]}',
                }
            )
            for media in res.json():
                if media['attributes'].get('Arrival') and media['attributes']['Arrival'].strip() != '':
                    video_start_timestamp = datetime.datetime.fromisoformat(media['attributes'].get('Start Time'))
                    if 'not observed' in media['attributes']['Arrival'].lower():
                        arrival_frame = 0
                    else:
                        try:
                            arrival_frame = int(media['attributes']['Arrival'].strip().split(' ')[0])
                        except ValueError:
                            print(f'\n{TERM_RED}Error:{TERM_NORMAL} Could not parse Arrival value for {media["name"]}')
                            print(f'Arrival value: "{media["attributes"]["Arrival"]}"')
                            raise ValueError
                    # convert the arrival frame to seconds, assuming 30 fps video
                    self.bottom_times[deployment] = (video_start_timestamp + datetime.timedelta(seconds=arrival_frame / 30)).strftime('%Y-%m-%d %H:%M:%SZ')
                if media['id'] not in session['media_timestamps'].keys():
                    if 'Start Time' in media['attributes'].keys():
                        session['media_timestamps'][media['id']] = media['attributes']['Start Time']
                        session.modified = True
                    else:
                        print(f'{TERM_RED}Warning:{TERM_NORMAL} No start time found for media {media["id"]}')
            print('fetched!')
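
    # Worked example of the bottom-time arithmetic above (hypothetical values;
    # the divisor of 30 assumes 30 fps video, as in the code):
    #
    #   Start Time = 2024-01-01T12:00:00, Arrival = '5400 frames'
    #   5400 / 30 = 180 s, so bottom time = '2024-01-01 12:03:00Z'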

    def check_names_accepted(self):
        """
        Finds records with a scientific name or tentative ID that is not accepted in WoRMS.
        """
        print('Checking for accepted names...')
        sys.stdout.flush()
        checked = {}
        records_of_interest = []
        for localization in self.localizations:
            flag_record = False
            scientific_name = localization['attributes'].get('Scientific Name')
            tentative_id = localization['attributes'].get('Tentative ID')
            if scientific_name not in checked.keys():
                if scientific_name in self.phylogeny.keys():
                    checked[scientific_name] = True
                else:
                    if self.fetch_worms_phylogeny(scientific_name):
                        checked[scientific_name] = True
                    else:
                        localization['problems'] = 'Scientific Name'
                        checked[scientific_name] = False
                        flag_record = True
            elif not checked[scientific_name]:
                localization['problems'] = 'Scientific Name'
                flag_record = True
            if tentative_id:
                if tentative_id not in checked.keys():
                    if tentative_id in self.phylogeny.keys():
                        checked[tentative_id] = True
                    else:
                        if self.fetch_worms_phylogeny(tentative_id):
                            checked[tentative_id] = True
                        else:
                            localization['problems'] = 'Tentative ID' if 'problems' not in localization.keys() else 'Scientific Name, Tentative ID'
                            checked[tentative_id] = False
                            flag_record = True
                elif not checked[tentative_id]:
                    localization['problems'] = 'Tentative ID' if 'problems' not in localization.keys() else 'Scientific Name, Tentative ID'
                    flag_record = True
            if flag_record:
                records_of_interest.append(localization)
        print(f'Found {len(records_of_interest)} localizations with unaccepted names!')
        self.localizations = records_of_interest
        self.process_records(no_match_records={key for key in checked.keys() if not checked[key]})
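
    # Illustrative shape of the `checked` memo built above (hypothetical
    # values): each name is resolved against WoRMS at most once per run, e.g.
    #
    #   checked = {'Actiniaria': True, 'unknown blob': False}
    #
    # so later localizations with a known-bad name are flagged without another
    # fetch_worms_phylogeny() call.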

    def check_missing_qualifier(self):
        """
        Finds records that are classified higher than species but don't have a qualifier set (usually '--'). This
        check needs to call process_records first to populate phylogeny.
        """
        self.process_records()
        actual_final_records = []
        for record in self.final_records:
            if not record.get('species') and record.get('qualifier', '--') == '--':
                record['problems'] = 'Scientific Name, Qualifier'
                actual_final_records.append(record)
        self.final_records = actual_final_records

    def check_stet_reason(self):
        """
        Finds records that have a qualifier of 'stet.' but no reason set.
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Qualifier') == 'stet.' \
                    and localization['attributes'].get('Reason', '--') == '--':
                localization['problems'] = 'Qualifier, Reason'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def check_attracted_not_attracted(self, attracted_dict: dict):
        """
        Finds all records that are marked as "attracted" but are saved as "not attracted" in the attracted_dict, and
        vice versa. Also flags all records with taxa that are marked as "attracted/not attracted" in the
        attracted_dict, and records whose scientific name is missing from the attracted_dict entirely.
        """
        records_of_interest = []
        for localization in self.localizations:
            scientific_name = localization['attributes'].get('Scientific Name')
            if scientific_name not in attracted_dict.keys() or attracted_dict[scientific_name] == 2:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
            elif localization['attributes'].get('Attracted') == 'Attracted' and attracted_dict[scientific_name] == 0:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
            elif localization['attributes'].get('Attracted') == 'Not Attracted' and attracted_dict[scientific_name] == 1:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
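
    # Encoding that the checks above expect from `attracted_dict`, with a
    # hypothetical example: 0 = not attracted, 1 = attracted, and
    # 2 = "attracted/not attracted" (always flagged), e.g.
    #
    #   attracted_dict = {'Chaceon fenneri': 1, 'Synaphobranchus': 2}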

    def check_same_name_qualifier(self):
        """
        Finds records that have the same scientific name/tentative ID combo but a different qualifier.
        """
        scientific_name_qualifiers = {}
        problem_scientific_names = set()
        records_of_interest = []
        for localization in self.localizations:
            scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
            if scientific_name not in scientific_name_qualifiers.keys():
                scientific_name_qualifiers[scientific_name] = localization['attributes'].get('Qualifier')
            else:
                if scientific_name_qualifiers[scientific_name] != localization['attributes'].get('Qualifier'):
                    problem_scientific_names.add(scientific_name)
        for localization in self.localizations:
            scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
            if scientific_name in problem_scientific_names:
                localization['problems'] = 'Scientific Name, Qualifier'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
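
    # The check above takes two passes: the first records one qualifier per
    # unique "Scientific Name (Tentative ID?)" string and collects names seen
    # with conflicting qualifiers, the second flags every localization of
    # those names. A hypothetical conflict:
    #
    #   'Paramuricea (Placogorgia?)' annotated with both '--' and 'aff.'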

    def check_non_target_not_attracted(self):
        """
        Finds records that have a "Non-target" reason but are not marked as "Not Attracted".
        """
        records_of_interest = []
        for localization in self.localizations:
            attracted = localization['attributes'].get('Attracted')
            reason = localization['attributes'].get('Reason')
            if reason and 'Non-target' in reason and attracted != 'Not Attracted':
                localization['problems'] = 'Attracted, Reason'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def check_exists_in_image_references(self, image_refs: dict):
        """
        Finds records that do not exist in the image references db (combo of scientific name, tentative ID,
        and morphospecies). Also flags records with both tentative ID and morphospecies set.
        """
        records_of_interest = []
        for localization in self.localizations:
            image_ref_key = localization['attributes'].get('Scientific Name')
            tentative_id = localization['attributes'].get('Tentative ID')
            morphospecies = localization['attributes'].get('Morphospecies')
            if tentative_id and morphospecies:
                localization['problems'] = 'Tentative ID, Morphospecies'
                records_of_interest.append(localization)
                continue
            if tentative_id and tentative_id != '':
                image_ref_key += f'~tid={tentative_id}'
            if morphospecies and morphospecies != '':
                image_ref_key += f'~m={morphospecies}'
            if image_ref_key not in image_refs:
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
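
    # Lookup key format used above, with hypothetical values: the scientific
    # name plus optional suffixes, e.g.
    #
    #   'Paramuricea'                    (name only)
    #   'Paramuricea~tid=Placogorgia'    (with tentative ID)
    #   'Paramuricea~m=white morphotype' (with morphospecies)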

    def get_all_tentative_ids(self):
        """
        Finds every record with a tentative ID. Also checks whether the tentative ID is in the same
        phylogenetic group as the scientific name.
        """
        no_match_records = set()
        records_of_interest = []
        for localization in self.localizations:
            tentative_id = localization['attributes'].get('Tentative ID')
            if tentative_id and tentative_id not in ['--', '-', '']:
                localization['problems'] = 'Tentative ID'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()  # process first to make sure phylogeny is populated
        for localization in self.final_records:
            phylogeny_match = False
            if localization['tentative_id'] not in self.phylogeny.keys():
                if localization['tentative_id'] not in no_match_records:
                    if not self.fetch_worms_phylogeny(localization['tentative_id']):
                        no_match_records.add(localization['tentative_id'])
                        localization['problems'] += ' phylogeny no match'
                        continue
                else:
                    localization['problems'] += ' phylogeny no match'
                    continue
            for value in self.phylogeny[localization['tentative_id']].values():
                if value == localization['scientific_name']:
                    phylogeny_match = True
                    break
            if not phylogeny_match:
                localization['problems'] += ' phylogeny no match'
        self.save_phylogeny()
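
    # The phylogeny comparison above passes if any rank stored for the
    # tentative ID matches the record's scientific name. Sketch with
    # hypothetical data:
    #
    #   self.phylogeny['Placogorgia'] = {'phylum': 'Cnidaria', ...}
    #   scientific_name 'Cnidaria' -> match; 'Porifera' -> ' phylogeny no match'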

    def get_all_notes_and_remarks(self):
        """
        Finds every record with a note or remark.
        """
        records_of_interest = []
        for localization in self.localizations:
            notes = localization['attributes'].get('Notes')
            id_remarks = localization['attributes'].get('IdentificationRemarks')
            has_note = notes and notes not in ['--', '-', '']
            has_remark = id_remarks and id_remarks not in ['--', '-', '']
            if has_note and has_remark:
                localization['problems'] = 'Notes, ID Remarks'
                records_of_interest.append(localization)
            elif has_note:
                localization['problems'] = 'Notes'
                records_of_interest.append(localization)
            elif has_remark:
                localization['problems'] = 'ID Remarks'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def get_re_examined(self):
        """
        Finds all records that have a reason of "To be re-examined".
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Reason') == 'To be re-examined':
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def get_unique_taxa(self):
        """
        Finds every unique scientific name, tentative ID, and morphospecies combo along with box/dot info.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True)
        unique_taxa = {}
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id = record.get('tentative_id', '')
            morphospecies = record.get('morphospecies', '')
            key = f'{scientific_name}:{tentative_id}:{morphospecies}'
            if key not in unique_taxa.keys():
                # add new unique taxa to dict
                unique_taxa[key] = {
                    'scientific_name': scientific_name,
                    'tentative_id': tentative_id,
                    'morphospecies': morphospecies,
                    'box_count': 0,
                    'dot_count': 0,
                    'first_box': '',
                    'first_dot': '',
                }
            for localization in record['all_localizations']:
                # increment box/dot counts, track earliest box/dot
                if localization['type'] == TatorLocalizationType.BOX.value:
                    unique_taxa[key]['box_count'] += 1
                    first_box = unique_taxa[key]['first_box']
                    if not first_box or datetime.datetime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%SZ') < datetime.datetime.strptime(first_box, '%Y-%m-%d %H:%M:%SZ'):
                        unique_taxa[key]['first_box'] = record['timestamp']
                        unique_taxa[key]['first_box_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
                elif localization['type'] == TatorLocalizationType.DOT.value:
                    unique_taxa[key]['dot_count'] += 1
                    first_dot = unique_taxa[key]['first_dot']
                    observed_timestamp = datetime.datetime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%SZ')
                    if not first_dot or observed_timestamp < datetime.datetime.strptime(first_dot, '%Y-%m-%d %H:%M:%SZ'):
                        unique_taxa[key]['first_dot'] = record['timestamp']
                        unique_taxa[key]['first_dot_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
        self.final_records = unique_taxa
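
    # Shape of one `unique_taxa` entry built above, keyed on
    # 'scientific_name:tentative_id:morphospecies' (hypothetical values):
    #
    #   unique_taxa['Paramuricea:Placogorgia:'] = {
    #       'scientific_name': 'Paramuricea', 'tentative_id': 'Placogorgia',
    #       'morphospecies': '', 'box_count': 3, 'dot_count': 5,
    #       'first_box': '2024-01-01 12:03:00Z', 'first_box_url': '...',
    #       'first_dot': '2024-01-01 12:10:00Z', 'first_dot_url': '...',
    #   }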

    def get_max_n(self):
        """
        Finds the highest dot count for each unique scientific name, tentative ID, and morphospecies combo per
        deployment. Ignores non-attracted taxa.
        """
        self.process_records(get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if unique_name not in unique_taxa.keys():
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa.keys():
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'max_n_dict': {},
                }
            if unique_name not in deployment_taxa[record['video_sequence_name']]['max_n_dict'].keys():
                # add new unique taxa to dict
                deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name] = {
                    'max_n': record['count'],
                    'max_n_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            else:
                # check for new max N
                if record['count'] > deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n']:
                    deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n'] = record['count']
                    deployment_taxa[record['video_sequence_name']]['max_n_dict'][unique_name]['max_n_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x['phylum'] if x.get('phylum') else '',
            x['class'] if x.get('class') else '',
            x['order'] if x.get('order') else '',
            x['family'] if x.get('family') else '',
            x['genus'] if x.get('genus') else '',
            x['species'] if x.get('species') else '',
        ))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
        }
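
    # Resulting `final_records` shape for the MaxN report (hypothetical
    # values):
    #
    #   {
    #       'deployments': {
    #           'DEP_01': {'depth_m': 512, 'max_n_dict': {
    #               'Chaceon fenneri': {'max_n': 4, 'max_n_url': '...'},
    #           }},
    #       },
    #       'unique_taxa': ['Chaceon fenneri'],
    #   }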

    def get_tofa(self):
        """
        Finds the time of first arrival (TOFA) for each unique scientific name, tentative ID, and morphospecies
        combo per deployment. Also computes data for a species accumulation curve. Ignores non-attracted taxa.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True, get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        unique_taxa_first_seen = {}
        bottom_time = None
        latest_timestamp = datetime.datetime.fromtimestamp(0)  # to find the duration of the deployment
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            observed_timestamp = datetime.datetime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%SZ')
            bottom_time = datetime.datetime.strptime(self.bottom_times[record['video_sequence_name']], '%Y-%m-%d %H:%M:%SZ')
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if observed_timestamp > latest_timestamp:
                latest_timestamp = observed_timestamp
            if unique_name not in unique_taxa_first_seen.keys():
                unique_taxa_first_seen[unique_name] = observed_timestamp
            else:
                if observed_timestamp < unique_taxa_first_seen[unique_name]:
                    unique_taxa_first_seen[unique_name] = observed_timestamp
            if unique_name not in unique_taxa.keys():
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa.keys():
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'tofa_dict': {},
                }
            if unique_name not in deployment_taxa[record['video_sequence_name']]['tofa_dict'].keys():
                # add new unique taxa to dict
                deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name] = {
                    'tofa': str(observed_timestamp - bottom_time) if observed_timestamp > bottom_time else '00:00:00',
                    'tofa_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            else:
                # check for new TOFA
                if str(observed_timestamp - bottom_time) < deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa']:
                    deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa'] = \
                        str(observed_timestamp - bottom_time) if observed_timestamp > bottom_time else '00:00:00'
                    deployment_taxa[record['video_sequence_name']]['tofa_dict'][unique_name]['tofa_url'] = \
                        f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x['phylum'] if x.get('phylum') else '',
            x['class'] if x.get('class') else '',
            x['order'] if x.get('order') else '',
            x['family'] if x.get('family') else '',
            x['genus'] if x.get('genus') else '',
            x['species'] if x.get('species') else '',
        ))
        # round the deployment duration up to the nearest hour
        deployment_time = datetime.timedelta(hours=math.ceil((latest_timestamp - bottom_time).total_seconds() / 3600))
        accumulation_data = []  # the number of unique taxa seen by the end of each hour
        for hour in range(1, deployment_time.seconds // 3600 + 1):
            accumulation_data.append(len([
                taxa for taxa in unique_taxa_first_seen.values() if taxa < bottom_time + datetime.timedelta(hours=hour)
            ]))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
            'deployment_time': deployment_time.seconds // 3600,
            'accumulation_data': accumulation_data,
        }
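
    # The accumulation data above counts, for each whole hour after bottom
    # time, how many unique taxa have already been seen. Sketch: with first
    # sightings at +0:10, +0:40, and +2:05, deployment_time rounds up to 3
    # hours and accumulation_data == [2, 2, 3].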

    def get_summary(self):
        """
        Builds a summary of the final records, ignoring box localizations.
        """
        self.fetch_start_times()
        self.localizations = [
            localization for localization in self.localizations if localization['type'] != TatorLocalizationType.BOX.value
        ]
        self.process_records(get_timestamp=True, get_ctd=True, get_substrates=True)

    def download_image_guide(self, app) -> Presentation:
        """
        Finds all records marked as "good" images and collects them into a PowerPoint presentation.
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Good Image'):
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
        pres = Presentation()
        image_slide_layout = pres.slide_layouts[6]

        i = 0
        while i < len(self.final_records):
            slide = pres.slides.add_slide(image_slide_layout)
            current_phylum = self.final_records[i].get('phylum')
            if current_phylum is None:
                current_phylum = 'UNKNOWN PHYLUM'
            phylum_text_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.5), Inches(9), Inches(0.5))
            phylum_text_frame = phylum_text_box.text_frame
            phylum_paragraph = phylum_text_frame.paragraphs[0]
            phylum_paragraph.alignment = PP_ALIGN.CENTER
            phylum_run = phylum_paragraph.add_run()
            phylum_run.text = ' '.join(list(current_phylum.upper()))
            phylum_font = phylum_run.font
            phylum_font.name = 'Arial'
            phylum_font.size = Pt(32)
            phylum_font.color.rgb = RGBColor(0, 0, 0)
            for j in range(4):
                # add up to four images per slide
                localization = self.final_records[i]
                if localization['phylum'] != current_phylum and current_phylum != 'UNKNOWN PHYLUM':
                    break
                localization_id = localization['all_localizations'][0]['id']
                response = requests.get(f'{app.config.get("LOCAL_APP_URL")}/tator/localization-image/{localization_id}?token={session["tator_token"]}')
                if response.status_code != 200:
                    print(f'Error fetching image for record {localization["observation_uuid"]}')
                    continue
                image_data = BytesIO(response.content)
                top = Inches(1.5 if j < 2 else 4)
                left = Inches(1 if j % 2 == 0 else 5)
                picture = slide.shapes.add_picture(image_data, left, top, height=Inches(2.5))
                line = picture.line
                line.color.rgb = RGBColor(0, 0, 0)
                line.width = Pt(1.5)
                # add text box
                width = Inches(2)
                height = Inches(1)
                text_box = slide.shapes.add_textbox(left, top, width, height)
                text_frame = text_box.text_frame
                paragraph = text_frame.paragraphs[0]
                run = paragraph.add_run()
                run.text = f'{localization["scientific_name"]}{" (" + localization["tentative_id"] + "?)" if localization.get("tentative_id") else ""}'
                font = run.font
                font.name = 'Arial'
                font.size = Pt(18)
                font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                font.italic = True
                if localization['attracted'] == 'Not Attracted':
                    text_frame.add_paragraph()
                    paragraph = text_frame.paragraphs[1]
                    run_2 = paragraph.add_run()
                    run_2.text = 'NOT ATTRACTED'
                    font = run_2.font
                    font.name = 'Arial'
                    font.size = Pt(18)
                    font.color.rgb = RGBColor(0xff, 0x0, 0x0)
                    font.italic = False
                i += 1
                if i >= len(self.final_records):
                    break
        return pres
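

# A minimal usage sketch (hypothetical IDs, URLs, and token handling; assumes a
# Flask request context so `session` is available, as in the application):
#
#   import tator
#
#   api = tator.get_api(host='https://tator.example.org', token='...')
#   processor = TatorQaqcProcessor(
#       project_id=1,
#       section_id=2,
#       api=api,
#       deployment_list=['DEP_01'],
#       tator_url='https://tator.example.org',
#   )
#   processor.check_names_accepted()   # runs one QA/QC check
#   print(processor.final_records)     # flagged records after processing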