# application/qaqc/tator/tator_qaqc_processor.py
import datetime
import math
import requests
import sys
import tator

from flask import session
from io import BytesIO
from pptx import Presentation
from pptx.dml.color import RGBColor
from pptx.enum.text import PP_ALIGN
from pptx.util import Inches, Pt

from application.image_review.tator.tator_localization_processor import TatorLocalizationProcessor
from application.util.constants import TERM_NORMAL, TERM_RED
from application.util.tator_localization_type import TatorLocalizationType


class TatorQaqcProcessor(TatorLocalizationProcessor):
    """
    Fetches annotation information from Tator given a project ID, section ID, and list of deployments, then
    filters and formats the annotations for the various QA/QC checks.
    """
    def __init__(
        self,
        project_id: int,
        section_id: int,
        api: tator.api,
        deployment_list: list,
        tator_url: str,
        darc_review_url: str = None,
    ):
        super().__init__(
            project_id=project_id,
            section_id=section_id,
            api=api,
            deployment_list=deployment_list,
            darc_review_url=darc_review_url,
            tator_url=tator_url,
        )
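
    # A minimal usage sketch (hypothetical IDs and deployment names; assumes a tator-py API
    # client and a logged-in Flask session holding 'tator_token', which fetch_start_times() reads):
    #
    #     api = tator.get_api(host='https://cloud.tator.io', token=my_token)
    #     processor = TatorQaqcProcessor(
    #         project_id=26,
    #         section_id=11,
    #         api=api,
    #         deployment_list=['HC-22-01'],
    #         tator_url='https://cloud.tator.io',
    #     )
    #     processor.check_names_accepted()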

    def fetch_start_times(self):
        for deployment in self.deployments:
            print(f'Fetching media start times for deployment "{deployment}"...', end='')
            sys.stdout.flush()
            if 'media_timestamps' not in session.keys():
                session['media_timestamps'] = {}
            res = requests.get(
                url=f'{self.tator_url}/rest/Medias/{self.project_id}?section={self.section_id}&attribute_contains=%24name%3A%3A{deployment}',
                headers={
                    'Content-Type': 'application/json',
                    'Authorization': f'Token {session["tator_token"]}',
                }
            )
            for media in res.json():
                if media['attributes'].get('Arrival') and media['attributes']['Arrival'].strip() != '':
                    video_start_timestamp = datetime.datetime.fromisoformat(media['attributes'].get('Start Time'))
                    if 'not observed' in media['attributes']['Arrival'].lower():
                        arrival_frame = 0
                    else:
                        try:
                            # 'Arrival' is expected to begin with a frame number
                            arrival_frame = int(media['attributes']['Arrival'].strip().split(' ')[0])
                        except ValueError:
                            print(f'\n{TERM_RED}Error:{TERM_NORMAL} Could not parse Arrival value for {media["name"]}')
                            print(f'Arrival value: "{media["attributes"]["Arrival"]}"')
                            raise ValueError(f'Could not parse Arrival value for {media["name"]}')
                    # convert the arrival frame to an offset from the video start (assumes 30 fps)
                    self.bottom_times[deployment] = (video_start_timestamp + datetime.timedelta(seconds=arrival_frame / 30)).strftime('%Y-%m-%d %H:%M:%SZ')
                if media['id'] not in session['media_timestamps'].keys():
                    if 'Start Time' in media['attributes'].keys():
                        session['media_timestamps'][media['id']] = media['attributes']['Start Time']
                        session.modified = True
                    else:
                        print(f'{TERM_RED}Warning:{TERM_NORMAL} No start time found for media {media["id"]}')
        print('fetched!')
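
    # For reference, a sketch of the state this populates (illustrative values only; the real
    # keys come from the Tator media records):
    #
    #     session['media_timestamps']  # {media_id: ISO start time}, e.g. {123: '2021-08-01T18:30:00'}
    #     self.bottom_times            # {deployment: arrival time},  e.g. {'HC-22-01': '2021-08-01 19:02:13Z'}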

    def check_names_accepted(self):
        """
        Finds records with a scientific name or tentative ID that is not accepted in WoRMS.
        """
        print('Checking for accepted names...')
        sys.stdout.flush()
        checked = {}
        records_of_interest = []
        for localization in self.localizations:
            flag_record = False
            scientific_name = localization['attributes'].get('Scientific Name')
            tentative_id = localization['attributes'].get('Tentative ID')
            if scientific_name not in checked.keys():
                if scientific_name in self.phylogeny.keys():
                    checked[scientific_name] = True
                else:
                    if self.fetch_worms_phylogeny(scientific_name):
                        checked[scientific_name] = True
                    else:
                        localization['problems'] = 'Scientific Name'
                        checked[scientific_name] = False
                        flag_record = True
            elif not checked[scientific_name]:
                localization['problems'] = 'Scientific Name'
                flag_record = True
            if tentative_id:
                if tentative_id not in checked.keys():
                    if tentative_id in self.phylogeny.keys():
                        checked[tentative_id] = True
                    else:
                        if self.fetch_worms_phylogeny(tentative_id):
                            checked[tentative_id] = True
                        else:
                            localization['problems'] = 'Tentative ID'
                            checked[tentative_id] = False
                            flag_record = True
                elif not checked[tentative_id]:
                    localization['problems'] = 'Tentative ID' if 'problems' not in localization.keys() else 'Scientific Name, Tentative ID'
                    flag_record = True
            if flag_record:
                records_of_interest.append(localization)
        print(f'Found {len(records_of_interest)} localizations with unaccepted names!')
        self.localizations = records_of_interest
        self.process_records(no_match_records={key for key in checked.keys() if not checked[key]})

    def check_missing_qualifier(self):
        """
        Finds records that are classified higher than species but don't have a qualifier set (usually '--'). This
        check needs to call process_records first to populate the phylogeny.
        """
        self.process_records()
        actual_final_records = []
        for record in self.final_records:
            if not record.get('species') and record.get('qualifier', '--') == '--':
                record['problems'] = 'Scientific Name, Qualifier'
                actual_final_records.append(record)
        self.final_records = actual_final_records

    def check_stet_reason(self):
        """
        Finds records that have a qualifier of 'stet.' but no reason set.
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Qualifier') == 'stet.' \
                    and localization['attributes'].get('Reason', '--') == '--':
                localization['problems'] = 'Qualifier, Reason'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def check_attracted_not_attracted(self, attracted_dict: dict):
        """
        Finds all records that are marked as "attracted" but are saved as "not attracted" in the attracted_dict, and
        vice versa. Also flags all records with taxa that are marked as "attracted/not attracted" in the attracted_dict.
        """
        records_of_interest = []
        for localization in self.localizations:
            scientific_name = localization['attributes'].get('Scientific Name')
            if scientific_name not in attracted_dict.keys() or attracted_dict[scientific_name] == 2:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
            elif localization['attributes'].get('Attracted') == 'Attracted' and attracted_dict[scientific_name] == 0:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
            elif localization['attributes'].get('Attracted') == 'Not Attracted' and attracted_dict[scientific_name] == 1:
                localization['problems'] = 'Scientific Name, Attracted'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
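
    # attracted_dict is keyed by scientific name; judging from the checks above, 0 means
    # "not attracted", 1 means "attracted", and 2 means "attracted/not attracted". A
    # hypothetical example:
    #
    #     attracted_dict = {
    #         'Sebastes': 1,              # attracted: flagged if marked 'Not Attracted'
    #         'Paragorgia arborea': 0,    # not attracted: flagged if marked 'Attracted'
    #         'Zoarcidae': 2,             # either way: always flagged for review
    #     }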

    def check_same_name_qualifier(self):
        """
        Finds records that have the same scientific name/tentative ID combo but a different qualifier.
        """
        scientific_name_qualifiers = {}
        problem_scientific_names = set()
        records_of_interest = []
        for localization in self.localizations:
            scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
            if scientific_name not in scientific_name_qualifiers.keys():
                scientific_name_qualifiers[scientific_name] = localization['attributes'].get('Qualifier')
            else:
                if scientific_name_qualifiers[scientific_name] != localization['attributes'].get('Qualifier'):
                    problem_scientific_names.add(scientific_name)
        for localization in self.localizations:
            scientific_name = f'{localization["attributes"].get("Scientific Name")}{" (" + localization["attributes"]["Tentative ID"] + "?)" if localization["attributes"].get("Tentative ID") else ""}'
            if scientific_name in problem_scientific_names:
                localization['problems'] = 'Scientific Name, Qualifier'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def check_non_target_not_attracted(self):
        """
        Finds records that have a "non-target" reason but are not marked as "not attracted".
        """
        records_of_interest = []
        for localization in self.localizations:
            attracted = localization['attributes'].get('Attracted')
            reason = localization['attributes'].get('Reason')
            if reason and 'Non-target' in reason and attracted != 'Not Attracted':  # guard: Reason may be missing
                localization['problems'] = 'Attracted, Reason'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def check_exists_in_image_references(self, image_refs: dict):
        """
        Finds records that do not exist in the image references db (combo of scientific name, tentative ID,
        and morphospecies). Also flags records with both tentative ID and morphospecies set.
        """
        records_of_interest = []
        for localization in self.localizations:
            image_ref_key = localization['attributes'].get('Scientific Name')
            tentative_id = localization['attributes'].get('Tentative ID')
            morphospecies = localization['attributes'].get('Morphospecies')
            if tentative_id and morphospecies:
                localization['problems'] = 'Tentative ID, Morphospecies'
                records_of_interest.append(localization)
                continue
            if tentative_id:
                image_ref_key += f'~tid={tentative_id}'
            if morphospecies:
                image_ref_key += f'~m={morphospecies}'
            if image_ref_key not in image_refs:
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
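
    # Keys in image_refs appear to follow the format built above, e.g. (hypothetical entries):
    #
    #     'Sebastes'                              # scientific name only
    #     'Sebastes~tid=Sebastes aleutianus'      # with a tentative ID
    #     'Zoarcidae~m=pale morphotype'           # with a morphospecies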

    def get_all_tentative_ids(self):
        """
        Finds every record with a tentative ID. Also checks whether the tentative ID is in the same
        phylogenetic group as the scientific name.
        """
        no_match_records = set()
        records_of_interest = []
        for localization in self.localizations:
            tentative_id = localization['attributes'].get('Tentative ID')
            if tentative_id and tentative_id not in ['--', '-', '']:
                localization['problems'] = 'Tentative ID'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()  # process first to make sure phylogeny is populated
        for localization in self.final_records:
            phylogeny_match = False
            if localization['tentative_id'] not in self.phylogeny.keys():
                if localization['tentative_id'] not in no_match_records:
                    if not self.fetch_worms_phylogeny(localization['tentative_id']):
                        no_match_records.add(localization['tentative_id'])
                        localization['problems'] += ' phylogeny no match'
                        continue
                else:
                    localization['problems'] += ' phylogeny no match'
                    continue
            for value in self.phylogeny[localization['tentative_id']].values():
                if value == localization['scientific_name']:
                    phylogeny_match = True
                    break
            if not phylogeny_match:
                localization['problems'] += ' phylogeny no match'
        self.save_phylogeny()
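
    # self.phylogeny (populated by fetch_worms_phylogeny) presumably maps a taxon name to its
    # ranks, which is what the loop above scans for the scientific name. Illustrative shape only:
    #
    #     self.phylogeny['Sebastes aleutianus'] = {
    #         'phylum': 'Chordata',
    #         'family': 'Sebastidae',
    #         'genus': 'Sebastes',
    #         # ... other ranks
    #     }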

    def get_all_notes_and_remarks(self):
        """
        Finds every record with a note or remark.
        """
        records_of_interest = []
        for localization in self.localizations:
            notes = localization['attributes'].get('Notes')
            id_remarks = localization['attributes'].get('IdentificationRemarks')
            has_note = notes and notes not in ['--', '-', '']
            # a standalone 'Temperature and oxygen data collected' note doesn't count as a real note
            if has_note and 'Temperature and oxygen data collected' in notes \
                    and not ('|' in notes or ';' in notes):
                has_note = False
            has_remark = id_remarks and id_remarks not in ['--', '-', '']
            if has_note and has_remark:
                localization['problems'] = 'Notes, ID Remarks'
                records_of_interest.append(localization)
            elif has_note:
                localization['problems'] = 'Notes'
                records_of_interest.append(localization)
            elif has_remark:
                localization['problems'] = 'ID Remarks'
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def get_re_examined(self):
        """
        Finds all records that have a reason of "to be re-examined".
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Reason') == 'To be re-examined':
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()

    def get_unique_taxa(self):
        """
        Finds every unique scientific name, tentative ID, and morphospecies combo along with box/dot info.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True)
        unique_taxa = {}
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id = record.get('tentative_id', '')
            morphospecies = record.get('morphospecies', '')
            key = f'{scientific_name}:{tentative_id}:{morphospecies}'
            if key not in unique_taxa.keys():
                # add new unique taxa to dict
                unique_taxa[key] = {
                    'scientific_name': scientific_name,
                    'tentative_id': tentative_id,
                    'morphospecies': morphospecies,
                    'box_count': 0,
                    'dot_count': 0,
                    'first_box': '',
                    'first_dot': '',
                }
            observed_timestamp = datetime.datetime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%SZ')
            for localization in record['all_localizations']:
                # increment box/dot counts, track the first box and first dot
                if localization['type'] == TatorLocalizationType.BOX.value:
                    unique_taxa[key]['box_count'] += 1
                    first_box = unique_taxa[key]['first_box']
                    if not first_box or observed_timestamp < datetime.datetime.strptime(first_box, '%Y-%m-%d %H:%M:%SZ'):
                        unique_taxa[key]['first_box'] = record['timestamp']
                        unique_taxa[key]['first_box_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
                elif localization['type'] == TatorLocalizationType.DOT.value:
                    unique_taxa[key]['dot_count'] += 1
                    first_dot = unique_taxa[key]['first_dot']
                    if not first_dot or observed_timestamp < datetime.datetime.strptime(first_dot, '%Y-%m-%d %H:%M:%SZ'):
                        unique_taxa[key]['first_dot'] = record['timestamp']
                        unique_taxa[key]['first_dot_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}&selected_entity={localization["elemental_id"]}'
        self.final_records = unique_taxa
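
    # After this runs, self.final_records is a dict keyed by 'scientific_name:tentative_id:morphospecies'.
    # A sketch of one entry (hypothetical values):
    #
    #     unique_taxa['Sebastes::'] = {
    #         'scientific_name': 'Sebastes',
    #         'tentative_id': '',
    #         'morphospecies': '',
    #         'box_count': 3,
    #         'dot_count': 12,
    #         'first_box': '2021-08-01 19:10:05Z',
    #         'first_dot': '2021-08-01 19:02:13Z',
    #         'first_box_url': '...',  # deep link into the Tator annotation view
    #         'first_dot_url': '...',
    #     }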

    def get_max_n(self):
        """
        Finds the highest dot count for each unique scientific name, tentative ID, and morphospecies combo per
        deployment. Ignores non-attracted taxa.
        """
        self.process_records(get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if unique_name not in unique_taxa.keys():
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa.keys():
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'max_n_dict': {},
                }
            max_n_dict = deployment_taxa[record['video_sequence_name']]['max_n_dict']
            if unique_name not in max_n_dict.keys():
                # add new unique taxa to dict
                max_n_dict[unique_name] = {
                    'max_n': record['count'],
                    'max_n_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            elif record['count'] > max_n_dict[unique_name]['max_n']:
                # check for new max N
                max_n_dict[unique_name]['max_n'] = record['count']
                max_n_dict[unique_name]['max_n_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x.get('phylum') or '',
            x.get('class') or '',
            x.get('order') or '',
            x.get('family') or '',
            x.get('genus') or '',
            x.get('species') or '',
        ))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
        }
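
    # The resulting self.final_records has this shape (hypothetical values):
    #
    #     {
    #         'deployments': {
    #             'HC-22-01': {
    #                 'depth_m': 642,
    #                 'max_n_dict': {'Sebastes': {'max_n': 7, 'max_n_url': '...'}},
    #             },
    #         },
    #         'unique_taxa': ['Sebastes'],  # sorted by phylum, class, order, family, genus, species
    #     }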

    def get_tofa(self):
        """
        Finds the time of first arrival for each unique scientific name, tentative ID, and morphospecies combo per
        deployment. Also computes a species accumulation curve. Ignores non-attracted taxa.
        """
        self.fetch_start_times()
        self.process_records(get_timestamp=True, get_ctd=True)
        deployment_taxa = {}
        unique_taxa = {}
        unique_taxa_first_seen = {}
        bottom_time = None
        latest_timestamp = datetime.datetime.fromtimestamp(0)  # to find the duration of the deployment
        for record in self.final_records:
            scientific_name = record.get('scientific_name')
            tentative_id_suffix = f' ({record["tentative_id"]}?)' if record.get('tentative_id') else ''
            morphospecies_suffix = f' ({record["morphospecies"]})' if record.get('morphospecies') else ''
            unique_name = f'{scientific_name}{tentative_id_suffix}{morphospecies_suffix}'
            observed_timestamp = datetime.datetime.strptime(record['timestamp'], '%Y-%m-%d %H:%M:%SZ')
            bottom_time = datetime.datetime.strptime(self.bottom_times[record['video_sequence_name']], '%Y-%m-%d %H:%M:%SZ')
            if record.get('count', 0) < 1 or record.get('attracted') == 'Not Attracted':
                continue
            if observed_timestamp > latest_timestamp:
                latest_timestamp = observed_timestamp
            if unique_name not in unique_taxa_first_seen.keys():
                unique_taxa_first_seen[unique_name] = observed_timestamp
            elif observed_timestamp < unique_taxa_first_seen[unique_name]:
                unique_taxa_first_seen[unique_name] = observed_timestamp
            if unique_name not in unique_taxa.keys():
                unique_taxa[unique_name] = {
                    'unique_name': unique_name,
                    'phylum': record.get('phylum'),
                    'class': record.get('class'),
                    'order': record.get('order'),
                    'family': record.get('family'),
                    'genus': record.get('genus'),
                    'species': record.get('species'),
                }
            if record['video_sequence_name'] not in deployment_taxa.keys():
                deployment_taxa[record['video_sequence_name']] = {
                    'depth_m': record.get('depth_m'),
                    'tofa_dict': {},
                }
            tofa_dict = deployment_taxa[record['video_sequence_name']]['tofa_dict']
            new_tofa = observed_timestamp - bottom_time if observed_timestamp > bottom_time else datetime.timedelta(0)
            if unique_name not in tofa_dict.keys():
                # add new unique taxa to dict
                tofa_dict[unique_name] = {
                    'tofa': str(new_tofa),
                    'tofa_url': f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}',
                }
            else:
                # check for a new earliest TOFA; compare as timedeltas rather than strings, since
                # lexical comparison breaks once a deployment passes ten hours (assumes TOFA < 24 h)
                hours, minutes, seconds = tofa_dict[unique_name]['tofa'].split(':')
                current_tofa = datetime.timedelta(hours=int(hours), minutes=int(minutes), seconds=int(seconds))
                if new_tofa < current_tofa:
                    tofa_dict[unique_name]['tofa'] = str(new_tofa)
                    tofa_dict[unique_name]['tofa_url'] = f'{self.tator_url}/{self.project_id}/annotation/{record["media_id"]}?frame={record["frame"]}'
        # convert unique taxa to list for sorting
        unique_taxa_list = list(unique_taxa.values())
        unique_taxa_list.sort(key=lambda x: (
            x.get('phylum') or '',
            x.get('class') or '',
            x.get('order') or '',
            x.get('family') or '',
            x.get('genus') or '',
            x.get('species') or '',
        ))
        # round the deployment duration up to the nearest hour; use total_seconds() so deployments
        # longer than 24 hours don't wrap (timedelta.seconds ignores days)
        deployment_hours = math.ceil((latest_timestamp - bottom_time).total_seconds() / 3600)
        accumulation_data = []  # the number of unique taxa seen by each hour of the deployment
        for hour in range(1, deployment_hours + 1):
            accumulation_data.append(len([
                taxa for taxa in unique_taxa_first_seen.values() if taxa < bottom_time + datetime.timedelta(hours=hour)
            ]))
        self.final_records = {
            'deployments': deployment_taxa,
            'unique_taxa': [taxa['unique_name'] for taxa in unique_taxa_list],
            'deployment_time': deployment_hours,
            'accumulation_data': accumulation_data,
        }
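
    # Accumulation example: with a bottom time of 19:00 and first sightings at 19:05, 20:40, and
    # 20:55, accumulation_data would start [1, 3, 3, ...] -- one taxon seen within the first hour,
    # all three by the end of the second. (Illustrative numbers only.)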

    def get_summary(self):
        """
        Prepares a summary of the final records, dropping box localizations so only dots are summarized.
        """
        self.fetch_start_times()
        self.localizations = [
            localization for localization in self.localizations if localization['type'] != TatorLocalizationType.BOX.value
        ]
        self.process_records(get_timestamp=True, get_ctd=True, get_substrates=True)

    def download_image_guide(self, app) -> Presentation:
        """
        Finds all records marked as "good" images and saves them to a PowerPoint presentation.
        """
        records_of_interest = []
        for localization in self.localizations:
            if localization['attributes'].get('Good Image'):
                records_of_interest.append(localization)
        self.localizations = records_of_interest
        self.process_records()
        pres = Presentation()
        image_slide_layout = pres.slide_layouts[6]
        i = 0
        while i < len(self.final_records):
            slide = pres.slides.add_slide(image_slide_layout)
            current_phylum = self.final_records[i].get('phylum')
            if current_phylum is None:
                current_phylum = 'UNKNOWN PHYLUM'
            phylum_text_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.5), Inches(9), Inches(0.5))
            phylum_text_frame = phylum_text_box.text_frame
            phylum_paragraph = phylum_text_frame.paragraphs[0]
            phylum_paragraph.alignment = PP_ALIGN.CENTER
            phylum_run = phylum_paragraph.add_run()
            phylum_run.text = ' '.join(list(current_phylum.upper()))  # letter-spaced phylum header
            phylum_font = phylum_run.font
            phylum_font.name = 'Arial'
            phylum_font.size = Pt(32)
            phylum_font.color.rgb = RGBColor(0, 0, 0)
            for j in range(4):
                # add up to four images per slide
                localization = self.final_records[i]
                if localization.get('phylum') != current_phylum and current_phylum != 'UNKNOWN PHYLUM':
                    # start a new slide when the phylum changes (use .get: phylum can be missing)
                    break
                localization_id = localization['all_localizations'][0]['id']
                response = requests.get(f'{app.config.get("LOCAL_APP_URL")}/tator/localization-image/{localization_id}?token={session["tator_token"]}')
                if response.status_code != 200:
                    print(f'Error fetching image for record {localization["observation_uuid"]}')
                    i += 1  # move past the failed record, otherwise the loop retries it forever
                    if i >= len(self.final_records):
                        break
                    continue
                image_data = BytesIO(response.content)
                top = Inches(1.5 if j < 2 else 4)
                left = Inches(1 if j % 2 == 0 else 5)
                picture = slide.shapes.add_picture(image_data, left, top, height=Inches(2.5))
                line = picture.line
                line.color.rgb = RGBColor(0, 0, 0)
                line.width = Pt(1.5)
                # add text box
                width = Inches(2)
                height = Inches(1)
                text_box = slide.shapes.add_textbox(left, top, width, height)
                text_frame = text_box.text_frame
                paragraph = text_frame.paragraphs[0]
                run = paragraph.add_run()
                run.text = f'{localization["scientific_name"]}{" (" + localization["tentative_id"] + "?)" if localization.get("tentative_id") else ""}'
                font = run.font
                font.name = 'Arial'
                font.size = Pt(18)
                font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                font.italic = True
                if localization['attracted'] == 'Not Attracted':
                    text_frame.add_paragraph()
                    paragraph = text_frame.paragraphs[1]
                    run_2 = paragraph.add_run()
                    run_2.text = 'NOT ATTRACTED'
                    font = run_2.font
                    font.name = 'Arial'
                    font.size = Pt(18)
                    font.color.rgb = RGBColor(0xff, 0x0, 0x0)
                    font.italic = False
                i += 1
                if i >= len(self.final_records):
                    break
        return pres
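
# A sketch of how the image guide might be generated and saved (processor construction as in the
# example near __init__; 'image_guide.pptx' is an arbitrary output path):
#
#     pres = processor.download_image_guide(app)
#     pres.save('image_guide.pptx')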