|
| 1 | +""" |
| 2 | +run.py (GtR general) |
| 3 | +-------------------- |
| 4 | +
|
| 5 | +Transfer pre-collected GtR data from MySQL to Elasticsearch. |
| 6 | +""" |
| 7 | + |
| 8 | +from ast import literal_eval |
| 9 | +import boto3 |
| 10 | +import json |
| 11 | +import logging |
| 12 | +import os |
| 13 | +from datetime import datetime as dt |
| 14 | + |
| 15 | +from nesta.core.luigihacks.elasticsearchplus import ElasticsearchPlus |
| 16 | +from nesta.core.luigihacks.luigi_logging import set_log_level |
| 17 | +from nesta.core.orms.orm_utils import db_session, get_mysql_engine |
| 18 | +from nesta.core.orms.orm_utils import load_json_from_pathstub |
| 19 | +from nesta.core.orms.orm_utils import object_to_dict, get_class_by_tablename |
| 20 | +from nesta.core.orms.gtr_orm import Base, Projects, LinkTable, OrganisationLocation |
| 21 | +from collections import defaultdict, Counter |
| 22 | + |
| 23 | + |
_DEFAULT_POP_MISSING = object()  # sentinel: distinguishes "no default supplied" from default=None


def default_pop(dictobj, key, default=_DEFAULT_POP_MISSING):
    """Pop the key from the dict-like object. If the key doesn't exist, return a default.

    When no default is supplied, a *fresh* empty dict is returned on each call.
    This avoids the shared-mutable-default pitfall of ``default={}``: previously
    every defaulted call returned the same dict object, so a caller mutating it
    would silently corrupt later calls.

    Args:
        dictobj (dict-like): A dict-like object to modify.
        key (hashable): A key to pop from the dict-like object.
        default: Any value to be returned as default, should the key not exist.
            Defaults to a new empty dict.
    Returns:
        value: Either the value stored at the key, or the default value.
    """
    if default is _DEFAULT_POP_MISSING:
        default = {}  # fresh object per call, never shared between calls
    try:
        return dictobj.pop(key)
    except KeyError:
        return default
| 39 | + |
| 40 | + |
def truncate_if_str(value, n):
    """Truncate a value if it's a string, otherwise return the value itself.

    Args:
        value: Object to truncate, if it's a string.
        n (int): Number of chars after which to truncate.
    Returns:
        truncated: A truncated string, otherwise the original value itself.
    """
    return value[:n] if type(value) is str else value


def extract_funds(gtr_funds):
    """Extract and deduplicate funding information.

    Rows sharing the same start/end dates, category, amount and currency are
    considered duplicates; only one row per such combination is kept (later
    rows overwrite earlier ones).

    Args:
        gtr_funds (list of dict): Raw GtR funding information for a single project.
    Returns:
        _gtr_funds (list of dict): Deduplicated GtR funding information,
            ready for ingestion to ES.
    """
    funds = {}
    for raw_row in gtr_funds:
        row = {k: v for k, v in raw_row.items() if k != 'id'}  # drop the PK
        # ISO timestamps are reduced to their date component (first 10 chars),
        # unless the value isn't a string (e.g. None or a datetime)
        row['start_date'] = truncate_if_str(row.pop('start'), 10)
        row['end_date'] = truncate_if_str(row.pop('end'), 10)
        composite_key = tuple(row[k] for k in ('start_date', 'end_date',
                                               'category', 'amount',
                                               'currencyCode'))
        funds[composite_key] = row
    return list(funds.values())
| 70 | + |
| 71 | + |
def get_linked_rows(session, links):
    """Fetch the rows referenced in the link table, grouped by table name.

    GtR outcome tables are not fetched in full: only the outcome type is
    recorded, once per linked id, because indexing the complete outcome
    documents otherwise leads to a mapping explosion in Elasticsearch.

    Args:
        session (SqlAlchemy session): Open session from which to query the database.
        links (dict): Mapping of table name to a list of PKs in that table.
    Returns:
        rows (dict): Mapping of table name to a list of rows of data from that table.
    """
    linked_rows = defaultdict(list)
    for table_name, ids in links.items():
        if table_name.startswith('gtr_outcomes'):
            # Record only the outcome type (the suffix after 'gtr_outcomes_'),
            # repeated once per id; these are tallied into counts downstream.
            outcome_type = table_name[13:]
            linked_rows['gtr_outcomes'].extend([outcome_type] * len(ids))
            continue
        _class = get_class_by_tablename(Base, table_name)
        query = session.query(_class).filter(_class.id.in_(ids))
        linked_rows[table_name].extend(object_to_dict(obj) for obj in query.all())
    return linked_rows
| 94 | + |
| 95 | + |
def reformat_row(row, linked_rows, locations):
    """Prepare a raw project row for ingestion to ES.

    Args:
        row (dict): Row of project data.
        linked_rows (dict): Mapping of table name to a list of rows of data
            from that table (as produced by :func:`get_linked_rows`).
        locations (dict): Mapping of organisation id to location metadata.
    Returns:
        row (dict): Reformatted row of data.
    """
    # Extract general info
    gtr_funds = default_pop(linked_rows, 'gtr_funds')
    row['_json_funding_project'] = extract_funds(gtr_funds)
    row['_json_outcomes_project'] = dict(Counter(linked_rows['gtr_outcomes']))
    row['_terms_topics_project'] = [r['text'] for r in linked_rows['gtr_topic']
                                    if r['text'] != 'Unclassified']
    orgs = linked_rows['gtr_organisations']  # hoisted: read twice below
    row['_terms_institutes_project'] = [r['name'] for r in orgs]
    row['_terms_instituteIds_project'] = [r['id'] for r in orgs]

    # Extract geographic info. A set gives O(1) membership tests, rather
    # than scanning the id list once per known organisation.
    org_ids = set(row['_terms_instituteIds_project'])
    _locations = [loc for org_id, loc in locations.items() if org_id in org_ids]
    row['_terms_countries_project'] = [loc['country_name'] for loc in _locations]
    row['_terms_iso2_project'] = [loc['country_alpha_2'] for loc in _locations]
    row['_terms_continent_project'] = [loc['continent'] for loc in _locations]

    # Only institutes with complete coordinates contribute a geo-point
    coordinates = []
    for loc in _locations:
        lat, lon = loc['latitude'], loc['longitude']
        if lat is not None and lon is not None:
            coordinates.append({'lat': float(lat), 'lon': float(lon)})
    row['_coordinate_institutes_project'] = coordinates
    return row
| 129 | + |
| 130 | + |
def get_project_links(session, project_ids):
    """Build the look-up of linked-entity ids, keyed by project id then table name.

    This is a preparatory stage for retrieving the "rows" by id from each
    table name, per project.

    Args:
        session (SqlAlchemy session): Open session from which to query the database.
        project_ids (list-like): List of project ids to extract linked entities from.
    Returns:
        project_links (dict): Mapping of project id -> table name -> list of row ids.
    """
    project_links = defaultdict(lambda: defaultdict(list))
    query = session.query(LinkTable).filter(LinkTable.project_id.in_(project_ids))
    for obj in query.all():
        link = object_to_dict(obj)
        project_links[link['project_id']][link['table_name']].append(link['id'])
    return project_links
| 147 | + |
| 148 | + |
def get_org_locations(session):
    """Retrieve a look-up of all organisation ids to location metadata.

    Args:
        session (SqlAlchemy session): Open session from which to query the database.
    Returns:
        locations (nested dict): Mapping of organisation id to location metadata
            (the 'id' field is removed from each metadata dict).
    """
    rows = (object_to_dict(obj)
            for obj in session.query(OrganisationLocation).all())
    # pop('id') removes the key from the row, so the value holds only metadata
    return {row.pop('id'): row for row in rows}
| 162 | + |
| 163 | + |
def run():
    """Batchable entry point: transfer one batch of GtR projects from MySQL to ES.

    Reads a list of project ids from a JSON file on S3, pulls the matching
    projects and their linked rows from MySQL, reformats each row and indexes
    it into Elasticsearch. All parameters arrive via BATCHPAR_* environment
    variables (set by the orchestrating batch task).
    """
    # NOTE(review): `test` is read but never used below — confirm whether test
    # mode is meant to alter behaviour here.
    test = literal_eval(os.environ["BATCHPAR_test"])
    bucket = os.environ['BATCHPAR_bucket']
    batch_file = os.environ['BATCHPAR_batch_file']
    db_name = os.environ["BATCHPAR_db_name"]
    es_host = os.environ['BATCHPAR_outinfo']
    es_port = int(os.environ['BATCHPAR_out_port'])
    es_index = os.environ['BATCHPAR_out_index']
    entity_type = os.environ["BATCHPAR_entity_type"]
    aws_auth_region = os.environ["BATCHPAR_aws_auth_region"]

    # database setup
    logging.info('Retrieving engine connection')
    engine = get_mysql_engine("BATCHPAR_config", "mysqldb",
                              db_name)

    # es setup: schema transformation driven by gtr.json; writes are
    # suppressed entirely when AWSBATCHTEST is set in the environment
    logging.info('Connecting to ES')
    strans_kwargs = {'filename': 'gtr.json', 'ignore': ['id']}
    es = ElasticsearchPlus(hosts=es_host,
                           port=es_port,
                           aws_auth_region=aws_auth_region,
                           no_commit=("AWSBATCHTEST" in
                                      os.environ),
                           entity_type=entity_type,
                           strans_kwargs=strans_kwargs,
                           null_empty_str=True,
                           coordinates_as_floats=True,
                           listify_terms=True,
                           do_sort=False,
                           ngram_fields=['textBody_abstract_project',
                                         'textBody_potentialImpact_project',
                                         'textBody_techAbstract_project'])

    # collect file: the batch file on S3 holds a JSON list of project ids
    logging.info('Retrieving project ids')
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket, batch_file)
    # NOTE(review): `_raw_stream` is a private attribute of the boto3
    # StreamingBody — the public equivalent is obj.get()['Body'].read()
    project_ids = json.loads(obj.get()['Body']._raw_stream.read())
    logging.info(f"{len(project_ids)} project IDs "
                 "retrieved from s3")

    # Pull each project, attach its linked rows and location info,
    # reformat and index it into ES under the project id
    logging.info('Processing rows')
    with db_session(engine) as session:
        locations = get_org_locations(session)
        project_links = get_project_links(session, project_ids)
        for count, obj in enumerate((session.query(Projects)
                                     .filter(Projects.id.in_(project_ids))
                                     .all())):
            row = object_to_dict(obj)
            links = default_pop(project_links, row['id'])
            linked_rows = get_linked_rows(session, links)
            row = reformat_row(row, linked_rows, locations)
            # 'id' is popped so it isn't duplicated inside the document body
            es.index(index=es_index, id=row.pop('id'), body=row)
            if not count % 1000:  # progress heartbeat every 1000 rows
                logging.info(f"{count} rows loaded to "
                             "elasticsearch")
| 222 | + |
| 223 | + |
if __name__ == "__main__":
    set_log_level()
    # Local/debug mode: when no batch parameters are present, stand up a dev
    # ES index and synthesise the BATCHPAR_* environment expected by run()
    if 'BATCHPAR_outinfo' not in os.environ:
        from nesta.core.orms.orm_utils import setup_es
        from nesta.core.luigihacks.misctools import find_filepath_from_pathstub
        es, es_config = setup_es(production=False, endpoint='general',
                                 dataset='gtr', drop_and_recreate=True)
        environ = {'config': find_filepath_from_pathstub('mysqldb.config'),
                   'batch_file': '',
                   'db_name': 'dev',
                   'bucket': 'nesta-production-intermediate',
                   'outinfo': es_config['host'],
                   'out_port': es_config['port'],
                   'out_index': es_config['index'],
                   'aws_auth_region': 'eu-west-2',
                   'entity_type': 'project',
                   'test': "True"}
        for k, v in environ.items():
            # os.environ only accepts strings; es_config values (e.g. the
            # port) may be ints, which would raise TypeError without str()
            os.environ[f'BATCHPAR_{k}'] = str(v)

    logging.info('Starting...')
    run()
0 commit comments