diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..75e4952 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 88 +extend-ignore = E501,E712,F541,F841 +exclude = __init__.py \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..5a5b106 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +repos: + - repo: https://github.com/psf/black + rev: 23.11.0 + hooks: + - id: black + - repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 1601e31..baf39b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## [Unreleased] +### Added +- Apply Black formatter [#169](https://github.com/IN-CORE/pyincore-viz/issues/169) + ## [1.10.1] - 2023-06-12 ### Added - Google Analytics to the documentation site [#164](https://github.com/IN-CORE/pyincore-viz/issues/164) diff --git a/docs/source/conf.py b/docs/source/conf.py index 25a3452..4486c83 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -20,20 +20,20 @@ import os import sys -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../..')) -sys.path.insert(0, os.path.abspath('../../pyincore_viz')) -sys.path.insert(0, os.path.abspath('../../tests')) +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("../..")) +sys.path.insert(0, os.path.abspath("../../pyincore_viz")) +sys.path.insert(0, os.path.abspath("../../tests")) # -- Project information ----------------------------------------------------- -project = 'pyIncore-viz' -author = 'Yong Wook Kim' +project = "pyIncore-viz" +author = "Yong Wook Kim" # The short X.Y version -version = '1.10' +version = "1.10" # The full version, including alpha/beta/rc tags -release = '1.10.1' +release = "1.10.1" # -- General configuration --------------------------------------------------- @@ -44,25 +44,26 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - # 'nbsphinx', - # 'sphinx.ext.viewcode', - 'sphinx_rtd_theme', - 'sphinx.ext.ifconfig', - 'sphinx.ext.napoleon', - 'sphinx.ext.todo' - ] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + # 'nbsphinx', + # 'sphinx.ext.viewcode', + "sphinx_rtd_theme", + "sphinx.ext.ifconfig", + "sphinx.ext.napoleon", + "sphinx.ext.todo", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # -source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] # The master toctree document. -master_doc = 'index' +master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation.rst # for a list of supported languages. @@ -74,13 +75,13 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
-exclude_patterns = ['_build', '**.ipynb_checkpoints'] +exclude_patterns = ["_build", "**.ipynb_checkpoints"] # Disable notebook execution, nbsphinx not to execute # nbsphinx_execute = 'never' # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # -- Custom configuration --------------------------------------------------- @@ -90,8 +91,7 @@ # See also: # http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_mock_importshttps://github.com/sphinx-doc/sphinx/issues/4182 -autodoc_mock_imports = ['pyincore_viz', 'pytest', 'rasterstats'] - +autodoc_mock_imports = ["pyincore_viz", "pytest", "rasterstats"] # This value selects what content will be inserted into the main body of an autoclass directive. @@ -102,7 +102,7 @@ # “both”: Both the class ’ and the init method’s docstring are concatenated and inserted. # “init”: Only the init method’s docstring is inserted. -autoclass_content = 'both' +autoclass_content = "both" # -- Options for HTML output ------------------------------------------------- @@ -119,19 +119,19 @@ # # html_theme_options = {} html_theme_options = { - 'canonical_url': '', - 'analytics_id': '', - 'logo_only': False, - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, - 'vcs_pageview_mode': '', + "canonical_url": "", + "analytics_id": "", + "logo_only": False, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, + "vcs_pageview_mode": "", # Toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, } # Add any paths that contain custom static files (such as style sheets) here, @@ -153,7 +153,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'pyincorevizdoc' +htmlhelp_basename = "pyincorevizdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -162,15 +162,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -180,8 +177,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'pyIncore-viz.tex', 'pyIncore-viz Documentation', - 'ISDA NCSA', 'manual'), + ( + master_doc, + "pyIncore-viz.tex", + "pyIncore-viz Documentation", + "ISDA NCSA", + "manual", + ), ] @@ -189,10 +191,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pyIncore-viz', 'pyIncore-viz Documentation', - [author], 1) -] +man_pages = [(master_doc, "pyIncore-viz", "pyIncore-viz Documentation", [author], 1)] # If true, show URL addresses after external links. 
# man_show_urls = False @@ -204,9 +203,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pyIncore-viz', 'pyIncore-viz Documentation', - author, 'pyIncore-viz', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "pyIncore-viz", + "pyIncore-viz Documentation", + author, + "pyIncore-viz", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. diff --git a/pyincore_viz/analysis/popresultstable.py b/pyincore_viz/analysis/popresultstable.py index 78f824b..18ecfcc 100644 --- a/pyincore_viz/analysis/popresultstable.py +++ b/pyincore_viz/analysis/popresultstable.py @@ -9,11 +9,11 @@ class PopResultsTable: """Utility methods for Population related data: - Housing Unit Inventory - Person Record Inventory - Job inventory - Housing Unit Allocation - Creates tables for data exploration and visualization + Housing Unit Inventory + Person Record Inventory + Job inventory + Housing Unit Allocation + Creates tables for data exploration and visualization """ @staticmethod @@ -39,7 +39,9 @@ def visualize(dataset, **kwargs): Returns: None """ - pop_df = pd.read_csv(dataset.get_file_path('csv'), header="infer", low_memory=False) + pop_df = pd.read_csv( + dataset.get_file_path("csv"), header="infer", low_memory=False + ) table = PopResultsTable.pop_results_table(pop_df, **kwargs) return table @@ -53,20 +55,37 @@ def add_race_ethnicity_to_pop_df(df): object: Pandas DataFrame object. """ - df['Race Ethnicity'] = "0 Vacant HU No Race Ethnicity Data" - df['Race Ethnicity'].notes = "Identify Race and Ethnicity Housing Unit Characteristics." - - df.loc[(df['race'] == 1) & (df['hispan'] == 0), 'Race Ethnicity'] = "1 White alone, Not Hispanic" - df.loc[(df['race'] == 2) & (df['hispan'] == 0), 'Race Ethnicity'] = "2 Black alone, Not Hispanic" - df.loc[(df['race'] == 3) & (df['hispan'] == 0), 'Race Ethnicity'] = "3 American Indian and Alaska " \ - "Native alone, Not Hispanic" - df.loc[(df['race'] == 4) & (df['hispan'] == 0), 'Race Ethnicity'] = "4 Asian alone, Not Hispanic" - df.loc[(df['race'].isin([5, 6, 7])) & (df['hispan'] == 0), 'Race Ethnicity'] = "5 Other Race, Not Hispanic" - df.loc[(df['hispan'] == 1), 'Race Ethnicity'] = "6 Any Race, Hispanic" - df.loc[(df['gqtype'] >= 1) & (df['Race Ethnicity'] == "0 Vacant HU No Race Ethnicity Data"), 'Race Ethnicity'] \ - = "7 Group Quarters no Race Ethnicity Data" + df["Race Ethnicity"] = "0 Vacant HU No Race Ethnicity Data" + df[ + "Race Ethnicity" + ].notes = "Identify Race and Ethnicity Housing Unit Characteristics." 
+ + df.loc[ + (df["race"] == 1) & (df["hispan"] == 0), "Race Ethnicity" + ] = "1 White alone, Not Hispanic" + df.loc[ + (df["race"] == 2) & (df["hispan"] == 0), "Race Ethnicity" + ] = "2 Black alone, Not Hispanic" + df.loc[(df["race"] == 3) & (df["hispan"] == 0), "Race Ethnicity"] = ( + "3 American Indian and Alaska " "Native alone, Not Hispanic" + ) + df.loc[ + (df["race"] == 4) & (df["hispan"] == 0), "Race Ethnicity" + ] = "4 Asian alone, Not Hispanic" + df.loc[ + (df["race"].isin([5, 6, 7])) & (df["hispan"] == 0), "Race Ethnicity" + ] = "5 Other Race, Not Hispanic" + df.loc[(df["hispan"] == 1), "Race Ethnicity"] = "6 Any Race, Hispanic" + df.loc[ + (df["gqtype"] >= 1) + & (df["Race Ethnicity"] == "0 Vacant HU No Race Ethnicity Data"), + "Race Ethnicity", + ] = "7 Group Quarters no Race Ethnicity Data" # Set variable to missing if structure is vacant - makes tables look nicer - df.loc[(df['Race Ethnicity'] == "0 Vacant HU No Race Ethnicity Data"), 'Race Ethnicity'] = np.nan + df.loc[ + (df["Race Ethnicity"] == "0 Vacant HU No Race Ethnicity Data"), + "Race Ethnicity", + ] = np.nan return df @@ -79,18 +98,22 @@ def add_vacancy_to_pop_df(df): object: Pandas DataFrame object. """ - df['Vacancy Type'] = "0 Occupied Housing Unit" - df['Vacancy Type'].notes = "Identify Vacancy Type Housing Unit Characteristics." - - df.loc[(df['vacancy'] == 1), 'Vacancy Type'] = "1 For Rent" - df.loc[(df['vacancy'] == 2), 'Vacancy Type'] = "2 Rented, not occupied" - df.loc[(df['vacancy'] == 3), 'Vacancy Type'] = "3 For sale only" - df.loc[(df['vacancy'] == 4), 'Vacancy Type'] = "4 Sold, not occupied" - df.loc[(df['vacancy'] == 5), 'Vacancy Type'] = "5 For seasonal, recreational, or occasional use" - df.loc[(df['vacancy'] == 6), 'Vacancy Type'] = "6 For migrant workers" - df.loc[(df['vacancy'] == 7), 'Vacancy Type'] = "7 Other vacant" + df["Vacancy Type"] = "0 Occupied Housing Unit" + df["Vacancy Type"].notes = "Identify Vacancy Type Housing Unit Characteristics." + + df.loc[(df["vacancy"] == 1), "Vacancy Type"] = "1 For Rent" + df.loc[(df["vacancy"] == 2), "Vacancy Type"] = "2 Rented, not occupied" + df.loc[(df["vacancy"] == 3), "Vacancy Type"] = "3 For sale only" + df.loc[(df["vacancy"] == 4), "Vacancy Type"] = "4 Sold, not occupied" + df.loc[ + (df["vacancy"] == 5), "Vacancy Type" + ] = "5 For seasonal, recreational, or occasional use" + df.loc[(df["vacancy"] == 6), "Vacancy Type"] = "6 For migrant workers" + df.loc[(df["vacancy"] == 7), "Vacancy Type"] = "7 Other vacant" # Set variable to missing if structure is occupied - makes tables look nicer - df.loc[(df['Vacancy Type'] == "0 Occupied Housing Unit"), 'Vacancy Type'] = np.nan + df.loc[ + (df["Vacancy Type"] == "0 Occupied Housing Unit"), "Vacancy Type" + ] = np.nan return df @@ -103,13 +126,15 @@ def add_tenure_to_pop_df(df): object: Pandas DataFrame object. """ - df['Tenure Status'] = "0 No Tenure Status" - df['Tenure Status'].notes = "Identify Renter and Owner Occupied Housing Unit Characteristics." + df["Tenure Status"] = "0 No Tenure Status" + df[ + "Tenure Status" + ].notes = "Identify Renter and Owner Occupied Housing Unit Characteristics." 
- df.loc[(df['ownershp'] == 1), 'Tenure Status'] = "1 Owner Occupied" - df.loc[(df['ownershp'] == 2), 'Tenure Status'] = "2 Renter Occupied" + df.loc[(df["ownershp"] == 1), "Tenure Status"] = "1 Owner Occupied" + df.loc[(df["ownershp"] == 2), "Tenure Status"] = "2 Renter Occupied" # Set variable to missing if structure is vacant - makes tables look nicer - df.loc[(df['Tenure Status'] == "0 No Tenure Status"), 'Tenure Status'] = np.nan + df.loc[(df["Tenure Status"] == "0 No Tenure Status"), "Tenure Status"] = np.nan return df @@ -122,13 +147,13 @@ def add_family_to_pop_df(df): object: Pandas DataFrame object. """ - df['Family Type'] = "0 No Family Data" - df['Family Type'].notes = "Identify Family and Non-Family Characteristics." + df["Family Type"] = "0 No Family Data" + df["Family Type"].notes = "Identify Family and Non-Family Characteristics." - df.loc[(df['family'] == 1), 'Family Type'] = "1 Family Household" - df.loc[(df['family'] == 0), 'Family Type'] = "0 Non-Family Household" + df.loc[(df["family"] == 1), "Family Type"] = "1 Family Household" + df.loc[(df["family"] == 0), "Family Type"] = "0 Non-Family Household" # Set variable to missing if structure is vacant - makes tables look nicer - df.loc[(df['Family Type'] == "0 No Family Data"), 'Family Type'] = np.nan + df.loc[(df["Family Type"] == "0 No Family Data"), "Family Type"] = np.nan return df @@ -144,14 +169,22 @@ def add_dislocates_pd_df(df): Returns: object: Pandas DataFrame object. """ - df['Population Dislocation'] = "No Data" - df['Population Dislocation'].notes = "Identify Population Dislocation." - - df.loc[(df['dislocated'] == False) & (df['guid'].notnull()), 'Population Dislocation'] = "0 Does not dislocate" - df.loc[(df['dislocated'] == True) & (df['guid'].notnull()), 'Population Dislocation'] = "1 Dislocates" + df["Population Dislocation"] = "No Data" + df["Population Dislocation"].notes = "Identify Population Dislocation." + + df.loc[ + (df["dislocated"] == False) & (df["guid"].notnull()), + "Population Dislocation", + ] = "0 Does not dislocate" + df.loc[ + (df["dislocated"] == True) & (df["guid"].notnull()), + "Population Dislocation", + ] = "1 Dislocates" # Set dislocates to missing if no building data- makes tables look nicer - df.loc[(df['Population Dislocation'] == "No Data"), 'Population Dislocation'] = np.nan + df.loc[ + (df["Population Dislocation"] == "No Data"), "Population Dislocation" + ] = np.nan return df @@ -164,17 +197,17 @@ def add_jobtype_df(df): object: Pandas DataFrame object. """ - df['Job Type'] = "0 No Job Type Information" - df['Job Type'].notes = "Identify Job Type Characteristics." + df["Job Type"] = "0 No Job Type Information" + df["Job Type"].notes = "Identify Job Type Characteristics." 
- df.loc[(df['jobtype'] == 'JT03'), 'Job Type'] = "Private Primary Jobs" - df.loc[(df['jobtype'] == 'JT09'), 'Job Type'] = "Private Non-primary Jobs" - df.loc[(df['jobtype'] == 'JT05'), 'Job Type'] = "Federal Primary Jobs" - df.loc[(df['jobtype'] == 'JT10'), 'Job Type'] = "Federal Non-primary Jobs" - df.loc[(df['jobtype'] == 'JT07'), 'Job Type'] = "Public Sector Primary Jobs" - df.loc[(df['jobtype'] == 'JT11'), 'Job Type'] = "Public Sector Non-primary Jobs" + df.loc[(df["jobtype"] == "JT03"), "Job Type"] = "Private Primary Jobs" + df.loc[(df["jobtype"] == "JT09"), "Job Type"] = "Private Non-primary Jobs" + df.loc[(df["jobtype"] == "JT05"), "Job Type"] = "Federal Primary Jobs" + df.loc[(df["jobtype"] == "JT10"), "Job Type"] = "Federal Non-primary Jobs" + df.loc[(df["jobtype"] == "JT07"), "Job Type"] = "Public Sector Primary Jobs" + df.loc[(df["jobtype"] == "JT11"), "Job Type"] = "Public Sector Non-primary Jobs" # Set variable to missing if structure is occupied - makes tables look nicer - df.loc[(df['Job Type'] == "0 No Job Type Information"), 'Job Type'] = np.nan + df.loc[(df["Job Type"] == "0 No Job Type Information"), "Job Type"] = np.nan return df @@ -187,34 +220,68 @@ def add_industrycode_df(df): object: Pandas DataFrame object. """ - df['NAICS Industry Sector'] = "0 No NAICS Industry Sector" - df['NAICS Industry Sector'].notes = "Identify NAICS Industry Sector." - - df.loc[(df['IndustryCode'] == 1), 'NAICS Industry Sector'] = "11 Agriculture, Forestry, Fishing and Hunting" - df.loc[(df['IndustryCode'] == 2), 'NAICS Industry Sector'] = "21 Mining, Quarrying, and Oil and Gas Extraction" - df.loc[(df['IndustryCode'] == 3), 'NAICS Industry Sector'] = "22 Utilities" - df.loc[(df['IndustryCode'] == 4), 'NAICS Industry Sector'] = "23 Construction" - df.loc[(df['IndustryCode'] == 5), 'NAICS Industry Sector'] = "31-33 Manufacturing" - df.loc[(df['IndustryCode'] == 6), 'NAICS Industry Sector'] = "42 Wholesale Trade" - df.loc[(df['IndustryCode'] == 7), 'NAICS Industry Sector'] = "44-45 Retail Trade" - df.loc[(df['IndustryCode'] == 8), 'NAICS Industry Sector'] = "48-49 Transportation and Warehousing" - df.loc[(df['IndustryCode'] == 9), 'NAICS Industry Sector'] = "51 Information" - df.loc[(df['IndustryCode'] == 10), 'NAICS Industry Sector'] = "52 Finance and Insurance" - df.loc[(df['IndustryCode'] == 11), 'NAICS Industry Sector'] = "53 Real Estate and Rental and Leasing" - df.loc[(df['IndustryCode'] == 12), 'NAICS Industry Sector'] = "54 Professional, Scientific, and Technical " \ - "Services" - df.loc[(df['IndustryCode'] == 13), 'NAICS Industry Sector'] = "55 Management of Companies and Enterprises" - df.loc[(df['IndustryCode'] == 14), 'NAICS Industry Sector'] = "56 Administration & Support, Waste Management " \ - "and Remediation" - df.loc[(df['IndustryCode'] == 15), 'NAICS Industry Sector'] = "61 Educational Services" - df.loc[(df['IndustryCode'] == 16), 'NAICS Industry Sector'] = "62 Health Care and Social Assistance" - df.loc[(df['IndustryCode'] == 17), 'NAICS Industry Sector'] = "71 Arts, Entertainment, and Recreation" - df.loc[(df['IndustryCode'] == 18), 'NAICS Industry Sector'] = "72 Accommodation and Food Services" - df.loc[(df['IndustryCode'] == 19), 'NAICS Industry Sector'] = "81 Other Services " \ - "(excluding Public Administration)" - df.loc[(df['IndustryCode'] == 20), 'NAICS Industry Sector'] = "92 Public Administration" + df["NAICS Industry Sector"] = "0 No NAICS Industry Sector" + df["NAICS Industry Sector"].notes = "Identify NAICS Industry Sector." 
+ + df.loc[ + (df["IndustryCode"] == 1), "NAICS Industry Sector" + ] = "11 Agriculture, Forestry, Fishing and Hunting" + df.loc[ + (df["IndustryCode"] == 2), "NAICS Industry Sector" + ] = "21 Mining, Quarrying, and Oil and Gas Extraction" + df.loc[(df["IndustryCode"] == 3), "NAICS Industry Sector"] = "22 Utilities" + df.loc[(df["IndustryCode"] == 4), "NAICS Industry Sector"] = "23 Construction" + df.loc[ + (df["IndustryCode"] == 5), "NAICS Industry Sector" + ] = "31-33 Manufacturing" + df.loc[ + (df["IndustryCode"] == 6), "NAICS Industry Sector" + ] = "42 Wholesale Trade" + df.loc[ + (df["IndustryCode"] == 7), "NAICS Industry Sector" + ] = "44-45 Retail Trade" + df.loc[ + (df["IndustryCode"] == 8), "NAICS Industry Sector" + ] = "48-49 Transportation and Warehousing" + df.loc[(df["IndustryCode"] == 9), "NAICS Industry Sector"] = "51 Information" + df.loc[ + (df["IndustryCode"] == 10), "NAICS Industry Sector" + ] = "52 Finance and Insurance" + df.loc[ + (df["IndustryCode"] == 11), "NAICS Industry Sector" + ] = "53 Real Estate and Rental and Leasing" + df.loc[(df["IndustryCode"] == 12), "NAICS Industry Sector"] = ( + "54 Professional, Scientific, and Technical " "Services" + ) + df.loc[ + (df["IndustryCode"] == 13), "NAICS Industry Sector" + ] = "55 Management of Companies and Enterprises" + df.loc[(df["IndustryCode"] == 14), "NAICS Industry Sector"] = ( + "56 Administration & Support, Waste Management " "and Remediation" + ) + df.loc[ + (df["IndustryCode"] == 15), "NAICS Industry Sector" + ] = "61 Educational Services" + df.loc[ + (df["IndustryCode"] == 16), "NAICS Industry Sector" + ] = "62 Health Care and Social Assistance" + df.loc[ + (df["IndustryCode"] == 17), "NAICS Industry Sector" + ] = "71 Arts, Entertainment, and Recreation" + df.loc[ + (df["IndustryCode"] == 18), "NAICS Industry Sector" + ] = "72 Accommodation and Food Services" + df.loc[(df["IndustryCode"] == 19), "NAICS Industry Sector"] = ( + "81 Other Services " "(excluding Public Administration)" + ) + df.loc[ + (df["IndustryCode"] == 20), "NAICS Industry Sector" + ] = "92 Public Administration" # Set variable to missing if structure is occupied - makes tables look nicer - df.loc[(df['NAICS Industry Sector'] == "0 No NAICS Industry Sector"), 'NAICS Industry Sector'] = np.nan + df.loc[ + (df["NAICS Industry Sector"] == "0 No NAICS Industry Sector"), + "NAICS Industry Sector", + ] = np.nan return df @@ -229,13 +296,13 @@ def add_colpercent(df, sourcevar, formatedvar): object: Pandas DataFrame object. 
""" - df['%'] = (df[sourcevar] / (df[sourcevar].sum()/2) * 100) - df['(%)'] = df.agg('({0[%]:.1f}%)'.format, axis=1) - df['value'] = df[sourcevar] - df['format value'] = df.agg('{0[value]:,.0f}'.format, axis=1) - df[formatedvar] = df['format value'] + '\t ' + df['(%)'] + df["%"] = df[sourcevar] / (df[sourcevar].sum() / 2) * 100 + df["(%)"] = df.agg("({0[%]:.1f}%)".format, axis=1) + df["value"] = df[sourcevar] + df["format value"] = df.agg("{0[value]:,.0f}".format, axis=1) + df[formatedvar] = df["format value"] + "\t " + df["(%)"] - df = df.drop(columns=[sourcevar, '%', '(%)', 'value', 'format value']) + df = df.drop(columns=[sourcevar, "%", "(%)", "value", "format value"]) return df @@ -292,97 +359,113 @@ def pop_results_table(input_df, **kwargs): # check current column list and add categorical descriptions current_col_list = list(df.columns) # Add Race Ethnicity to columns - if all(col in current_col_list for col in ['race', 'hispan']): + if all(col in current_col_list for col in ["race", "hispan"]): df = PopResultsTable.add_race_ethnicity_to_pop_df(df) - if 'ownershp' in current_col_list: + if "ownershp" in current_col_list: df = PopResultsTable.add_tenure_to_pop_df(df) - if 'vacancy' in current_col_list: + if "vacancy" in current_col_list: df = PopResultsTable.add_tenure_to_pop_df(df) - if all(col in current_col_list for col in ['guid', 'dislocated']): + if all(col in current_col_list for col in ["guid", "dislocated"]): df = PopResultsTable.add_dislocates_pd_df(df) - if 'jobtype' in current_col_list: + if "jobtype" in current_col_list: df = PopResultsTable.add_jobtype_df(df) - if 'family' in current_col_list: + if "family" in current_col_list: df = PopResultsTable.add_family_to_pop_df(df) - if 'IndustryCode' in current_col_list: + if "IndustryCode" in current_col_list: df = PopResultsTable.add_industrycode_df(df) - if 'hhinc' in current_col_list: + if "hhinc" in current_col_list: df = PopResultsTable.add_hhinc_df(df) - if 'poverty' in current_col_list: + if "poverty" in current_col_list: df = PopResultsTable.add_poverty_df(df) if who == "Total Households": - variable = 'huid' - function = 'count' - renamecol = {'Total': who, 'sum': ''} + variable = "huid" + function = "count" + renamecol = {"Total": who, "sum": ""} num_format = "{:,.0f}" elif who == "Total Population by Households": - variable = 'numprec' + variable = "numprec" function = np.sum - renamecol = {'Total': who, 'sum': ''} + renamecol = {"Total": who, "sum": ""} num_format = "{:,.0f}" elif who == "Total Population by Persons": - variable = 'precid' - function = 'count' - renamecol = {'Total': who, 'sum': ''} + variable = "precid" + function = "count" + renamecol = {"Total": who, "sum": ""} num_format = "{:,.0f}" elif who == "Total Jobs": - variable = 'uniquejobid' - function = 'count' - renamecol = {'Total': who, 'sum': ''} + variable = "uniquejobid" + function = "count" + renamecol = {"Total": who, "sum": ""} num_format = "{:,.0f}" elif who == "Median Household Income": - variable = 'randincome' + variable = "randincome" function = np.median - renamecol = {'Total': who} + renamecol = {"Total": who} num_format = "${:,.0f}" else: - variable = 'huid' - function = 'count' - renamecol = {'Total': who, 'sum': ''} + variable = "huid" + function = "count" + renamecol = {"Total": who, "sum": ""} num_format = "{:,.0f}" # Generate table - table = pd.pivot_table(df, values=variable, index=[row_index], - margins=True, margins_name='Total', - columns=[col_index], aggfunc=function).rename(columns=renamecol) + table = pd.pivot_table( + df, 
+ values=variable, + index=[row_index], + margins=True, + margins_name="Total", + columns=[col_index], + aggfunc=function, + ).rename(columns=renamecol) table_title = "Table. " + who + " " + what + ", " + where + ", " + when + "." varformat = {(who): num_format} for col in table.columns: varformat[col] = num_format # Add percent row column - if row_percent != '': + if row_percent != "": numerator = table[row_percent] denominator = table[who] - table['row_pct'] = numerator/denominator * 100 - table['Percent Row ' + '\n' + row_percent] = \ - table.agg('{0[row_pct]:.1f}%'.format, axis=1) - table = table.drop(columns=['row_pct']) + table["row_pct"] = numerator / denominator * 100 + table["Percent Row " + "\n" + row_percent] = table.agg( + "{0[row_pct]:.1f}%".format, axis=1 + ) + table = table.drop(columns=["row_pct"]) # Add Column Percents if "Total" in who: # add column percent to all columns except the percent row column - row_pct_vars = [col for col in table if col.startswith('Percent Row ')] + row_pct_vars = [col for col in table if col.startswith("Percent Row ")] columns = [col for col in table if col not in row_pct_vars] for col in columns: - formated_column_name = col + ' (%)' + formated_column_name = col + " (%)" table = PopResultsTable.add_colpercent(table, col, formated_column_name) # Move row percent to last column - if row_percent != '': - row_pct_vars = [col for col in table if col.startswith('Percent Row ')] + if row_percent != "": + row_pct_vars = [col for col in table if col.startswith("Percent Row ")] columns = [col for col in table if col not in row_pct_vars] table = table[columns + row_pct_vars] # Caption Title Style - styles = [dict(selector="caption", - props=[("text-align", "center"), ("caption-side", "top"), ("font-size", "150%")])] - - table = table.style \ - .set_caption(table_title) \ - .set_table_styles(styles) \ + styles = [ + dict( + selector="caption", + props=[ + ("text-align", "center"), + ("caption-side", "top"), + ("font-size", "150%"), + ], + ) + ] + + table = ( + table.style.set_caption(table_title) + .set_table_styles(styles) .format(varformat) + ) return table @@ -394,16 +477,19 @@ def add_hhinc_df(df): Returns: object: Pandas DataFrame object. """ - df['Household Income Group'] = "No Data" - df['Household Income Group'].notes = "Identify Household Income Groups Housing Unit Characteristics." - df.loc[(df['hhinc'] == 1), 'Household Income Group'] = "1 Less than $15,000" - df.loc[(df['hhinc'] == 2), 'Household Income Group'] = "2 $15,000 to $24,999" - df.loc[(df['hhinc'] == 3), 'Household Income Group'] = "3 $25,000 to $74,999" - df.loc[(df['hhinc'] == 4), 'Household Income Group'] = "4 $75,000 to $99,999" - df.loc[(df['hhinc'] == 5), 'Household Income Group'] = "5 $100,000 or more" + df["Household Income Group"] = "No Data" + df[ + "Household Income Group" + ].notes = "Identify Household Income Groups Housing Unit Characteristics." 
+ df.loc[(df["hhinc"] == 1), "Household Income Group"] = "1 Less than $15,000" + df.loc[(df["hhinc"] == 2), "Household Income Group"] = "2 $15,000 to $24,999" + df.loc[(df["hhinc"] == 3), "Household Income Group"] = "3 $25,000 to $74,999" + df.loc[(df["hhinc"] == 4), "Household Income Group"] = "4 $75,000 to $99,999" + df.loc[(df["hhinc"] == 5), "Household Income Group"] = "5 $100,000 or more" # Set variable to missing if no data- makes tables look nicer - df.loc[(df['Household Income Group'] == "No Data"), - 'Household Income Group'] = np.nan + df.loc[ + (df["Household Income Group"] == "No Data"), "Household Income Group" + ] = np.nan return df @staticmethod @@ -414,11 +500,12 @@ def add_poverty_df(df): Returns: object: Pandas DataFrame object. """ - df['Poverty Status'] = "No Data" - df['Poverty Status'].notes = "Identify Poverty Status Housing Unit Characteristics." - df.loc[(df['poverty'] == 0), 'Poverty Status'] = "0 At or above poverty level" - df.loc[(df['poverty'] == 1), 'Poverty Status'] = "1 Below poverty level" + df["Poverty Status"] = "No Data" + df[ + "Poverty Status" + ].notes = "Identify Poverty Status Housing Unit Characteristics." + df.loc[(df["poverty"] == 0), "Poverty Status"] = "0 At or above poverty level" + df.loc[(df["poverty"] == 1), "Poverty Status"] = "1 Below poverty level" # Set variable to missing if no data- makes tables look nicer - df.loc[(df['Poverty Status'] == "No Data"), - 'Poverty Status'] = np.nan + df.loc[(df["Poverty Status"] == "No Data"), "Poverty Status"] = np.nan return df diff --git a/pyincore_viz/analysisviz.py b/pyincore_viz/analysisviz.py index 3e9b3cf..1fb4b15 100644 --- a/pyincore_viz/analysisviz.py +++ b/pyincore_viz/analysisviz.py @@ -13,15 +13,15 @@ class AnalysisViz: def visualize(dataset, **kwargs): """Base visualize method that dynamically imports the necessary modules. - Args: - dataset (obj): pyincore dataset without geospatial data. + Args: + dataset (obj): pyincore dataset without geospatial data. - Returns: - None + Returns: + None """ # data types that needs to use pop_results_table visualization - pop_result_table_data_types = ['incorehousingunitallocation'] + pop_result_table_data_types = ["incorehousingunitallocation"] try: module_name = "" @@ -35,8 +35,14 @@ def visualize(dataset, **kwargs): if module_name.lower() in pop_result_table_data_types: module_name = "PopResultsTable" - module = importlib.import_module("pyincore_viz.analysis." + module_name.lower()) - print("Loaded pyincore_viz.analysis." + module_name.lower() + " module successfully.") + module = importlib.import_module( + "pyincore_viz.analysis." + module_name.lower() + ) + print( + "Loaded pyincore_viz.analysis." + + module_name.lower() + + " module successfully." + ) # load class analysis_class = getattr(module, module_name) @@ -45,5 +51,7 @@ def visualize(dataset, **kwargs): return analysis_class.visualize(dataset, **kwargs) except Exception: - raise ValueError("Fail to dynamically import dataset to its corresponding class. Please double " - "check the data_type of the dataset!") + raise ValueError( + "Fail to dynamically import dataset to its corresponding class. Please double " + "check the data_type of the dataset!" 
+ ) diff --git a/pyincore_viz/geoutil.py b/pyincore_viz/geoutil.py index 63ee057..2448aa3 100644 --- a/pyincore_viz/geoutil.py +++ b/pyincore_viz/geoutil.py @@ -35,7 +35,10 @@ from io import BytesIO from pyincore_viz.plotutil import PlotUtil from pyincore_viz.tabledatasetlistmap import TableDatasetListMap as table_list_map -from pyincore_viz.helpers.common import get_period_and_demand_from_str, get_demands_for_dataset_hazards +from pyincore_viz.helpers.common import ( + get_period_and_demand_from_str, + get_demands_for_dataset_hazards, +) from branca.colormap import linear logger = pyincore_viz_globals.LOGGER @@ -45,7 +48,13 @@ class GeoUtil: """Utility methods for Geospatial Visualization""" @staticmethod - def plot_gdf_map(gdf, column, category=False, basemap=True, source=ctx.providers.OpenStreetMap.Mapnik): + def plot_gdf_map( + gdf, + column, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): """Plot Geopandas DataFrame. Args: @@ -58,14 +67,21 @@ def plot_gdf_map(gdf, column, category=False, basemap=True, source=ctx.providers """ gdf = gdf.to_crs(epsg=3857) - ax = gdf.plot(figsize=(10, 10), column=column, - categorical=category, legend=True) + ax = gdf.plot( + figsize=(10, 10), column=column, categorical=category, legend=True + ) if basemap: ctx.add_basemap(ax, source=source) @staticmethod - def overlay_gdf_with_raster_hazard(gdf, column, raster, category=False, basemap=True, - source=ctx.providers.OpenStreetMap.Mapnik): + def overlay_gdf_with_raster_hazard( + gdf, + column, + raster, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): """Overlay Geopandas DataFrame with raster dataset such as earthquake or flood. Args: @@ -78,13 +94,17 @@ def overlay_gdf_with_raster_hazard(gdf, column, raster, category=False, basemap= ctx.providers.Stamen.Terrain, ctx.providers.CartoDB.Positron etc. """ - file_path = Path(raster.local_file_path).joinpath(raster.metadata['fileDescriptors'][0]['filename']) + file_path = Path(raster.local_file_path).joinpath( + raster.metadata["fileDescriptors"][0]["filename"] + ) # check if the extension is either tif or png filename, file_extension = os.path.splitext(file_path) - if file_extension.lower() != '.png' \ - and file_extension.lower() != '.tiff' and\ - file_extension.lower() != '.tif': + if ( + file_extension.lower() != ".png" + and file_extension.lower() != ".tiff" + and file_extension.lower() != ".tif" + ): exit("Error! Given data set is not tif or png. Please check the dataset") with rasterio.open(file_path) as r: @@ -120,7 +140,13 @@ def join_datasets(geodataset, dataset): return join_gdf @staticmethod - def plot_map(dataset, column, category=False, basemap=True, source=ctx.providers.OpenStreetMap.Mapnik): + def plot_map( + dataset, + column, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): """Plot a map of geospatial dataset. Args: @@ -137,8 +163,14 @@ def plot_map(dataset, column, category=False, basemap=True, source=ctx.providers GeoUtil.plot_gdf_map(gdf, column, category, basemap, source) @staticmethod - def plot_join_map(geodataset, dataset, column, category=False, basemap=True, - source=ctx.providers.OpenStreetMap.Mapnik): + def plot_join_map( + geodataset, + dataset, + column, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): """Plot a map from geospatial dataset and non-geospatial dataset. 
Args: @@ -155,7 +187,13 @@ def plot_join_map(geodataset, dataset, column, category=False, basemap=True, GeoUtil.plot_gdf_map(gdf, column, category, basemap, source) @staticmethod - def plot_tornado(tornado_id, client, category=False, basemap=True, source=ctx.providers.OpenStreetMap.Mapnik): + def plot_tornado( + tornado_id, + client, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): """Plot a tornado path. Args: @@ -169,13 +207,17 @@ def plot_tornado(tornado_id, client, category=False, basemap=True, source=ctx.pr """ # it needs descartes package for polygon plotting # getting tornado dataset should be part of Tornado Hazard code - tornado_dataset_id = HazardService( - client).get_tornado_hazard_metadata(tornado_id)["hazardDatasets"][0].get('datasetId') + tornado_dataset_id = ( + HazardService(client) + .get_tornado_hazard_metadata(tornado_id)["hazardDatasets"][0] + .get("datasetId") + ) tornado_dataset = Dataset.from_data_service( - tornado_dataset_id, DataService(client)) + tornado_dataset_id, DataService(client) + ) tornado_gdf = gpd.read_file(tornado_dataset.local_file_path) - GeoUtil.plot_gdf_map(tornado_gdf, 'ef_rating', category, basemap, source) + GeoUtil.plot_gdf_map(tornado_gdf, "ef_rating", category, basemap, source) @staticmethod def plot_earthquake(earthquake_id, client, demand=None): @@ -188,46 +230,59 @@ def plot_earthquake(earthquake_id, client, demand=None): each demand. e.g. PGA, PGV, 0.2 sec SA. """ - eq_metadata = HazardService( - client).get_earthquake_hazard_metadata(earthquake_id) + eq_metadata = HazardService(client).get_earthquake_hazard_metadata( + earthquake_id + ) eq_dataset_id = None - if eq_metadata['eqType'] == 'model': - eq_dataset_id = eq_metadata['hazardDatasets'][0].get('datasetId') - demand_type = eq_metadata['hazardDatasets'][0].get('demandType') - period = eq_metadata['hazardDatasets'][0].get('period', "NA") + if eq_metadata["eqType"] == "model": + eq_dataset_id = eq_metadata["hazardDatasets"][0].get("datasetId") + demand_type = eq_metadata["hazardDatasets"][0].get("demandType") + period = eq_metadata["hazardDatasets"][0].get("period", "NA") else: if demand is None: # get first dataset - if len(eq_metadata['hazardDatasets']) > 0 and eq_metadata['hazardDatasets'][0]['datasetId']: - eq_dataset_id = eq_metadata['hazardDatasets'][0]['datasetId'] - demand_type = eq_metadata['hazardDatasets'][0]['demandType'] - period = eq_metadata['hazardDatasets'][0]['period'] + if ( + len(eq_metadata["hazardDatasets"]) > 0 + and eq_metadata["hazardDatasets"][0]["datasetId"] + ): + eq_dataset_id = eq_metadata["hazardDatasets"][0]["datasetId"] + demand_type = eq_metadata["hazardDatasets"][0]["demandType"] + period = eq_metadata["hazardDatasets"][0]["period"] else: raise Exception("No datasets found for the hazard") else: # match the passed demand with a dataset demand_parts = get_period_and_demand_from_str(demand) - demand_type = demand_parts['demandType'] - period = demand_parts['period'] + demand_type = demand_parts["demandType"] + period = demand_parts["period"] - for dataset in eq_metadata['hazardDatasets']: - if dataset['demandType'].lower() == demand_type.lower() and dataset['period'] == period: - eq_dataset_id = dataset['datasetId'] + for dataset in eq_metadata["hazardDatasets"]: + if ( + dataset["demandType"].lower() == demand_type.lower() + and dataset["period"] == period + ): + eq_dataset_id = dataset["datasetId"] if eq_dataset_id is None: - available_demands = get_demands_for_dataset_hazards(eq_metadata['hazardDatasets']) - raise 
Exception("Please provide a valid demand for the earthquake. " - "Available demands for the earthquake are: " + "\n" + "\n".join(available_demands)) + available_demands = get_demands_for_dataset_hazards( + eq_metadata["hazardDatasets"] + ) + raise Exception( + "Please provide a valid demand for the earthquake. " + "Available demands for the earthquake are: " + + "\n" + + "\n".join(available_demands) + ) if period > 0: title = "Demand Type: " + demand_type.upper() + ", Period: " + str(period) else: title = "Demand Type: " + demand_type.upper() - eq_dataset = Dataset.from_data_service( - eq_dataset_id, DataService(client)) + eq_dataset = Dataset.from_data_service(eq_dataset_id, DataService(client)) raster_file_path = Path(eq_dataset.local_file_path).joinpath( - eq_dataset.metadata['fileDescriptors'][0]['filename']) + eq_dataset.metadata["fileDescriptors"][0]["filename"] + ) GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -242,11 +297,12 @@ def plot_raster_dataset(dataset_id, client): """ metadata = DataService(client).get_dataset_metadata(dataset_id) # metadata = DataService(client) - title = metadata['title'] + title = metadata["title"] dataset = Dataset.from_data_service(dataset_id, DataService(client)) - raster_file_path = Path(dataset.local_file_path).\ - joinpath(dataset.metadata['fileDescriptors'][0]['filename']) + raster_file_path = Path(dataset.local_file_path).joinpath( + dataset.metadata["fileDescriptors"][0]["filename"] + ) GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -266,16 +322,18 @@ def plot_raster_file_with_legend(file_path, title=None): max = earthquake_nd.max() # Define the default viridis colormap for viz - viz_cmap = cm.get_cmap('viridis', 256) + viz_cmap = cm.get_cmap("viridis", 256) earthquake_nd = np.flip(earthquake_nd, axis=0) fig, ax = plt.subplots(figsize=(6, 6), constrained_layout=True) - psm = ax.pcolormesh(earthquake_nd, cmap=viz_cmap, rasterized=True, vmin=min, vmax=max) + psm = ax.pcolormesh( + earthquake_nd, cmap=viz_cmap, rasterized=True, vmin=min, vmax=max + ) fig.colorbar(psm, ax=ax) # since the x,y values in the images shows the cell location, # this could be misleading. 
It could be better not showing the x and y value - plt.axis('off') + plt.axis("off") plt.title(title) plt.show() @@ -292,10 +350,17 @@ def plot_graph_network(graph, coords): # nx.draw(graph, coords, with_lables=True, font_weithg='bold') # other ways to draw - nx.draw_networkx_nodes(graph, coords, cmap=plt.get_cmap( - 'jet'), node_size=100, node_color='g', with_lables=True, font_weithg='bold') + nx.draw_networkx_nodes( + graph, + coords, + cmap=plt.get_cmap("jet"), + node_size=100, + node_color="g", + with_lables=True, + font_weithg="bold", + ) nx.draw_networkx_labels(graph, coords) - nx.draw_networkx_edges(graph, coords, edge_color='r', arrows=True) + nx.draw_networkx_edges(graph, coords, edge_color="r", arrows=True) plt.show() @staticmethod @@ -379,11 +444,13 @@ def get_gdf_map(datasets: list): if isinstance(dataset, Dataset): gdf = dataset.get_dataframe_from_shapefile() geo_data = ipylft.GeoData( - geo_dataframe=gdf, name=dataset.metadata['title']) + geo_dataframe=gdf, name=dataset.metadata["title"] + ) else: gdf = dataset geo_data = ipylft.GeoData( - geo_dataframe=gdf, name="GeoDataFrame_" + str(i)) + geo_dataframe=gdf, name="GeoDataFrame_" + str(i) + ) geo_data_list.append(geo_data) bbox = gdf.total_bounds @@ -398,7 +465,11 @@ def get_gdf_map(datasets: list): return m @staticmethod - def get_wms_map(datasets: list, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WMS_URL, layer_check=False): + def get_wms_map( + datasets: list, + wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WMS_URL, + layer_check=False, + ): """Get a map with WMS layers from list of datasets. Args: @@ -421,7 +492,7 @@ def get_wms_map(datasets: list, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WM # by putting on/off for this layer checking, it could make the process faster. if layer_check: try: - wms = WebMapService(wms_url + "?", version='1.1.1') + wms = WebMapService(wms_url + "?", version="1.1.1") except lxml.etree.XMLSyntaxError: # The error is caused because it failed to parse the geoserver's return xml. 
# This error will happen in geoserver when there is not complete dataset ingested, @@ -433,7 +504,7 @@ def get_wms_map(datasets: list, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WM raise Exception("Geoserver failed to set WMS service.") for dataset in datasets: - wms_layer_name = 'incore:' + dataset.id + wms_layer_name = "incore:" + dataset.id # check availability of the wms layer # TODO in here, the question is the, should this error quit whole process # or just keep going and show the error message for only the layer with error @@ -448,12 +519,21 @@ def get_wms_map(datasets: list, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WM try: wms[dataset.id].boundingBox except KeyError: - print("Error: The layer " + str(dataset.id) + " does not exist in the wms server") - wms_layer = ipylft.WMSLayer(url=wms_url, layers=wms_layer_name, - format='image/png', transparent=True, name=dataset.metadata['title']) + print( + "Error: The layer " + + str(dataset.id) + + " does not exist in the wms server" + ) + wms_layer = ipylft.WMSLayer( + url=wms_url, + layers=wms_layer_name, + format="image/png", + transparent=True, + name=dataset.metadata["title"], + ) wms_layers.append(wms_layer) - bbox = dataset.metadata['boundingBox'] + bbox = dataset.metadata["boundingBox"] bbox_all = GeoUtil.merge_bbox(bbox_all, bbox) m = GeoUtil.get_ipyleaflet_map(bbox_all) @@ -464,7 +544,9 @@ def get_wms_map(datasets: list, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WM return m @staticmethod - def get_gdf_wms_map(datasets, wms_datasets, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WMS_URL): + def get_gdf_wms_map( + datasets, wms_datasets, wms_url=pyincore_viz_globals.INCORE_GEOSERVER_WMS_URL + ): """Get a map with WMS layers from list of datasets for geopandas and list of datasets for WMS. Args: @@ -485,8 +567,7 @@ def get_gdf_wms_map(datasets, wms_datasets, wms_url=pyincore_viz_globals.INCORE_ for dataset in datasets: # maybe this part should be moved to Dataset Class gdf = gpd.read_file(dataset.local_file_path) - geo_data = ipylft.GeoData( - geo_dataframe=gdf, name=dataset.metadata['title']) + geo_data = ipylft.GeoData(geo_dataframe=gdf, name=dataset.metadata["title"]) geo_data_list.append(geo_data) bbox = gdf.total_bounds @@ -494,12 +575,17 @@ def get_gdf_wms_map(datasets, wms_datasets, wms_url=pyincore_viz_globals.INCORE_ wms_layers = [] for dataset in wms_datasets: - wms_layer_name = 'incore:' + dataset.id - wms_layer = ipylft.WMSLayer(url=wms_url, layers=wms_layer_name, format='image/png', - transparent=True, name=dataset.metadata['title'] + '-WMS') + wms_layer_name = "incore:" + dataset.id + wms_layer = ipylft.WMSLayer( + url=wms_url, + layers=wms_layer_name, + format="image/png", + transparent=True, + name=dataset.metadata["title"] + "-WMS", + ) wms_layers.append(wms_layer) - bbox = dataset.metadata['boundingBox'] + bbox = dataset.metadata["boundingBox"] bbox_all = GeoUtil.merge_bbox(bbox_all, bbox) m = GeoUtil.get_ipyleaflet_map(bbox_all) @@ -562,18 +648,24 @@ def plot_network_dataset(network_dataset: NetworkDataset): return m @staticmethod - def plot_table_dataset(dataset, client, column=str, category=False, basemap=True, - source=ctx.providers.OpenStreetMap.Mapnik): - """ Creates map window with table dataset. - - Args: - dataset (obj): pyincore dataset. - client (obj): pyincore service. - column (str): column name to be plot. - category (bool): turn on/off category option. - basemap (bool): turn on/off base map (e.g. openstreetmap). - source(obj): source of the Map to be used. 
examples, ctx.providers.OpenStreetMap.Mapnik (default), - ctx.providers.Stamen.Terrain, ctx.providers.CartoDB.Positron etc. + def plot_table_dataset( + dataset, + client, + column=str, + category=False, + basemap=True, + source=ctx.providers.OpenStreetMap.Mapnik, + ): + """Creates map window with table dataset. + + Args: + dataset (obj): pyincore dataset. + client (obj): pyincore service. + column (str): column name to be plot. + category (bool): turn on/off category option. + basemap (bool): turn on/off base map (e.g. openstreetmap). + source(obj): source of the Map to be used. examples, ctx.providers.OpenStreetMap.Mapnik (default), + ctx.providers.Stamen.Terrain, ctx.providers.CartoDB.Positron etc. """ joined_gdf = GeoUtil.join_table_dataset_with_source_dataset(dataset, client) @@ -585,25 +677,28 @@ def plot_table_dataset(dataset, client, column=str, category=False, basemap=True def join_table_dataset_with_source_dataset(dataset, client): """Creates geopandas dataframe by joining table dataset and its source dataset. - Args: - dataset (obj): pyincore dataset. - client (obj): pyincore service client. + Args: + dataset (obj): pyincore dataset. + client (obj): pyincore service client. - Returns: - obj: Geopandas geodataframe object. + Returns: + obj: Geopandas geodataframe object. """ is_source_dataset = False source_dataset = None # check if the given dataset is table dastaset - if dataset.metadata['format'] != 'table' and dataset.metadata['format'] != 'csv': + if ( + dataset.metadata["format"] != "table" + and dataset.metadata["format"] != "csv" + ): print("The given dataset is not a table dataset") return None # check if source dataset exists try: - source_dataset = dataset.metadata['sourceDataset'] + source_dataset = dataset.metadata["sourceDataset"] is_source_dataset = True except Exception: print("There is no source dataset for the give table dataset") @@ -618,39 +713,51 @@ def join_table_dataset_with_source_dataset(dataset, client): return joined_gdf @staticmethod - def plot_table_dataset_list_from_single_source(client, dataset_list, column, in_source_dataset_id=None): + def plot_table_dataset_list_from_single_source( + client, dataset_list, column, in_source_dataset_id=None + ): """Creates map window with a list of table dataset and source dataset. - Args: - client (obj): pyincore service Client Object. - dataset_list (list): list of table dataset. - column (str): column name to be plot. - in_source_dataset_id (str): source dataset id, the default is None. + Args: + client (obj): pyincore service Client Object. + dataset_list (list): list of table dataset. + column (str): column name to be plot. + in_source_dataset_id (str): source dataset id, the default is None. - Returns: - obj: An ipyleaflet Map, GeoUtil.map (ipyleaflet.Map). + Returns: + obj: An ipyleaflet Map, GeoUtil.map (ipyleaflet.Map). 
- """ + """ source_dataset_id = None if in_source_dataset_id is None: - joined_df, dataset_id_list, source_dataset_id = \ - GeoUtil.merge_table_dataset_with_field(dataset_list, column) + ( + joined_df, + dataset_id_list, + source_dataset_id, + ) = GeoUtil.merge_table_dataset_with_field(dataset_list, column) else: - joined_df, dataset_id_list, source_dataset_id = \ - GeoUtil.merge_table_dataset_with_field(dataset_list, column, in_source_dataset_id) + ( + joined_df, + dataset_id_list, + source_dataset_id, + ) = GeoUtil.merge_table_dataset_with_field( + dataset_list, column, in_source_dataset_id + ) if source_dataset_id is None: raise Exception("There is no sourceDataset id.") - source_dataset = Dataset.from_data_service(source_dataset_id, DataService(client)) + source_dataset = Dataset.from_data_service( + source_dataset_id, DataService(client) + ) inventory_df = PlotUtil.inventory_to_geodataframe(source_dataset) - inventory_df = PlotUtil.remove_null_inventories(inventory_df, 'guid') + inventory_df = PlotUtil.remove_null_inventories(inventory_df, "guid") # merge inventory dataframe and joined table dataframe - inventory_df = inventory_df.merge(joined_df, on='guid') + inventory_df = inventory_df.merge(joined_df, on="guid") # keep only necessary fields - keep_list = ['guid', 'geometry'] + keep_list = ["guid", "geometry"] for dataset_id in dataset_id_list: # dataset_id will be used as a column name to visualize the values in the field keep_list.append(dataset_id) @@ -663,7 +770,9 @@ def plot_table_dataset_list_from_single_source(client, dataset_list, column, in_ return map.map @staticmethod - def merge_table_dataset_with_field(dataset_list: list, column=str, in_source_dataset_id=None): + def merge_table_dataset_with_field( + dataset_list: list, column=str, in_source_dataset_id=None + ): """Creates pandas dataframe with all dataset in the list joined with guid and column. Args: @@ -695,20 +804,23 @@ def merge_table_dataset_with_field(dataset_list: list, column=str, in_source_dat dataset_id = dataset.metadata["id"] dataset_id_list.append(dataset_id) temp_df = dataset.get_dataframe_from_csv() - temp_df = temp_df[['guid', column]] + temp_df = temp_df[["guid", column]] if dataset_counter == 0: join_df = copy.copy(temp_df) try: if dataset_counter == 0: join_df[dataset_id] = join_df[column].astype(float) - join_df = join_df[['guid', dataset_id]] + join_df = join_df[["guid", dataset_id]] else: temp_df[dataset_id] = temp_df[column].astype(float) - temp_df = temp_df[['guid', dataset_id]] - join_df = join_df.join(temp_df.set_index("guid"), on='guid') + temp_df = temp_df[["guid", dataset_id]] + join_df = join_df.join(temp_df.set_index("guid"), on="guid") except KeyError as err: - logger.debug("Skipping " + dataset_id + - ", Given column name does not exist or the column is not number.") + logger.debug( + "Skipping " + + dataset_id + + ", Given column name does not exist or the column is not number." + ) dataset_counter += 1 if in_source_dataset_id is not None: @@ -720,11 +832,11 @@ def merge_table_dataset_with_field(dataset_list: list, column=str, in_source_dat def plot_raster_from_path(input_path): """Creates map window with geo-referenced raster file from local or url visualized. - Args: - input_path (str): An input raster dataset (GeoTiff) file path. + Args: + input_path (str): An input raster dataset (GeoTiff) file path. - Returns: - obj: An ipyleaflet Map, GeoUtil.map (ipyleaflet.Map). + Returns: + obj: An ipyleaflet Map, GeoUtil.map (ipyleaflet.Map). 
""" return GeoUtil.map_raster_overlay_from_file(input_path) @@ -733,11 +845,11 @@ def plot_raster_from_path(input_path): def map_raster_overlay_from_file(input_path): """Creates map window with geo-referenced raster file from local or url visualized. - Args: - input_path (str): An input raster dataset (GeoTiff) file path. + Args: + input_path (str): An input raster dataset (GeoTiff) file path. - Returns: - obj: ipyleaflet Map object. + Returns: + obj: ipyleaflet Map object. """ bbox = GeoUtil.get_raster_boundary(input_path) @@ -746,8 +858,7 @@ def map_raster_overlay_from_file(input_path): map = GeoUtil.get_ipyleaflet_map(bbox) image = ipylft.ImageOverlay( - url=image_url, - bounds=((bbox[1], bbox[0]), (bbox[3], bbox[2])) + url=image_url, bounds=((bbox[1], bbox[0]), (bbox[3], bbox[2])) ) map.add_layer(image) @@ -757,11 +868,11 @@ def map_raster_overlay_from_file(input_path): def get_raster_boundary(input_path): """Creates boundary list from raster dataset file. - Args: - input_path (str): An input raster dataset (GeoTiff) file path. + Args: + input_path (str): An input raster dataset (GeoTiff) file path. - Returns: - list: A list of boundary values. + Returns: + list: A list of boundary values. """ data = gdal.Open(input_path, GA_ReadOnly) @@ -778,11 +889,11 @@ def get_raster_boundary(input_path): def create_data_img_url_from_geotiff_for_ipyleaflet(input_path): """Creates boundary list from raster dataset file. - Args: - input_path (str): An input raster dataset (GeoTiff) file path. + Args: + input_path (str): An input raster dataset (GeoTiff) file path. - Returns: - str: Data for the png data converted from GeoTiff. + Returns: + str: Data for the png data converted from GeoTiff. """ data = gdal.Open(input_path, GA_ReadOnly) @@ -794,33 +905,37 @@ def create_data_img_url_from_geotiff_for_ipyleaflet(input_path): tiff_norm = tiff_array - np.amin(tiff_array) tiff_norm = tiff_norm / np.amax(tiff_norm) tiff_norm = np.where(np.isfinite(tiff_array), tiff_norm, 0) - tiff_im = PIL.Image.fromarray(np.uint8(plt.cm.jet(tiff_norm) * 255)) # specify colormap + tiff_im = PIL.Image.fromarray( + np.uint8(plt.cm.jet(tiff_norm) * 255) + ) # specify colormap tiff_mask = np.where(np.isfinite(tiff_array), 255, 0) - mask = PIL.Image.fromarray(np.uint8(tiff_mask), mode='L') - output_img = PIL.Image.new('RGBA', tiff_norm.shape[::-1], color=None) + mask = PIL.Image.fromarray(np.uint8(tiff_mask), mode="L") + output_img = PIL.Image.new("RGBA", tiff_norm.shape[::-1], color=None) output_img.paste(tiff_im, mask=mask) # convert image to png f = BytesIO() - output_img.save(f, 'png') + output_img.save(f, "png") data = b64encode(f.getvalue()) - data = data.decode('ascii') - image_url = 'data:image/png;base64,' + data + data = data.decode("ascii") + image_url = "data:image/png;base64," + data return image_url @staticmethod - def plot_maps_dataset_list(dataset_list, client, column='guid', category=False, basemap=True): + def plot_maps_dataset_list( + dataset_list, client, column="guid", category=False, basemap=True + ): """Create map window using dataset list. Should be okay whether it is shapefile or geotiff. - Args: - dataset_list (list): A list of dataset to be mapped. - column (str): A column name to be plot. - client (obj): pyincore service Client. - category (bool): turn on/off category option. - basemap (bool): turn on/off base map (e.g. openstreetmap). + Args: + dataset_list (list): A list of dataset to be mapped. + column (str): A column name to be plot. + client (obj): pyincore service Client. 
+ category (bool): turn on/off category option. + basemap (bool): turn on/off base map (e.g. openstreetmap). - Returns: - obj: An ipyleaflet Map. + Returns: + obj: An ipyleaflet Map. """ layer_list = [] @@ -829,42 +944,64 @@ def plot_maps_dataset_list(dataset_list, client, column='guid', category=False, for dataset in dataset_list: # check if dataset is shapefile or raster try: - if dataset.metadata['format'].lower() == 'shapefile': + if dataset.metadata["format"].lower() == "shapefile": gdf = gpd.read_file(dataset.local_file_path) - geodata = GeoUtil.create_geodata_from_geodataframe(gdf, dataset.metadata['title']) + geodata = GeoUtil.create_geodata_from_geodataframe( + gdf, dataset.metadata["title"] + ) bbox = gdf.total_bounds bbox_all = GeoUtil.merge_bbox(bbox_all, bbox) layer_list.append(geodata) - elif dataset.metadata['format'].lower() == 'table' or dataset.metadata['format'] == 'csv': + elif ( + dataset.metadata["format"].lower() == "table" + or dataset.metadata["format"] == "csv" + ): # check source dataset - gdf = GeoUtil.join_table_dataset_with_source_dataset(dataset, client) + gdf = GeoUtil.join_table_dataset_with_source_dataset( + dataset, client + ) if gdf is None: - print(dataset.metadata['title'] + "'s data format" + dataset.metadata['format'] + - " is not supported.") + print( + dataset.metadata["title"] + + "'s data format" + + dataset.metadata["format"] + + " is not supported." + ) else: - geodata = GeoUtil.create_geodata_from_geodataframe(gdf, dataset.metadata['title']) + geodata = GeoUtil.create_geodata_from_geodataframe( + gdf, dataset.metadata["title"] + ) bbox = gdf.total_bounds bbox_all = GeoUtil.merge_bbox(bbox_all, bbox) layer_list.append(geodata) - elif dataset.metadata['format'].lower() == 'raster' \ - or dataset.metadata['format'].lower() == 'geotif' \ - or dataset.metadata['format'].lower() == 'geotif': - input_path = dataset.get_file_path('tif') + elif ( + dataset.metadata["format"].lower() == "raster" + or dataset.metadata["format"].lower() == "geotif" + or dataset.metadata["format"].lower() == "geotif" + ): + input_path = dataset.get_file_path("tif") bbox = GeoUtil.get_raster_boundary(input_path) bbox_all = GeoUtil.merge_bbox(bbox_all, bbox) - image_url = GeoUtil.create_data_img_url_from_geotiff_for_ipyleaflet(input_path) + image_url = GeoUtil.create_data_img_url_from_geotiff_for_ipyleaflet( + input_path + ) image = ipylft.ImageOverlay( - url=image_url, - bounds=((bbox[1], bbox[0]), (bbox[3], bbox[2])) + url=image_url, bounds=((bbox[1], bbox[0]), (bbox[3], bbox[2])) ) layer_list.append(image) else: - print(dataset.metadata['title'] + "'s data format" + dataset.metadata['format'] + - " is not supported.") + print( + dataset.metadata["title"] + + "'s data format" + + dataset.metadata["format"] + + " is not supported." + ) except Exception: - print("There is a problem in dataset format for ' + dataset.metadata['title'] + '.") + print( + "There is a problem in dataset format for ' + dataset.metadata['title'] + '." + ) map = GeoUtil.get_ipyleaflet_map(bbox_all) @@ -879,20 +1016,29 @@ def plot_maps_dataset_list(dataset_list, client, column='guid', category=False, def create_geodata_from_geodataframe(gdf, name): """Create map window using dataset list. Should be okay whether it is shapefile or geotiff. - Args: - gdf (obj): A geopandas geodataframe. - name (str): A name of the gdf. + Args: + gdf (obj): A geopandas geodataframe. + name (str): A name of the gdf. - Returns: - obj: An ipyleaflet GeoData. + Returns: + obj: An ipyleaflet GeoData. 
""" # create random color - color = "#" + ''.join([random.choice('0123456789ABCDEF') for j in range(6)]) - geodata = ipylft.GeoData(geo_dataframe=gdf, - style={'color': 'black', 'fillColor': color, 'opacity': 0.05, - 'weight': 1.9, 'dashArray': '2', 'fillOpacity': 0.6}, - hover_style={'fillColor': 'red', 'fillOpacity': 0.2}, name=name) + color = "#" + "".join([random.choice("0123456789ABCDEF") for j in range(6)]) + geodata = ipylft.GeoData( + geo_dataframe=gdf, + style={ + "color": "black", + "fillColor": color, + "opacity": 0.05, + "weight": 1.9, + "dashArray": "2", + "fillOpacity": 0.6, + }, + hover_style={"fillColor": "red", "fillOpacity": 0.2}, + name=name, + ) return geodata @@ -900,11 +1046,11 @@ def create_geodata_from_geodataframe(gdf, name): def convert_bound_to_ipylft_format(bbox): """Convert conventional geodata's bounding box to ipyleaflet bounding box format. - Args: - bbox (list): Geodata bounding box with [min_lat, min_lon, max_lat, max_lon]. + Args: + bbox (list): Geodata bounding box with [min_lat, min_lon, max_lat, max_lon]. - Returns: - list: A bounding box coordinates, [[south, east], [north, west]]. + Returns: + list: A bounding box coordinates, [[south, east], [north, west]]. """ south = bbox[1] @@ -920,12 +1066,12 @@ def convert_bound_to_ipylft_format(bbox): def calc_center_from_bbox(bbox): """Calculate center point location from given bounding box. - Args: - bbox (list): Geodata bounding box with [min_lat, min_lon, max_lat, max_lon]. + Args: + bbox (list): Geodata bounding box with [min_lat, min_lon, max_lat, max_lon]. - Returns: - float: A latitude of center location in the bounding box. - float: A longitude of center location in the bounding box. + Returns: + float: A latitude of center location in the bounding box. + float: A longitude of center location in the bounding box. """ cen_lat, cen_lon = (bbox[2] + bbox[0]) / 2.0, (bbox[3] + bbox[1]) / 2.0 @@ -936,17 +1082,22 @@ def calc_center_from_bbox(bbox): def get_ipyleaflet_map_with_center_location(cen_lon, cen_lat, zoom_level): """Creates ipyleaflet map object and fit the map using the center point location and zoom level. - Args: - cen_lon (float): Longitude of map's center location. - cen_lat (float): Latitude of map's center location. - zoom_level (int): An initial zoom level of the map. + Args: + cen_lon (float): Longitude of map's center location. + cen_lat (float): Latitude of map's center location. + zoom_level (int): An initial zoom level of the map. - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ - map = ipylft.Map(center=(cen_lon, cen_lat), zoom=zoom_level, basemap=ipylft.basemaps.OpenStreetMap.Mapnik, - crs=projections.EPSG3857, scroll_wheel_zoom=True) + map = ipylft.Map( + center=(cen_lon, cen_lat), + zoom=zoom_level, + basemap=ipylft.basemaps.OpenStreetMap.Mapnik, + crs=projections.EPSG3857, + scroll_wheel_zoom=True, + ) return map @@ -954,15 +1105,19 @@ def get_ipyleaflet_map_with_center_location(cen_lon, cen_lat, zoom_level): def get_ipyleaflet_map(bbox=None): """Creates ipyleaflet map object and fit the map using the bounding box information. - Args: - bbox (list): Geodata bounding box. + Args: + bbox (list): Geodata bounding box. - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. 
""" - map = ipylft.Map(basemap=ipylft.basemaps.OpenStreetMap.Mapnik, zoom=10, - crs=projections.EPSG3857, scroll_wheel_zoom=True) + map = ipylft.Map( + basemap=ipylft.basemaps.OpenStreetMap.Mapnik, + zoom=10, + crs=projections.EPSG3857, + scroll_wheel_zoom=True, + ) if bbox is not None: # the boundary information should be converted to ipyleaflet code boundary @@ -973,49 +1128,55 @@ def get_ipyleaflet_map(bbox=None): # need to reverse x and y map.center = [center[1], center[0]] - map.add_control(ipylft.LayersControl(position='topright')) - map.add_control(ipylft.FullScreenControl(position='topright')) + map.add_control(ipylft.LayersControl(position="topright")) + map.add_control(ipylft.FullScreenControl(position="topright")) return map @staticmethod - def plot_heatmap(dataset, fld_name, radius=10, blur=10, max=1, multiplier=1, name=""): + def plot_heatmap( + dataset, fld_name, radius=10, blur=10, max=1, multiplier=1, name="" + ): """Creates ipyleaflet map object and fit the map using the bounding box information. - Args: - dataset (obj): A dataset to be mapped. - fld_name (str): A column name to be plot in heat map. - radius (float): Radius of each "point" of the heatmap. - blur (float): Amount of blur. - max (float): Maximum point intensity. - multiplier (float): A multiplication factor for making fld value to more clearly in the map. - name (str): name that represents the layer. + Args: + dataset (obj): A dataset to be mapped. + fld_name (str): A column name to be plot in heat map. + radius (float): Radius of each "point" of the heatmap. + blur (float): Amount of blur. + max (float): Maximum point intensity. + multiplier (float): A multiplication factor for making fld value to more clearly in the map. + name (str): name that represents the layer. - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ gdf = gpd.read_file(dataset.local_file_path) - map = GeoUtil.plot_heatmap_from_gdf(gdf, fld_name, radius, blur, max, multiplier, name) + map = GeoUtil.plot_heatmap_from_gdf( + gdf, fld_name, radius, blur, max, multiplier, name + ) return map @staticmethod - def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1, name=""): + def plot_heatmap_from_gdf( + gdf, fld_name, radius=10, blur=10, max=1, multiplier=1, name="" + ): """Creates ipyleaflet map object and fit the map using the bounding box information. - Args: - gdf (GeoDataFrame): GeoPandas geodataframe. - fld_name (str): column name to be plot in heat map. - radius (float): Radius of each "point" of the heatmap. - blur (float): Amount of blur. - max (float): Maximum point intensity. - multiplier (float): A multiplication factor for making fld value to more clearly in the map. - name (str): A name that represents the layer. + Args: + gdf (GeoDataFrame): GeoPandas geodataframe. + fld_name (str): column name to be plot in heat map. + radius (float): Radius of each "point" of the heatmap. + blur (float): Amount of blur. + max (float): Maximum point intensity. + multiplier (float): A multiplication factor for making fld value to more clearly in the map. + name (str): A name that represents the layer. - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ # when the geodataframe is processed, not original(converted directly) @@ -1049,22 +1210,25 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 # create locations placeholder for heatmap using x, y value and field value. 
locations = [] - if (is_geometry): - if gdf.geom_type[0].lower() != "point" and gdf.geom_type[0].lower() != "polygon" \ - and gdf.geom_type[0].lower() != "linestring": + if is_geometry: + if ( + gdf.geom_type[0].lower() != "point" + and gdf.geom_type[0].lower() != "polygon" + and gdf.geom_type[0].lower() != "linestring" + ): raise Exception("Error, the input dataset's geometry is not supported.") # convert polygon to point if gdf.geom_type[0].lower() == "polygon": points = gdf.copy() - points.geometry = points['geometry'].centroid + points.geometry = points["geometry"].centroid points.crs = gdf.crs gdf = points # convert line to point if gdf.geom_type[0].lower() == "linestring": lines = gdf.copy() - lines.geometry = lines['geometry'].centroid + lines.geometry = lines["geometry"].centroid lines.crs = gdf.crs gdf = lines @@ -1072,7 +1236,9 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 bbox = [bbox[0], bbox[1], bbox[2], bbox[3]] for index, row in gdf.iterrows(): - locations.append([row.geometry.y, row.geometry.x, row[fld_name] * multiplier]) + locations.append( + [row.geometry.y, row.geometry.x, row[fld_name] * multiplier] + ) else: # create location information for total bounding box # set initial min, max values @@ -1082,8 +1248,10 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 # that is kind of out of scope for pyincore-viz. # However, if it is needed, maybe it should be included # in the future release for pyincore. - first_geometry = ((first_row.geometry).replace('(', '').replace(')', '')).split() - if first_geometry[0].lower() != 'point': + first_geometry = ( + (first_row.geometry).replace("(", "").replace(")", "") + ).split() + if first_geometry[0].lower() != "point": raise Exception("The given geometry is not point.") minx = float(first_geometry[1]) @@ -1092,7 +1260,7 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 maxy = float(first_geometry[2]) for index, row in gdf.iterrows(): - geometry = ((row.geometry).replace('(', '').replace(')', '')).split() + geometry = ((row.geometry).replace("(", "").replace(")", "")).split() locations.append([geometry[2], geometry[1], row[fld_name] * multiplier]) if float(geometry[1]) < minx: minx = float(geometry[1]) @@ -1108,11 +1276,13 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 if name == "": name = fld_name - heatmap = GeoUtil.get_ipyleaflet_heatmap(locations=locations, radius=radius, blur=blur, max=max, name=name) + heatmap = GeoUtil.get_ipyleaflet_heatmap( + locations=locations, radius=radius, blur=blur, max=max, name=name + ) map = GeoUtil.get_ipyleaflet_map(bbox) map.add_layer(heatmap) - map.add_control(ipylft.LayersControl(position='topright')) + map.add_control(ipylft.LayersControl(position="topright")) return map @@ -1120,21 +1290,29 @@ def plot_heatmap_from_gdf(gdf, fld_name, radius=10, blur=10, max=1, multiplier=1 def get_ipyleaflet_heatmap(locations=None, radius=10, blur=10, max=1, name=""): """Creates ipyleaflet map object and fit the map using the bounding box information. - Args: - locations (list): A list of center locations with values. - radius (float): A radius of each "point" of the heatmap. - blur (float): Amount of blur. - max (float): A maximum point intensity. - name (str): A name that represents the layer. + Args: + locations (list): A list of center locations with values. + radius (float): A radius of each "point" of the heatmap. + blur (float): Amount of blur. 
+ max (float): A maximum point intensity. + name (str): A name that represents the layer. - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ # create location list using x, y, and fld value - heatmap = ipylft.Heatmap(locations=locations, radius=radius, blur=blur, name=name) + heatmap = ipylft.Heatmap( + locations=locations, radius=radius, blur=blur, name=name + ) heatmap.max = max - heatmap.gradient = {0.4: 'red', 0.6: 'yellow', 0.7: 'lime', 0.8: 'cyan', 1.0: 'blue'} + heatmap.gradient = { + 0.4: "red", + 0.6: "yellow", + 0.7: "lime", + 0.8: "cyan", + 1.0: "blue", + } return heatmap @@ -1147,8 +1325,16 @@ def plot_local_earthquake(eq_dataset): demand_units = eq_dataset.demand_units hazard_type = eq_dataset.hazard_type period = eq_dataset.period - title = "Demand Type: " + demand_type.upper() + ", Demand Units: " + demand_units + ", Period: " + \ - str(period) + ", Hazard Type: " + hazard_type + title = ( + "Demand Type: " + + demand_type.upper() + + ", Demand Units: " + + demand_units + + ", Period: " + + str(period) + + ", Hazard Type: " + + hazard_type + ) raster_file_path = eq_dataset.dataset.local_file_path GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -1167,8 +1353,14 @@ def plot_local_tsunami(tsu_dataset): demand_type = tsu_dataset.demand_type demand_units = tsu_dataset.demand_units hazard_type = tsu_dataset.hazard_type - title = "Demand Type: " + demand_type.upper() + ", Demand Units: " + str(demand_units) + \ - ", Hazard Type: " + hazard_type + title = ( + "Demand Type: " + + demand_type.upper() + + ", Demand Units: " + + str(demand_units) + + ", Hazard Type: " + + hazard_type + ) raster_file_path = tsu_dataset.dataset.local_file_path GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -1187,8 +1379,14 @@ def plot_local_flood(flood_dataset): demand_type = flood_dataset.demand_type demand_units = flood_dataset.demand_units hazard_type = flood_dataset.hazard_type - title = "Demand Type: " + demand_type.upper() + ", Demand Units: " + str(demand_units) + \ - ", Hazard Type: " + hazard_type + title = ( + "Demand Type: " + + demand_type.upper() + + ", Demand Units: " + + str(demand_units) + + ", Hazard Type: " + + hazard_type + ) raster_file_path = flood_dataset.dataset.local_file_path GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -1207,8 +1405,14 @@ def plot_local_hurricane(hur_dataset): demand_type = hur_dataset.demand_type demand_units = hur_dataset.demand_units hazard_type = hur_dataset.hazard_type - title = "Demand Type: " + demand_type.upper() + ", Demand Units: " + str(demand_units) + \ - ", Hazard Type: " + hazard_type + title = ( + "Demand Type: " + + demand_type.upper() + + ", Demand Units: " + + str(demand_units) + + ", Hazard Type: " + + hazard_type + ) raster_file_path = hur_dataset.dataset.local_file_path GeoUtil.plot_raster_file_with_legend(raster_file_path, title) @@ -1229,11 +1433,11 @@ def plot_local_tornado(tornado, id_field): def plot_multiple_vector_dataset(dataset_list): """Plot multiple vector datasets on the same map. - Args: - dataset_list (list): A list of datasets + Args: + dataset_list (list): A list of datasets - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. 
""" geodata_dic_list = [] @@ -1262,14 +1466,16 @@ def plot_multiple_vector_dataset(dataset_list): bbox[3] = tmp_bbox[3] # skim geodataframe only for needed fields - tmp_fld_list = ['geometry'] + tmp_fld_list = ["geometry"] tmp_gpd_skimmed = tmp_gpd[tmp_fld_list] tmp_geo_data_dic = json.loads(tmp_gpd_skimmed.to_json()) geodata_dic_list.append(tmp_geo_data_dic) title_list.append(dataset.metadata["title"]) except Exception: - raise ValueError("Given dataset might not be a geodataset or has an error in the attribute") + raise ValueError( + "Given dataset might not be a geodataset or has an error in the attribute" + ) out_map = GeoUtil.get_ipyleaflet_map(bbox) @@ -1277,14 +1483,10 @@ def plot_multiple_vector_dataset(dataset_list): # add data to map tmp_layer = ipylft.GeoJSON( data=geodata_dic, - style={ - 'opacity': 1, 'fillOpacity': 0.8, 'weight': 1 - }, - hover_style={ - 'color': 'white', 'dashArray': '0', 'fillOpacity': 0.5 - }, + style={"opacity": 1, "fillOpacity": 0.8, "weight": 1}, + hover_style={"color": "white", "dashArray": "0", "fillOpacity": 0.5}, style_callback=GeoUtil.random_color, - name=title + name=title, ) out_map.add_layer(tmp_layer) @@ -1295,28 +1497,30 @@ def plot_multiple_vector_dataset(dataset_list): def random_color(feature): """Creates random color for ipyleaflet map feature - Args: - feature (obj): geodataframe feature + Args: + feature (obj): geodataframe feature - Returns: - obj: dictionary for color + Returns: + obj: dictionary for color """ return { - 'color': 'black', - 'fillColor': random.choice(['red', 'yellow', 'purple', 'green', 'orange', 'blue', 'magenta']), + "color": "black", + "fillColor": random.choice( + ["red", "yellow", "purple", "green", "orange", "blue", "magenta"] + ), } @staticmethod def plot_choropleth_multiple_fields_from_single_dataset(dataset, field_list): """Make choropleth map using multiple fields from single dataset. - Args: - dataset (list): A dataset to be mapped - field_list (list): A list of fields in the dataset + Args: + dataset (list): A dataset to be mapped + field_list (list): A list of fields in the dataset - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ in_gpd = None @@ -1333,26 +1537,28 @@ def plot_choropleth_multiple_fields_from_single_dataset(dataset, field_list): bbox = in_gpd.total_bounds except Exception: - raise ValueError("Given dataset might not be a geodataset or has an error in the attribute") + raise ValueError( + "Given dataset might not be a geodataset or has an error in the attribute" + ) # skim geodataframe only for needed fields - field_list.append('geometry') + field_list.append("geometry") in_gpd_tmp = in_gpd[field_list] geo_data_dic = json.loads(in_gpd_tmp.to_json()) out_map = GeoUtil.get_ipyleaflet_map(bbox) for fld in field_list: - if fld != 'geometry': + if fld != "geometry": tmp_choro_data = GeoUtil.create_choro_data_from_pd(in_gpd, fld) # add choropleth data to map tmp_layer = ipylft.Choropleth( geo_data=geo_data_dic, choro_data=tmp_choro_data, colormap=linear.YlOrRd_04, - border_color='black', - style={'fillOpacity': 0.8}, - name=fld + border_color="black", + style={"fillOpacity": 0.8}, + name=fld, ) out_map.add_layer(tmp_layer) @@ -1363,14 +1569,14 @@ def plot_choropleth_multiple_fields_from_single_dataset(dataset, field_list): def plot_choropleth_multiple_dataset(dataset_list, field_list, zoom_level=10): """Make choropleth map using multiple dataset. - Args: - dataset_list (list): A list of dataset to be mapped - field_list (list): A list of fields in the dataset. 
- The order of the list should be matched with the order of dataset list - zoom_level (int): Zoom level + Args: + dataset_list (list): A list of dataset to be mapped + field_list (list): A list of fields in the dataset. + The order of the list should be matched with the order of dataset list + zoom_level (int): Zoom level - Returns: - obj: An ipyleaflet map. + Returns: + obj: An ipyleaflet map. """ geodata_dic_list = [] @@ -1407,7 +1613,7 @@ def plot_choropleth_multiple_dataset(dataset_list, field_list, zoom_level=10): bbox[3] = tmp_bbox[3] # skim geodataframe only for needed fields - tmp_fld_list = [fld, 'geometry'] + tmp_fld_list = [fld, "geometry"] tmp_gpd_skimmed = tmp_gpd[tmp_fld_list] tmp_geo_data_dic = json.loads(tmp_gpd_skimmed.to_json()) tmp_choro_data = GeoUtil.create_choro_data_from_pd(tmp_gpd_skimmed, fld) @@ -1424,15 +1630,17 @@ def plot_choropleth_multiple_dataset(dataset_list, field_list, zoom_level=10): out_map = GeoUtil.get_ipyleaflet_map(bbox) - for geodata_dic, choro_data, title in zip(geodata_dic_list, choro_data_list, title_list): + for geodata_dic, choro_data, title in zip( + geodata_dic_list, choro_data_list, title_list + ): # add choropleth data to map tmp_layer = ipylft.Choropleth( geo_data=geodata_dic, choro_data=choro_data, colormap=linear.YlOrRd_04, - border_color='black', - style={'fillOpacity': 0.8}, - name=title + border_color="black", + style={"fillOpacity": 0.8}, + name=title, ) out_map.add_layer(tmp_layer) @@ -1501,7 +1709,7 @@ def plot_local_hazard(dataset): for hurricane in dataset.hazardDatasets: GeoUtil.plot_local_hurricane(hurricane) else: - GeoUtil.plot_local_hurricane(dataset.hazardDatasets[0]) + GeoUtil.plot_local_hurricane(dataset.hazardDatasets[0]) elif hazard_type.lower() == "tornado": id_field = dataset.EF_RATING_FIELD if len(dataset.hazardDatasets) > 1: diff --git a/pyincore_viz/globals.py b/pyincore_viz/globals.py index 48e5d6f..db70e79 100644 --- a/pyincore_viz/globals.py +++ b/pyincore_viz/globals.py @@ -11,10 +11,14 @@ PACKAGE_VERSION = "1.10.0" INCORE_GEOSERVER_WMS_URL = "https://incore.ncsa.illinois.edu/geoserver/incore/wms" -INCORE_GEOSERVER_DEV_WMS_URL = "https://incore-dev.ncsa.illinois.edu/geoserver/incore/wms" +INCORE_GEOSERVER_DEV_WMS_URL = ( + "https://incore-dev.ncsa.illinois.edu/geoserver/incore/wms" +) PYINCORE_VIZ_ROOT_FOLDER = os.path.dirname(os.path.dirname(__file__)) -LOGGING_CONFIG = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.ini')) +LOGGING_CONFIG = os.path.abspath( + os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.ini") +) logging_config.fileConfig(LOGGING_CONFIG) -LOGGER = logging.getLogger('pyincore-viz') +LOGGER = logging.getLogger("pyincore-viz") diff --git a/pyincore_viz/helpers/common.py b/pyincore_viz/helpers/common.py index e5c28a3..e9d737d 100644 --- a/pyincore_viz/helpers/common.py +++ b/pyincore_viz/helpers/common.py @@ -36,6 +36,9 @@ def get_demands_for_dataset_hazards(datasets: List) -> List[str]: """ available_demands = [] for dataset in datasets: - available_demands.append(dataset['demandType'] if dataset['period'] == 0 else - str(dataset['period']) + " " + dataset['demandType']) + available_demands.append( + dataset["demandType"] + if dataset["period"] == 0 + else str(dataset["period"]) + " " + dataset["demandType"] + ) return available_demands diff --git a/pyincore_viz/plotutil.py b/pyincore_viz/plotutil.py index 2c4cfe9..6f99718 100644 --- a/pyincore_viz/plotutil.py +++ b/pyincore_viz/plotutil.py @@ -15,7 +15,10 @@ class PlotUtil: """Plotting 
utility.""" - @deprecated(version="1.8.0", reason="It is not being used anymore. Check get_x_y or get_x_y_z") + @deprecated( + version="1.8.0", + reason="It is not being used anymore. Check get_x_y or get_x_y_z", + ) def get_standard_x_y(disttype: str, alpha: float, beta: float): """Get arrays of x and y values for standard fragility or period standard fragility. @@ -29,16 +32,23 @@ def get_standard_x_y(disttype: str, alpha: float, beta: float): ndarray: Y cumulative density values. """ - if disttype == 'LogNormal': + if disttype == "LogNormal": return PlotUtil.sample_lognormal_cdf_alt(alpha, beta, 200) - if disttype == 'Normal': + if disttype == "Normal": return PlotUtil.sample_lognormal_cdf(alpha, beta, 200) - if disttype == 'standardNormal': + if disttype == "standardNormal": return PlotUtil.sample_normal_cdf(alpha, beta, 200) @staticmethod - def get_x_y(curve, demand_type_name, curve_parameters, custom_curve_parameters, - start=0.001, end=10, sample_size: int = 200): + def get_x_y( + curve, + demand_type_name, + curve_parameters, + custom_curve_parameters, + start=0.001, + end=10, + sample_size: int = 200, + ): """Get arrays of x and y values for plotting refactored fragility curves. Args: @@ -58,14 +68,26 @@ def get_x_y(curve, demand_type_name, curve_parameters, custom_curve_parameters, x = numpy.linspace(start, end, sample_size) y = [] for i in x: - y.append(curve.solve_curve_expression(hazard_values={demand_type_name: i}, - curve_parameters=curve_parameters, **custom_curve_parameters)) + y.append( + curve.solve_curve_expression( + hazard_values={demand_type_name: i}, + curve_parameters=curve_parameters, + **custom_curve_parameters + ) + ) y = numpy.asarray(y) return x, y @staticmethod - def get_x_y_z(curve, demand_type_names, curve_parameters, custom_curve_parameters, start=1, end=50, - sample_interval: int = 0.5): + def get_x_y_z( + curve, + demand_type_names, + curve_parameters, + custom_curve_parameters, + start=1, + end=50, + sample_interval: int = 0.5, + ): """Get arrays of x, y and z values for plotting refactored fragility plots. Args: @@ -86,21 +108,30 @@ def get_x_y_z(curve, demand_type_names, curve_parameters, custom_curve_parameter x = y = numpy.arange(start, end, sample_interval) def _f(curve, x, y): - return curve.solve_curve_expression(hazard_values={demand_type_names[0]: x, - demand_type_names[1]: y}, - curve_parameters=curve_parameters, - **custom_curve_parameters) # kwargs + return curve.solve_curve_expression( + hazard_values={demand_type_names[0]: x, demand_type_names[1]: y}, + curve_parameters=curve_parameters, + **custom_curve_parameters + ) # kwargs X, Y = numpy.meshgrid(x, y) - z = numpy.array([_f(curve, x, y) for x, y in zip(numpy.ravel(X), numpy.ravel(Y))]) + z = numpy.array( + [_f(curve, x, y) for x, y in zip(numpy.ravel(X), numpy.ravel(Y))] + ) Z = z.reshape(X.shape) return X, Y, Z @staticmethod - def get_fragility_plot(fragility_set, title=None, dimension=2, limit_state="LS_0", - custom_curve_parameters={}, **kwargs): + def get_fragility_plot( + fragility_set, + title=None, + dimension=2, + limit_state="LS_0", + custom_curve_parameters={}, + **kwargs + ): """Get fragility plot. 
Args: @@ -123,14 +154,22 @@ def get_fragility_plot(fragility_set, title=None, dimension=2, limit_state="LS_0 title = fragility_set.description if dimension == 2: - return PlotUtil.get_fragility_plot_2d(fragility_set, title, custom_curve_parameters, **kwargs) + return PlotUtil.get_fragility_plot_2d( + fragility_set, title, custom_curve_parameters, **kwargs + ) if dimension == 3: - return PlotUtil.get_fragility_plot_3d(fragility_set, title, limit_state, custom_curve_parameters, **kwargs) + return PlotUtil.get_fragility_plot_3d( + fragility_set, title, limit_state, custom_curve_parameters, **kwargs + ) else: - raise ValueError("We do not support " + str(dimension) + "D fragility plotting") + raise ValueError( + "We do not support " + str(dimension) + "D fragility plotting" + ) @staticmethod - def get_fragility_plot_2d(fragility_set, title=None, custom_curve_parameters={}, **kwargs): + def get_fragility_plot_2d( + fragility_set, title=None, custom_curve_parameters={}, **kwargs + ): """Get 2d refactored fragility plot. Args: @@ -151,36 +190,54 @@ def get_fragility_plot_2d(fragility_set, title=None, custom_curve_parameters={}, for parameter in fragility_set.curve_parameters: # add case insensitive # for hazard - if parameter.get("name") is not None \ - and parameter.get("name").lower() \ - in [demand_type.lower() for demand_type in demand_types]: + if parameter.get("name") is not None and parameter.get("name").lower() in [ + demand_type.lower() for demand_type in demand_types + ]: demand_type_names.append(parameter["name"]) - elif parameter.get("fullName") is not None \ - and parameter.get("fullName").lower() \ - in [demand_type.lower() for demand_type in demand_types]: + elif parameter.get("fullName") is not None and parameter.get( + "fullName" + ).lower() in [demand_type.lower() for demand_type in demand_types]: demand_type_names.append(parameter["fullName"]) # check the rest of the parameters see if default or custom value has passed in else: - if parameter.get("expression") is None and parameter.get("name") not in \ - custom_curve_parameters: - raise ValueError("The required parameter: " + parameter.get("name") - + " does not have a default or custom value. Please check " - "your fragility curve setting. Alternatively, you can include it in the " - "custom_curve_parameters variable and passed it in this method. ") + if ( + parameter.get("expression") is None + and parameter.get("name") not in custom_curve_parameters + ): + raise ValueError( + "The required parameter: " + + parameter.get("name") + + " does not have a default or custom value. Please check " + "your fragility curve setting. Alternatively, you can include it in the " + "custom_curve_parameters variable and passed it in this method. 
" + ) for curve in fragility_set.fragility_curves: - x, y = PlotUtil.get_x_y(curve, demand_type_names[0], fragility_set.curve_parameters, - custom_curve_parameters, **kwargs) + x, y = PlotUtil.get_x_y( + curve, + demand_type_names[0], + fragility_set.curve_parameters, + custom_curve_parameters, + **kwargs + ) plt.plot(x, y, label=curve.return_type["description"]) - plt.xlabel(fragility_set.demand_types[0] + " (" + fragility_set.demand_units[0] + ")") + plt.xlabel( + fragility_set.demand_types[0] + " (" + fragility_set.demand_units[0] + ")" + ) plt.title(title) plt.legend() return plt @staticmethod - def get_fragility_plot_3d(fragility_set, title=None, limit_state="LS_0", custom_curve_parameters={}, **kwargs): + def get_fragility_plot_3d( + fragility_set, + title=None, + limit_state="LS_0", + custom_curve_parameters={}, + **kwargs + ): """Get 3d refactored fragility plot. Args: @@ -202,39 +259,63 @@ def get_fragility_plot_3d(fragility_set, title=None, limit_state="LS_0", custom_ for parameter in fragility_set.curve_parameters: # for hazard # add case insensitive - if parameter.get("name") is not None \ - and parameter.get("name").lower() \ - in [demand_type.lower() for demand_type in demand_types]: + if parameter.get("name") is not None and parameter.get("name").lower() in [ + demand_type.lower() for demand_type in demand_types + ]: demand_type_names.append(parameter["name"]) - elif parameter.get("fullName") is not None \ - and parameter.get("fullName").lower() \ - in [demand_type.lower() for demand_type in demand_types]: + elif parameter.get("fullName") is not None and parameter.get( + "fullName" + ).lower() in [demand_type.lower() for demand_type in demand_types]: demand_type_names.append(parameter["fullName"]) # check the rest of the parameters see if default or custom value has passed in else: - if parameter.get("expression") is None and parameter.get("name") not in \ - custom_curve_parameters: - raise ValueError("The required parameter: " + parameter.get("name") - + " does not have a default or custom value. Please check " - "your fragility curve setting. Alternatively, you can include it in the " - "custom_curve_parameters variable and passed it in this method. ") + if ( + parameter.get("expression") is None + and parameter.get("name") not in custom_curve_parameters + ): + raise ValueError( + "The required parameter: " + + parameter.get("name") + + " does not have a default or custom value. Please check " + "your fragility curve setting. Alternatively, you can include it in the " + "custom_curve_parameters variable and passed it in this method. " + ) if len(demand_type_names) < 2: - raise ValueError("This fragility curve set does not support 3D plot, please check if the number of demand " - "types are larger than 2.") + raise ValueError( + "This fragility curve set does not support 3D plot, please check if the number of demand " + "types are larger than 2." 
+ ) # check if desired limit state exist, we can only plot one limit state per time for 3d plot matched = False for curve in fragility_set.fragility_curves: if limit_state == curve.return_type["description"]: matched = True - x, y, z = PlotUtil.get_x_y_z(curve, demand_type_names[:2], fragility_set.curve_parameters, - custom_curve_parameters, **kwargs) - ax = plt.axes(projection='3d') - ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis', edgecolor='none') - ax.set_xlabel(fragility_set.demand_types[0] + " (" + fragility_set.demand_units[0] + ")") - ax.set_ylabel(fragility_set.demand_types[1] + " (" + fragility_set.demand_units[1] + ")") - ax.set_zlabel(limit_state + ' probability') + x, y, z = PlotUtil.get_x_y_z( + curve, + demand_type_names[:2], + fragility_set.curve_parameters, + custom_curve_parameters, + **kwargs + ) + ax = plt.axes(projection="3d") + ax.plot_surface( + x, y, z, rstride=1, cstride=1, cmap="viridis", edgecolor="none" + ) + ax.set_xlabel( + fragility_set.demand_types[0] + + " (" + + fragility_set.demand_units[0] + + ")" + ) + ax.set_ylabel( + fragility_set.demand_types[1] + + " (" + + fragility_set.demand_units[1] + + ")" + ) + ax.set_zlabel(limit_state + " probability") plt.title(title) @@ -244,8 +325,10 @@ def get_fragility_plot_3d(fragility_set, title=None, limit_state="LS_0", custom_ return plt @staticmethod - @deprecated(version="1.9.0", - reason="It is not being used anymore. Check pyincore's Dataset.get_dataframe_from_shapefile") + @deprecated( + version="1.9.0", + reason="It is not being used anymore. Check pyincore's Dataset.get_dataframe_from_shapefile", + ) def inventory_to_geodataframe(inventory_dataset): """Convert inventory_dataset to GeoDataFrame. @@ -263,7 +346,7 @@ def inventory_to_geodataframe(inventory_dataset): return inventory_df @staticmethod - def remove_null_inventories(inventory_df, key='guid'): + def remove_null_inventories(inventory_df, key="guid"): """Remove null inventory. Args: @@ -304,13 +387,18 @@ def merge_inventory_w_dmg(inventory_df, damage_result): DataFrame: Inventory. """ - inventory_df = inventory_df.merge(damage_result, on='guid') + inventory_df = inventory_df.merge(damage_result, on="guid") return inventory_df @staticmethod - def mean_damage_histogram(mean_damage_dataset, histogram_bins=20, figure_size=(10, 5), axes_font_size=12, - title_font_size=12): + def mean_damage_histogram( + mean_damage_dataset, + histogram_bins=20, + figure_size=(10, 5), + axes_font_size=12, + title_font_size=12, + ): """Figure with mean damage histogram. Args: @@ -325,8 +413,7 @@ def mean_damage_histogram(mean_damage_dataset, histogram_bins=20, figure_size=(1 """ mean_damage = mean_damage_dataset.get_dataframe_from_csv() - ax = mean_damage['meandamage'].hist( - bins=histogram_bins, figsize=figure_size) + ax = mean_damage["meandamage"].hist(bins=histogram_bins, figsize=figure_size) ax.set_title("Mean damage distribution", fontsize=title_font_size) ax.set_xlabel("mean damage value", fontsize=axes_font_size) ax.set_ylabel("counts", fontsize=axes_font_size) @@ -335,7 +422,9 @@ def mean_damage_histogram(mean_damage_dataset, histogram_bins=20, figure_size=(1 return fig @staticmethod - def histogram_from_csv_with_column(plot_title, x_title, y_title, column, in_csv, num_bins, figure_size): + def histogram_from_csv_with_column( + plot_title, x_title, y_title, column, in_csv, num_bins, figure_size + ): """Get histogram from csv with column. 
Args: diff --git a/pyincore_viz/tabledatasetlistmap.py b/pyincore_viz/tabledatasetlistmap.py index 573c2fe..83225a8 100644 --- a/pyincore_viz/tabledatasetlistmap.py +++ b/pyincore_viz/tabledatasetlistmap.py @@ -18,8 +18,12 @@ class TableDatasetListMap: """Mapping class for visualizing list of Table Dataset""" def __init__(self): - self.map = ipylft.Map(center=(0, 0), zoom=12, basemap=ipylft.basemaps.OpenStreetMap.Mapnik, - scroll_wheel_zoom=True) + self.map = ipylft.Map( + center=(0, 0), + zoom=12, + basemap=ipylft.basemaps.OpenStreetMap.Mapnik, + scroll_wheel_zoom=True, + ) def create_basemap_ipylft(self, geo_dataframe, title_list): """Creates map window with given inventory with multiple table dataset file using folder location. @@ -36,8 +40,12 @@ def create_basemap_ipylft(self, geo_dataframe, title_list): cen_x, cen_y = (ext[1] + ext[3]) / 2, (ext[0] + ext[2]) / 2 # create base ipyleaflet map - self.map = ipylft.Map(center=(cen_x, cen_y), zoom=12, - basemap=ipylft.basemaps.OpenStreetMap.Mapnik, scroll_wheel_zoom=True) + self.map = ipylft.Map( + center=(cen_x, cen_y), + zoom=12, + basemap=ipylft.basemaps.OpenStreetMap.Mapnik, + scroll_wheel_zoom=True, + ) # add map widgets self.map = self.create_map_widgets(title_list, self.map, geo_dataframe) @@ -53,19 +61,21 @@ def create_map_widgets(self, title_list, map, inventory_df): Returns: """ - map_dropdown = ipywgt.Dropdown(description='Outputfile - 1', options=title_list, width=500) - file_control1 = ipylft.WidgetControl(widget=map_dropdown, position='bottomleft') + map_dropdown = ipywgt.Dropdown( + description="Outputfile - 1", options=title_list, width=500 + ) + file_control1 = ipylft.WidgetControl(widget=map_dropdown, position="bottomleft") # use the following line when it needs to have another dropdown # dropdown2 = ipywgt.Dropdown(description = 'Outputfile - 2', options = title_list2, width=500) # file_control2 = ipylft.WidgetControl(widget=dropdown2, position='bottomleft') - button = ipywgt.Button(description='Generate Map', button_style='info') + button = ipywgt.Button(description="Generate Map", button_style="info") button.on_click(self.on_button_clicked) - map_control = ipylft.WidgetControl(widget=button, position='bottomleft') + map_control = ipylft.WidgetControl(widget=button, position="bottomleft") - map.add_control(ipylft.LayersControl(position='topright', style='info')) - map.add_control(ipylft.FullScreenControl(position='topright')) + map.add_control(ipylft.LayersControl(position="topright", style="info")) + map.add_control(ipylft.FullScreenControl(position="topright")) map.add_control(map_control) # map.add_control(file_control2) # use the line when it needs to have extra dropdown map.add_control(file_control1) @@ -86,10 +96,10 @@ def on_button_clicked(self, b): Returns: """ - print('Loading: ', self.map_dropdown.value) + print("Loading: ", self.map_dropdown.value) key = self.map_dropdown.value self.create_choropleth_layer(key) - print('\n') + print("\n") def create_choropleth_layer(self, key): """add choropleth layer to map. 
@@ -103,7 +113,7 @@ def create_choropleth_layer(self, key): # vmax_val = max(self.bldg_data_df[key]) vmax_val = 1 - temp_id = list(range(len(self.inventory_df['guid']))) + temp_id = list(range(len(self.inventory_df["guid"]))) temp_id = [str(i) for i in temp_id] choro_data = dict(zip(temp_id, self.inventory_df[key])) try: @@ -112,14 +122,20 @@ def create_choropleth_layer(self, key): except Exception: print("there is no existing layer") pass - self.layer = ipylft.Choropleth(geo_data=self.inventory_json, - choro_data=choro_data, colormap=linear.YlOrRd_04, value_min=0, - value_max=vmax_val, border_color='black', - style={'fillOpacity': 0.8}, name='dataset map') + self.layer = ipylft.Choropleth( + geo_data=self.inventory_json, + choro_data=choro_data, + colormap=linear.YlOrRd_04, + value_min=0, + value_max=vmax_val, + border_color="black", + style={"fillOpacity": 0.8}, + name="dataset map", + ) self.map.add_layer(self.layer) - print('Done loading layer.') + print("Done loading layer.") # TODO the following method for adding layer should be added in the future # def create_legend(self): diff --git a/setup.py b/setup.py index 2722b3e..4483297 100644 --- a/setup.py +++ b/setup.py @@ -7,22 +7,19 @@ from setuptools import setup, find_packages # version number of pyincore -version = '1.10.1' +version = "1.10.1" with open("README.rst", encoding="utf-8") as f: readme = f.read() setup( - name='pyincore_viz', + name="pyincore_viz", version=version, - description='IN-CORE visualization python package', + description="IN-CORE visualization python package", long_description=readme, - long_description_content_type='text/x-rst', - - url='https://incore.ncsa.illinois.edu', - + long_description_content_type="text/x-rst", + url="https://incore.ncsa.illinois.edu", license="Mozilla Public License v2.0", - classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", @@ -30,9 +27,8 @@ "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Topic :: Scientific/Engineering" + "Topic :: Scientific/Engineering", ], - keywords=[ "infrastructure", "resilience", @@ -43,44 +39,40 @@ "tsunami", "tornado", "hurricane", - "dislocation" + "dislocation", ], - - packages=find_packages(where=".", exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), + packages=find_packages( + where=".", exclude=["*.tests", "*.tests.*", "tests.*", "tests"] + ), include_package_data=True, - package_data={ - '': ['*.ini'] - }, - + package_data={"": ["*.ini"]}, install_requires=[ - 'branca>=0.3.0', - 'contextily>=1.0.0', - 'deprecated', - 'geopandas>=0.14.0', - 'ipyleaflet>=0.16.0', - 'ipywidgets>=7.6.0', - 'lxml>=4.6.3', - 'matplotlib>=3.8.0', - 'networkx>=3.2.1', - 'numpy>=1.26.0,<2.0a0', - 'owslib>=0.17.1', - 'pandas>=2.1.2', - 'pillow', - 'pyincore>=1.11.0', - 'rasterio>=1.3.9', - 'openssl<=3.2.0' + "branca>=0.3.0", + "contextily>=1.0.0", + "deprecated", + "geopandas>=0.14.0", + "ipyleaflet>=0.16.0", + "ipywidgets>=7.6.0", + "lxml>=4.6.3", + "matplotlib>=3.8.0", + "networkx>=3.2.1", + "numpy>=1.26.0,<2.0a0", + "owslib>=0.17.1", + "pandas>=2.1.2", + "pillow", + "pyincore>=1.11.0", + "rasterio>=1.3.9", + "openssl<=3.2.0", ], - extras_require={ - 'test': [ - 'pycodestyle>=2.6.0', - 'pytest>=3.9.0', - 'python-jose>=3.0', + "test": [ + "pycodestyle>=2.6.0", + "pytest>=3.9.0", + "python-jose>=3.0", ] }, - project_urls={ - 'Bug Reports': 'https://github.com/IN-CORE/pyincor-vize/issues', - 'Source': 'https://github.com/IN-CORE/pyincor-vize', + "Bug Reports": 
"https://github.com/IN-CORE/pyincor-vize/issues", + "Source": "https://github.com/IN-CORE/pyincor-vize", }, ) diff --git a/tests/conftest.py b/tests/conftest.py index eddda7b..e405592 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,12 @@ from pyincore import ( globals as pyglobals, - IncoreClient, DataService, FragilityService, RepairService, HazardService, SpaceService + IncoreClient, + DataService, + FragilityService, + RepairService, + HazardService, + SpaceService, ) @@ -18,7 +23,9 @@ def pytest_sessionstart(session): before performing collection and entering the run test loop. """ try: - with open(os.path.join(os.path.dirname(__file__), "pyincore_viz/.incorepw"), 'r') as f: + with open( + os.path.join(os.path.dirname(__file__), "pyincore_viz/.incorepw"), "r" + ) as f: cred = f.read().splitlines() except EnvironmentError: assert False @@ -27,11 +34,15 @@ def pytest_sessionstart(session): monkeypatch = MonkeyPatch() monkeypatch.setattr("builtins.input", lambda x: credentials["username"]) monkeypatch.setattr("getpass.getpass", lambda y: credentials["password"]) - client = IncoreClient(service_url=pyglobals.INCORE_API_DEV_URL, token_file_name=".incrtesttoken") + client = IncoreClient( + service_url=pyglobals.INCORE_API_DEV_URL, token_file_name=".incrtesttoken" + ) pytest.client = client pytest.datasvc = DataService(client) pytest.fragilitysvc = FragilityService(client) pytest.repairsvc = RepairService(client) pytest.hazardsvc = HazardService(client) pytest.spacesvc = SpaceService(client) - print(f"Successfully initialized Incore client and services. Using {pyglobals.INCORE_API_DEV_URL}") + print( + f"Successfully initialized Incore client and services. Using {pyglobals.INCORE_API_DEV_URL}" + ) diff --git a/tests/helpers/test_common.py b/tests/helpers/test_common.py index 17c083e..917f369 100644 --- a/tests/helpers/test_common.py +++ b/tests/helpers/test_common.py @@ -2,11 +2,10 @@ import pytest -@pytest.mark.parametrize("demand_str,exp_demand_type,exp_period", [ - ("0.2 sec SA", "SA", 0.2), - ("0.3 SA", "SA", 0.3), - ("PGA", "PGA", 0) -]) +@pytest.mark.parametrize( + "demand_str,exp_demand_type,exp_period", + [("0.2 sec SA", "SA", 0.2), ("0.3 SA", "SA", 0.3), ("PGA", "PGA", 0)], +) def test_get_period_and_demand_from_str(demand_str, exp_demand_type, exp_period): demand = get_period_and_demand_from_str(demand_str) assert demand["demandType"] == exp_demand_type and demand["period"] == exp_period diff --git a/tests/pyincore_viz/test_analysis_viz.py b/tests/pyincore_viz/test_analysis_viz.py index 8e5b899..4ab5a9c 100644 --- a/tests/pyincore_viz/test_analysis_viz.py +++ b/tests/pyincore_viz/test_analysis_viz.py @@ -5,7 +5,9 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ import pytest -from pyincore.analyses.housingunitallocation.housingunitallocation import HousingUnitAllocation +from pyincore.analyses.housingunitallocation.housingunitallocation import ( + HousingUnitAllocation, +) from pyincore_viz import AnalysisViz diff --git a/tests/pyincore_viz/test_pyincore_viz.py b/tests/pyincore_viz/test_pyincore_viz.py index 6d88557..7373a11 100644 --- a/tests/pyincore_viz/test_pyincore_viz.py +++ b/tests/pyincore_viz/test_pyincore_viz.py @@ -3,8 +3,6 @@ # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import json - import pytest import matplotlib import geopandas as gpd @@ -49,8 +47,11 @@ def 
test_visualize_joplin_tornado_building(client): viz.plot_tornado(tornado_hazard_id, client, basemap=False) - tornado_dataset_id = HazardService(client).get_tornado_hazard_metadata(tornado_hazard_id)[ - 'hazardDatasets'][0].get('datasetId') + tornado_dataset_id = ( + HazardService(client) + .get_tornado_hazard_metadata(tornado_hazard_id)["hazardDatasets"][0] + .get("datasetId") + ) tornado_dataset = Dataset.from_data_service(tornado_dataset_id, DataService(client)) viz.get_gdf_map([tornado_dataset]) @@ -99,70 +100,102 @@ def test_plot_fragility(client): matplotlib.pyplot.cla() # 5b47b2d7337d4a36187c61c9 period standard - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5b47b2d7337d4a36187c61c9")) - plt = plot.get_fragility_plot(fragility_set, title="period standard fragility curve") - plt.savefig('periodStandard.png') + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5b47b2d7337d4a36187c61c9") + ) + plt = plot.get_fragility_plot( + fragility_set, title="period standard fragility curve" + ) + plt.savefig("periodStandard.png") plt.clf() # 5b4903c7337d4a48f7d88dcf standard - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5b4903c7337d4a48f7d88dcf")) + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5b4903c7337d4a48f7d88dcf") + ) plt = plot.get_fragility_plot(fragility_set, title="standard fragility curve") - plt.savefig('standard.png') + plt.savefig("standard.png") plt.clf() # 5b47b34e337d4a36290754a0 period building - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5b47b34e337d4a36290754a0")) - plt = plot.get_fragility_plot(fragility_set, title="period building fragility curve") - plt.savefig('periodBuilding.png') + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5b47b34e337d4a36290754a0") + ) + plt = plot.get_fragility_plot( + fragility_set, title="period building fragility curve" + ) + plt.savefig("periodBuilding.png") plt.clf() # 5ed6bfc35b6166000155d0d9 parametric - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5ed6bfc35b6166000155d0d9")) + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5ed6bfc35b6166000155d0d9") + ) plt = plot.get_fragility_plot(fragility_set, title="parametric fragility curve") - plt.savefig('parametric.png') + plt.savefig("parametric.png") plt.clf() # 5b47ba6f337d4a372105936f custom 2d - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5b47ba6f337d4a372105936f")) - plt = plot.get_fragility_plot(fragility_set, title="customExpression 2d fragility curve") - plt.savefig('customExpression.png') + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5b47ba6f337d4a372105936f") + ) + plt = plot.get_fragility_plot( + fragility_set, title="customExpression 2d fragility curve" + ) + plt.savefig("customExpression.png") plt.clf() # 5ed6be9a5b6166000155d0b9 conditional 2d - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5ed6be9a5b6166000155d0b9")) + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5ed6be9a5b6166000155d0b9") + ) plt = plot.get_fragility_plot(fragility_set, title="conditional fragility curve") - plt.savefig('conditional.png') + plt.savefig("conditional.png") plt.clf() # new format 2d - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("602f31f381bd2c09ad8efcb4")) + fragility_set = FragilityCurveSet( + 
FragilityService(client).get_dfr3_set("602f31f381bd2c09ad8efcb4") + ) # comment on and off to compare curves # plt = plot.get_fragility_plot_2d(fragility_set, title="refactored fragility 2d curve") - plt = plot.get_fragility_plot_2d(fragility_set, title="refactored fragility 2d curve", - custom_curve_parameters={"ffe_elev": 3}) + plt = plot.get_fragility_plot_2d( + fragility_set, + title="refactored fragility 2d curve", + custom_curve_parameters={"ffe_elev": 3}, + ) # you can now also plot refactored fragility curve using the main plot method # plt = plot.get_fragility_plot(fragility_set, title="refactored fragility 2d curve", # custom_curve_parameters={"ffe_elev": 3}) - plt.savefig('refactored_2d.png') + plt.savefig("refactored_2d.png") plt.clf() # new format 3d - fragility_set = FragilityCurveSet(FragilityService(client).get_dfr3_set("5f6ccf67de7b566bb71b202d")) - plt = plot.get_fragility_plot_3d(fragility_set, title="refactored fragility 3d curve", limit_state="LS_0") + fragility_set = FragilityCurveSet( + FragilityService(client).get_dfr3_set("5f6ccf67de7b566bb71b202d") + ) + plt = plot.get_fragility_plot_3d( + fragility_set, title="refactored fragility 3d curve", limit_state="LS_0" + ) # you can now also plot refactored fragility curve using the main plot method # plt = plot.get_fragility_plot(fragility_set, title="refactored fragility 3d curve", limit_state="LS_0", # dimension=3, custom_curve_parameters={"ffe_elev": 3}) - plt.savefig('refactored_3d.png') + plt.savefig("refactored_3d.png") plt.clf() # test case sensitivity of demand types - import pathlib, os + import pathlib + import os + working_dir = pathlib.Path(__file__).parent.resolve() fragility_set = FragilityCurveSet.from_json_file( - os.path.join(working_dir, "data", "StandardFragilityCurveDemandType.json")) - plt = plot.get_fragility_plot_2d(fragility_set, title="demand type case insensitive fragility 2d curve") - plt.savefig('case_insensitive_2d.png') + os.path.join(working_dir, "data", "StandardFragilityCurveDemandType.json") + ) + plt = plot.get_fragility_plot_2d( + fragility_set, title="demand type case insensitive fragility 2d curve" + ) + plt.savefig("case_insensitive_2d.png") plt.clf() assert True @@ -176,18 +209,22 @@ def test_plot_raster_dataset(client): def test_visualize_raster_file(client): - galvaston_wave_height_id = '5f11e503feef2d758c4df6db' + galvaston_wave_height_id = "5f11e503feef2d758c4df6db" dataset = Dataset.from_data_service(galvaston_wave_height_id, DataService(client)) - map = viz.map_raster_overlay_from_file(dataset.get_file_path('tif')) + map = viz.map_raster_overlay_from_file(dataset.get_file_path("tif")) assert True def test_plot_map_dataset_list(client): - galveston_roadway_id = '5f0dd5ecb922f96f4e962caf' - galvaston_wave_height_id = '5f11e503feef2d758c4df6db' - shelvy_building_damage_id = '5a296b53c7d30d4af5378cd5' - dataset_id_list = [galveston_roadway_id, galvaston_wave_height_id, shelvy_building_damage_id] + galveston_roadway_id = "5f0dd5ecb922f96f4e962caf" + galvaston_wave_height_id = "5f11e503feef2d758c4df6db" + shelvy_building_damage_id = "5a296b53c7d30d4af5378cd5" + dataset_id_list = [ + galveston_roadway_id, + galvaston_wave_height_id, + shelvy_building_damage_id, + ] dataset_list = [] for dataset_id in dataset_id_list: @@ -200,23 +237,24 @@ def test_plot_map_dataset_list(client): def test_plot_map_table_dataset(client): - building_damage_id = '5a296b53c7d30d4af5378cd5' + building_damage_id = "5a296b53c7d30d4af5378cd5" dataset = Dataset.from_data_service(building_damage_id, 
DataService(client)) - map = viz.plot_table_dataset(dataset, client, 'meandamage') + map = viz.plot_table_dataset(dataset, client, "meandamage") assert True def test_plot_table_dataset_list_from_single_source(client): - seaside_building_polygon_id = '5f7c95d681c8dd4d309d5a46' - dataset_id_list = ['5f7c9b4f81c8dd4d309d5b62', '5f7c9af781c8dd4d309d5b5e'] + seaside_building_polygon_id = "5f7c95d681c8dd4d309d5a46" + dataset_id_list = ["5f7c9b4f81c8dd4d309d5b62", "5f7c9af781c8dd4d309d5b5e"] dataset_list = [] for dataset_id in dataset_id_list: dataset_list.append(Dataset.from_data_service(dataset_id, DataService(client))) map = viz.plot_table_dataset_list_from_single_source( - client, dataset_list, 'failure_probability', seaside_building_polygon_id) + client, dataset_list, "failure_probability", seaside_building_polygon_id + ) assert True @@ -231,7 +269,9 @@ def test_heatmap(client): def test_seaside_bridges(client): trns_brdg_dataset_id = "5d251172b9219c0692cd7523" - trns_brdg_dataset = Dataset.from_data_service(trns_brdg_dataset_id, DataService(client)) + trns_brdg_dataset = Dataset.from_data_service( + trns_brdg_dataset_id, DataService(client) + ) viz.plot_map(trns_brdg_dataset, column=None, category=False, basemap=True) assert True @@ -243,7 +283,11 @@ def test_overay_gdf_with_raster(client): memphis_water_pipeline = "5a284f28c7d30d13bc081d14" memphis_eq = "5b902cb273c3371e1236b36b" - eq_dataset_id = HazardService(client).get_earthquake_hazard_metadata(memphis_eq)['hazardDatasets'][0].get('datasetId') + eq_dataset_id = ( + HazardService(client) + .get_earthquake_hazard_metadata(memphis_eq)["hazardDatasets"][0] + .get("datasetId") + ) raster_dataset = Dataset.from_data_service(eq_dataset_id, DataService(client)) dataset = Dataset.from_data_service(shelby_hospital_inv_id, DataService(client)) @@ -255,30 +299,41 @@ def test_overay_gdf_with_raster(client): def test_choropleth_sinlge_dataset(client): - social_vulnerability_census_block_group = '5a284f57c7d30d13bc08254c' - dataset = Dataset.from_data_service(social_vulnerability_census_block_group, DataService(client)) - viz.plot_choropleth_multiple_fields_from_single_dataset(dataset, ['tot_hh', 'totpop']) + social_vulnerability_census_block_group = "5a284f57c7d30d13bc08254c" + dataset = Dataset.from_data_service( + social_vulnerability_census_block_group, DataService(client) + ) + viz.plot_choropleth_multiple_fields_from_single_dataset( + dataset, ["tot_hh", "totpop"] + ) assert True def test_choropleth_multiple_dataset(client): - social_vulnerability_census_block_group = '5a284f57c7d30d13bc08254c' - dislocation_census_block_group = '5a284f58c7d30d13bc082566' - dataset1 = Dataset.from_data_service(social_vulnerability_census_block_group, DataService(client)) - dataset2 = Dataset.from_data_service(dislocation_census_block_group, DataService(client)) - viz.plot_choropleth_multiple_dataset([dataset1, dataset2], ['tot_hh', 'p_16pyr']) + social_vulnerability_census_block_group = "5a284f57c7d30d13bc08254c" + dislocation_census_block_group = "5a284f58c7d30d13bc082566" + dataset1 = Dataset.from_data_service( + social_vulnerability_census_block_group, DataService(client) + ) + dataset2 = Dataset.from_data_service( + dislocation_census_block_group, DataService(client) + ) + viz.plot_choropleth_multiple_dataset([dataset1, dataset2], ["tot_hh", "p_16pyr"]) assert True def test_multiple_vector_visualization(client): - centerville_model_tornado = '60c917b498a93232884f367d' - centerville_epn_link = '5b1fdc2db1cf3e336d7cecc9' - tornado_metadata = 
HazardService(client).get_tornado_hazard_metadata(centerville_model_tornado) + centerville_model_tornado = "60c917b498a93232884f367d" + centerville_epn_link = "5b1fdc2db1cf3e336d7cecc9" + tornado_metadata = HazardService(client).get_tornado_hazard_metadata( + centerville_model_tornado + ) dataset1 = Dataset.from_data_service(centerville_epn_link, DataService(client)) - dataset2 = Dataset.from_data_service(tornado_metadata["hazardDatasets"][0].get("datasetId"), - DataService(client)) + dataset2 = Dataset.from_data_service( + tornado_metadata["hazardDatasets"][0].get("datasetId"), DataService(client) + ) viz.plot_multiple_vector_dataset([dataset1, dataset2]) assert True diff --git a/tests/test_format.py b/tests/test_format.py index 7377757..d9190f4 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -9,8 +9,8 @@ from pyincore_viz.globals import PYINCORE_VIZ_ROOT_FOLDER paths = [ - os.path.join(PYINCORE_VIZ_ROOT_FOLDER, 'pyincore_viz'), - os.path.join(PYINCORE_VIZ_ROOT_FOLDER, 'tests/pyincore_viz/'), + os.path.join(PYINCORE_VIZ_ROOT_FOLDER, "pyincore_viz"), + os.path.join(PYINCORE_VIZ_ROOT_FOLDER, "tests/pyincore_viz/"), ]
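
A minimal usage sketch of the reformatted modules follows. It is illustrative only (not part of the changeset above), assumes a reachable IN-CORE service with configured credentials, and reuses the dataset and DFR3 IDs already exercised in tests/pyincore_viz/test_pyincore_viz.py.

# Sketch: exercise GeoUtil and PlotUtil after the Black reformat (assumes IN-CORE access).
from pyincore import IncoreClient, DataService, Dataset, FragilityService, FragilityCurveSet
from pyincore_viz.geoutil import GeoUtil
from pyincore_viz.plotutil import PlotUtil

client = IncoreClient()  # prompts for IN-CORE credentials

# Overlay the Galveston wave-height raster (same dataset ID as test_visualize_raster_file).
wave_height = Dataset.from_data_service("5f11e503feef2d758c4df6db", DataService(client))
raster_map = GeoUtil.map_raster_overlay_from_file(wave_height.get_file_path("tif"))
raster_map  # display the ipyleaflet Map in a notebook cell

# Plot a period standard fragility curve (same DFR3 ID as test_plot_fragility).
fragility_set = FragilityCurveSet(
    FragilityService(client).get_dfr3_set("5b47b2d7337d4a36187c61c9")
)
plt = PlotUtil.get_fragility_plot(fragility_set, title="period standard fragility curve")
plt.savefig("periodStandard.png")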