Merge pull request #430 from IN-CORE/release-1.14.0
Release 1.14.0
longshuicy authored Nov 8, 2023
2 parents 0b98180 + 08ce991 commit 47b15ac
Showing 29 changed files with 483 additions and 199 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/conda.yaml
@@ -53,7 +53,7 @@ jobs:
        uses: mamba-org/setup-micromamba@v1
        with:
          create-args: >-
-           python=3.8
+           python=3.9
            conda
          environment-file: environment.yml
          generate-run-shell: true
4 changes: 2 additions & 2 deletions .github/workflows/pypi.yaml
@@ -36,10 +36,10 @@ jobs:
echo "TOKEN=${{ secrets.TEST_PYPI_API_TOKEN }}" >> $GITHUB_ENV
fi
- name: Set up Python 3.8
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.8
python-version: 3.9

- name: Install dependencies
run: python -m pip install --upgrade pip setuptools wheel
4 changes: 2 additions & 2 deletions .github/workflows/pytests.yml
@@ -14,8 +14,8 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-       vars: [ {python-version: '3.8', sleep: '0s'}, {python-version: '3.9', sleep: '60s'},
-               {python-version: '3.10', sleep: '120s'}, {python-version: '3.11', sleep: '180s'} ]
+       vars: [ {python-version: '3.9', sleep: '0s'}, {python-version: '3.10', sleep: '60s'},
+               {python-version: '3.11', sleep: '120s'}, {python-version: '3.12', sleep: '180s'} ]
    name: Python ${{ matrix.vars.python-version }} Test
    steps:
      - name: Checkout source code
18 changes: 17 additions & 1 deletion CHANGELOG.md
@@ -5,6 +5,23 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

+
+## [1.14.0] - 2023-11-02
+
+### Changed
+- Properly set the output dataset in Building Portfolio Recovery Analysis [#423](https://github.com/IN-CORE/pyincore/issues/423)
+- Dependency clean up [#431](https://github.com/IN-CORE/pyincore/issues/431)
+
+### Added
+- Add support for hazard object input from local and remote for building damage analysis [#427](https://github.com/IN-CORE/pyincore/issues/427)
+
+### Fixed
+- CGE warning that using series is deprecated and will raise a type error [#357](https://github.com/IN-CORE/pyincore/issues/357)
+- Pytest fix in workflow [#425](https://github.com/IN-CORE/pyincore/issues/425)
+- Mapping rule to match local repair curve [#438](https://github.com/IN-CORE/pyincore/issues/438)
+- Local tornado x and y axis reversed [#439](https://github.com/IN-CORE/pyincore/issues/439)
+
+
## [1.13.0] - 2023-10-11

### Changed
@@ -15,7 +32,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- Capability to support for local hazard [#404](https://github.com/IN-CORE/pyincore/issues/404)
- Add support for local hazard with backward compatibility to analyses [#415](https://github.com/IN-CORE/pyincore/issues/415)
-

### Fixed
- Aggregate hazard exposure column for non-structural building damage analysis to avoid column name cutoff and chaining issue with mean damage [#393](https://github.com/IN-CORE/pyincore/issues/393)

4 changes: 2 additions & 2 deletions docs/source/conf.py
@@ -33,9 +33,9 @@
author = ''

# The short X.Y version
-version = '1.13'
+version = '1.14'
# The full version, including alpha/beta/rc tags
-release = '1.13.0'
+release = '1.14.0'

# -- General configuration ---------------------------------------------------

26 changes: 13 additions & 13 deletions environment.yml
@@ -4,19 +4,19 @@ channels:
  - defaults
dependencies:
  - ipopt>=3.11
-  - fiona>=1.8.4
-  - geopandas>=0.6.1
-  - matplotlib>=2.1.0
-  - networkx>=2.2
-  - numpy>=1.16.6,<2.0a0
-  - pandas>=1.1.0
+  - fiona>=1.9.5
+  - geopandas>=0.14.0
+  - matplotlib>=3.8.0
+  - networkx>=3.2.1
+  - numpy>=1.26.0,<2.0a0
+  - pandas>=2.1.2
  - pycodestyle>=2.6.0
-  - pyomo>=5.6
-  - pyproj>=1.9.6
+  - pyomo>=6.6.2
+  - pyproj>=3.6.1
  - pytest>=3.9.0
  - python-jose>=3.0
-  - rasterio>=1.0.18
-  - requests>=2.20.0
-  - rtree>=0.8.3
-  - scipy>=1.2.0
-  - shapely>=1.6.4.post1
+  - rasterio>=1.3.9
+  - requests>=2.31.0
+  - rtree>=1.1.0
+  - scipy>=1.11.3
+  - shapely>=2.0.2
59 changes: 35 additions & 24 deletions pyincore/analyses/buildingdamage/buildingdamage.py
@@ -41,12 +41,24 @@ def run(self):
        else:
            retrofit_strategy = None

-        # Accomodating to multihazard
-        # Get hazard input
-        hazard_dataset_ids = self.get_parameter("hazard_id").split("+")
-
-        # Hazard type of the exposure
-        hazard_types = self.get_parameter("hazard_type").split("+")
+        # Accommodating to multi-hazard
+        hazards = []  # hazard objects
+        hazard_object = self.get_input_hazard("hazard")
+
+        # To use local hazard
+        if hazard_object is not None:
+            # Right now only supports single hazard for local hazard object
+            hazard_types = [hazard_object.hazard_type]
+            hazard_dataset_ids = [hazard_object.id]
+            hazards = [hazard_object]
+        # To use remote hazard
+        elif self.get_parameter("hazard_id") is not None and self.get_parameter("hazard_type") is not None:
+            hazard_dataset_ids = self.get_parameter("hazard_id").split("+")
+            hazard_types = self.get_parameter("hazard_type").split("+")
+            for hazard_type, hazard_dataset_id in zip(hazard_types, hazard_dataset_ids):
+                hazards.append(BaseAnalysis._create_hazard_object(hazard_type, hazard_dataset_id, self.hazardsvc))
+        else:
+            raise ValueError("Either hazard object or hazard id + hazard type must be provided")

        # Get Fragility key
        fragility_key = self.get_parameter("fragility_key")
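The hunk above gives the analysis two ways to receive its hazard: a hazard object attached as an analysis input (local, single hazard only), or the pre-1.14 `hazard_id`/`hazard_type` parameter pair (remote, `+`-joined for multi-hazard). A minimal usage sketch of both modes, assuming a `set_input_hazard` setter that pairs with the `get_input_hazard` call above; the dataset ids are illustrative:

```python
from pyincore import IncoreClient
from pyincore.analyses.buildingdamage import BuildingDamage

client = IncoreClient()
bldg_dmg = BuildingDamage(client)

# Remote hazards (existing behavior): "+"-joined types and ids
bldg_dmg.set_parameter("hazard_type", "earthquake+tsunami")
bldg_dmg.set_parameter("hazard_id", "<eq-dataset-id>+<tsunami-dataset-id>")  # illustrative ids

# Local hazard (new in 1.14.0): attach one hazard object instead of ids;
# set_input_hazard is an assumed setter counterpart of get_input_hazard
# bldg_dmg.set_input_hazard("hazard", tornado_object)
```

Note the asymmetry the code enforces: a local hazard object is limited to one hazard per run, while the parameter path still supports `+`-joined multi-hazard input.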
@@ -74,6 +86,7 @@ def run(self):
            num_workers,
            inventory_args,
            repeat(retrofit_strategy),
+            repeat(hazards),
            repeat(hazard_types),
            repeat(hazard_dataset_ids))

@@ -105,12 +118,14 @@ def building_damage_concurrent_future(self, function_name, parallelism, *args):

        return output_ds, output_dmg

-    def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazard_types, hazard_dataset_ids):
+    def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, hazards, hazard_types,
+                                            hazard_dataset_ids):
        """Run analysis for multiple buildings.
        Args:
            buildings (list): Multiple buildings from input inventory set.
            retrofit_strategy (list): building guid and its retrofit level 0, 1, 2, etc. This is Optional
+            hazards (list): List of hazard objects.
            hazard_types (list): List of Hazard type, either earthquake, tornado, or tsunami.
            hazard_dataset_ids (list): List of id of the hazard exposure.
@@ -130,7 +145,7 @@ def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, haza
        multihazard_vals = {}
        adjust_demand_types_mapping = {}

-        for hazard_type, hazard_dataset_id in zip(hazard_types, hazard_dataset_ids):
+        for hazard, hazard_type, hazard_dataset_id in zip(hazards, hazard_types, hazard_dataset_ids):
            # get allowed demand types for the hazard type
            allowed_demand_types = [item["demand_type"].lower() for item in self.hazardsvc.get_allowed_demands(
                hazard_type)]
@@ -172,19 +187,7 @@ def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, haza
                else:
                    unmapped_buildings.append(b)

-            if hazard_type == 'earthquake':
-                hazard_vals = self.hazardsvc.post_earthquake_hazard_values(hazard_dataset_id, values_payload)
-            elif hazard_type == 'tornado':
-                hazard_vals = self.hazardsvc.post_tornado_hazard_values(hazard_dataset_id, values_payload,
-                                                                        self.get_parameter('seed'))
-            elif hazard_type == 'tsunami':
-                hazard_vals = self.hazardsvc.post_tsunami_hazard_values(hazard_dataset_id, values_payload)
-            elif hazard_type == 'hurricane':
-                hazard_vals = self.hazardsvc.post_hurricane_hazard_values(hazard_dataset_id, values_payload)
-            elif hazard_type == 'flood':
-                hazard_vals = self.hazardsvc.post_flood_hazard_values(hazard_dataset_id, values_payload)
-            else:
-                raise ValueError("The provided hazard type is not supported yet by this analysis")
+            hazard_vals = hazard.read_hazard_values(values_payload, self.hazardsvc)

            # map demand type from payload to response
            # worst code I have ever written
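The five-way dispatch on `hazard_type` collapses into a single polymorphic call: each hazard object now reads its own values. For orientation, `values_payload` is assembled earlier in this method, outside this hunk; the sketch below shows its assumed shape (field names inferred from how pyincore's hazard service is used elsewhere, values illustrative):

```python
# Assumed shape of values_payload: one record per mapped building
values_payload = [
    {
        "demands": ["PGA"],       # demand types permitted for this hazard
        "units": ["g"],           # units matching each demand
        "loc": "35.84,-89.90",    # building centroid as "latitude,longitude"
    },
]
```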
@@ -261,7 +264,7 @@ def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, haza
                        AnalysisUtil.adjust_damage_for_liquefaction(dmg_probability, ground_failure_prob))

                dmg_interval = selected_fragility_set.calculate_damage_interval(
-                    dmg_probability, hazard_type=self.get_parameter("hazard_type"), inventory_type="building")
+                    dmg_probability, hazard_type="+".join(hazard_types), inventory_type="building")
            else:
                raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are "
                                 "seeing this please report the issue.")
@@ -325,13 +328,13 @@ def get_spec(self):
            },
            {
                'id': 'hazard_type',
-                'required': True,
+                'required': False,
                'description': 'Hazard Type (e.g. earthquake)',
                'type': str
            },
            {
                'id': 'hazard_id',
-                'required': True,
+                'required': False,
                'description': 'Hazard ID',
                'type': str
            },
@@ -372,6 +375,14 @@ def get_spec(self):
                    'type': str,
                }
            ],
+            'input_hazards': [
+                {
+                    'id': 'hazard',
+                    'required': False,
+                    'description': 'Hazard object',
+                    'type': ["earthquake", "tornado", "hurricane", "flood", "tsunami"]
+                },
+            ],
            'input_datasets': [
                {
                    'id': 'buildings',
pyincore/analyses/buildingportfolio/{recovery → }/BuildingPortfolioRecoveryAnalysis.py
@@ -10,8 +10,8 @@
import scipy.stats
import concurrent.futures

-from pyincore.analyses.buildingportfolio.recovery.BuildingData import BuildingData
-from pyincore import BaseAnalysis
+from pyincore.analyses.buildingportfolio.BuildingData import BuildingData
+from pyincore import BaseAnalysis, Dataset


class BuildingPortfolioRecoveryAnalysis(BaseAnalysis):
@@ -117,6 +117,7 @@ def get_spec(self):
            {
                'id': 'result',
                'parent_type': 'buildingPortfolio',
+                'description': 'Building portfolio recovery result.',
                'type': 'incore:portfolioRecovery'
            }
        ]
@@ -230,8 +231,7 @@ def run(self):
        # Trajectory for Best Line Functionality and Full Functionality
        mean_recovery_output = sum(recovery_fp) / sample_size

-        output_file = output_base_name + 'building-recovery' + '.csv'
-        with open(output_file, 'w+', newline='') as output_file:
+        with open(output_base_name + '_building-recovery.csv', 'w+', newline='') as output_file:
            spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            spam_writer.writerow(['Building_ID', 'Building_Lon', 'Building_Lat']
                                 + list(range(1, time_steps + 1)))
@@ -343,9 +343,7 @@ def run(self):
                    upper_bound95[t] = 1

            # END: Additional Code for uncertainty Analysis
-
-            output_file = output_base_name + 'portfolio-recovery' + '.csv'
-            with open(output_file, 'w+', newline='') as output_file:
+            with open(output_base_name + '_portfolio-recovery.csv', 'w+', newline='') as output_file:
                spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', '75P_Upper_Bound',
                                      '75P_Lower_Bound', '95P_Upper_Bound', '95P_Lower_Bound',
@@ -356,28 +354,15 @@ def run(self):
                                      lower_bound95[i], upper_bound95[i]] + list(mean_recovery[i]) +
                                     [pdf[i]])
        else:
-            output_file = output_base_name + 'portfolio-recovery' + '.csv'
-            with open(output_file, 'w+', newline='') as output_file:
+            with open(output_base_name + '_portfolio-recovery.csv', 'w+', newline='') as output_file:
                spam_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                spam_writer.writerow(['Week', 'Recovery_Percent_Func_Probability', 'RecPercent_RE',
                                      'RecPercent_RU', 'RecPercent_RO', 'RecPercent_BF', 'RecPercent_FF'])
                for i in range(time_steps):
                    spam_writer.writerow([i + 1, mean_recovery_output[i]] + list(mean_recovery[i]))

-        # Not needed as graphs are generated in jupyter notebook
-        # fig2 = plt.figure(2)
-        # plt.plot(range(time_steps), mean_recovery_output, color="black")
-        # if uncertainty:
-        #     plt.plot(range(time_steps), lower_bound75, color="red")
-        #     plt.plot(range(time_steps), upper_bound75, color="red")
-        #     plt.plot(range(time_steps), lower_bound95, color="blue")
-        #     plt.plot(range(time_steps), upper_bound95, color="blue")
-        # plt.xlabel('Expected recovery time (weeks)')
-        # plt.ylabel('Percentage of Buildings Recovered')
-        # plt.title('Building Portfolio Recovery Analysis with uncertainty')
-        #
-        # output_image2 = output_base_name + 'portfolio-recovery' + '.png'
-        # fig2.savefig(output_image2)
+        self.set_output_dataset("result", Dataset.from_file(output_base_name + '_portfolio-recovery.csv',
+                                                            data_type=self.output_datasets["result"]["spec"]["type"]))

print("INFO: Finished executing Building Portfolio Recovery Analysis")

3 changes: 3 additions & 0 deletions pyincore/analyses/buildingportfolio/__init__.py
@@ -0,0 +1,3 @@
+from pyincore.analyses.buildingportfolio.BuildingPortfolioRecoveryAnalysis import BuildingPortfolioRecoveryAnalysis
+from pyincore.analyses.buildingportfolio.BuildingDamage import BuildingDamage
+from pyincore.analyses.buildingportfolio.BuildingData import BuildingData
2 changes: 0 additions & 2 deletions pyincore/analyses/buildingportfolio/recovery/__init__.py

This file was deleted.
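Flattening the recovery subpackage changes the public import path, so downstream code importing from `buildingportfolio.recovery` must be updated. The old path below is reconstructed from the deleted imports, so treat it as illustrative:

```python
# Old (pre-1.14.0) location, now removed:
# from pyincore.analyses.buildingportfolio.recovery import BuildingPortfolioRecoveryAnalysis

# New location, re-exported by the package __init__ shown above:
from pyincore.analyses.buildingportfolio import BuildingPortfolioRecoveryAnalysis
```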

@@ -224,7 +224,11 @@ def recovery_rate(self, buildings, sample_damage_states, total_delay):

        # This is sort of a workaround until we define Repair Curve models and abstract this out there
        for i, b in enumerate(buildings):
-            repair_sets_by_guid[b["properties"]['guid']] = repair_sets[str(i)]
+            # if building id has a matched repair curve set
+            if b['id'] in repair_sets.keys():
+                repair_sets_by_guid[b["properties"]['guid']] = repair_sets[b['id']]
+            else:
+                repair_sets_by_guid[b["properties"]['guid']] = None

        # Obtain the column names
        colnames = list(total_delay.columns)[1:]
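The fix keys repair sets by building id (matching how the DFR3 mapping returns them) instead of by enumeration index, defaulting to None when no rule matched. The same logic could be written more compactly with `dict.get`, which returns None on a miss; an equivalent sketch, not the committed code:

```python
# Equivalent to the if/else above: dict.get yields None for unmatched building ids
repair_sets_by_guid = {
    b["properties"]["guid"]: repair_sets.get(b["id"]) for b in buildings
}
```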
@@ -267,10 +271,13 @@ def idx(x, y):
            percent_func = np.random.random(num_samples)
            # NOTE: Even though the kwarg name is "repair_time", it actually takes percent of functionality. DFR3
            # system currently doesn't have a way to represent the name correctly when calculating the inverse.
-            repair_time = mapped_repair.repair_curves[state].solve_curve_for_inverse(
-                hazard_values={}, curve_parameters=mapped_repair.curve_parameters, **{"repair_time": percent_func}
-            ) / 7
-
+            if mapped_repair is not None:
+                repair_time = mapped_repair.repair_curves[state].solve_curve_for_inverse(
+                    hazard_values={}, curve_parameters=mapped_repair.curve_parameters, **{"repair_time": percent_func}
+                ) / 7
+            else:
+                repair_time = np.full(num_samples, np.nan)

            for j in range(0, num_samples):
                samples_n1_n2[build, idx(i, j)] = round(samples_np[build, i] + repair_time[j], 1)

2 changes: 1 addition & 1 deletion pyincore/analyses/galvestoncge/galvestoncge.py
@@ -749,7 +749,7 @@ def _(x):

        CG0T.loc[I, GN] = SAM.loc[I, GN].div(P0.loc[I], axis='index')

-        DEPR = float((SAM.loc[IG, ["INVES"]].sum(0)) / (KS0.loc[K, IG].sum(1).sum(0)))
+        DEPR = float(((SAM.loc[IG, ["INVES"]].sum(0)) / (KS0.loc[K, IG].sum(1).sum(0))).iloc[0])

        N0.loc[K, IG] = KS0.loc[K, IG] * DEPR
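This is the CGE deprecation fix noted in the changelog (#357): in pandas 2.x, calling `float()` on a one-element Series emits a FutureWarning and is slated to raise a TypeError, so the scalar is now pulled out explicitly with `.iloc[0]` before conversion. A minimal illustration of the pattern:

```python
import pandas as pd

s = pd.Series([0.05])   # one-element Series, like the SAM/KS0 ratio above
# float(s)              # FutureWarning on pandas >= 2.1; TypeError in the future
val = float(s.iloc[0])  # extract the scalar explicitly, then convert
print(val)              # 0.05
```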
