Skip to content

Commit

Permalink
fix bug in cycle_mode (saves as a list in cellpyfile, but needs to (still) be a string in the cellpycell methods)
Browse files Browse the repository at this point in the history
  • Loading branch information
jepegit committed Dec 27, 2023
1 parent f01b6b8 commit 0547d40
Show file tree
Hide file tree
Showing 12 changed files with 39 additions and 4 deletions.
8 changes: 7 additions & 1 deletion cellpy/readers/cellreader.py
Original file line number Diff line number Diff line change
Expand Up @@ -778,7 +778,13 @@ def cycle_mode(self):
# TODO: v2.0 edit this from scalar to list
try:
data = self.data
return data.meta_test_dependent.cycle_mode
m = data.meta_test_dependent.cycle_mode
# cellpy saves this as a list (ready for v2.0),
# but we want to return a scalar for the moment
# Temporary fix to make sure that cycle_mode is a scalar:
if isinstance(m, (tuple, list)):
return m[0]
return m
except NoDataFound:
return self._cycle_mode

Expand Down
20 changes: 19 additions & 1 deletion cellpy/readers/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,13 +303,28 @@ class Data:

def _repr_html_(self):
txt = f"<h2>Data-object</h2> <b>id</b>: {hex(id(self))}"

txt += "<p>"
for p in dir(self):
if not p.startswith("_"):
if p not in ["raw", "summary", "steps", "logger"]:
if p not in [
"raw",
"summary",
"steps",
"logger",
"raw_data_files",
"custom_info",
"populate_defaults",
]:
value = self.__getattribute__(p)
txt += f"<b>{p}</b>: {value}<br>"
if p == "raw_data_files":
fid_txt = "<b>raw data files</b>:"
fid_names = ", ".join([f.name for f in self.raw_data_files])
fid_txt += f" [{fid_names}]<br>"
txt += fid_txt
txt += "</p>"

try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Expand Down Expand Up @@ -389,10 +404,13 @@ def raw_id(self):

@property
def start_datetime(self):
    """Start date-time of the test, read from ``meta_common``.

    Returned verbatim as stored (presumably a string — TODO: convert
    to a ``datetime`` object; see matching TODO on the setter).
    """
    # NOTE(review): removed a leftover debug ``print(type(...))`` that
    # leaked to stdout on every attribute access.
    return self.meta_common.start_datetime

@start_datetime.setter
def start_datetime(self, n):
    """Store the start date-time verbatim in ``meta_common``.

    TODO: convert/validate ``n`` to a ``datetime`` object before storing.
    """
    self.meta_common.start_datetime = n

@property
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/arbin_res.py
Original file line number Diff line number Diff line change
Expand Up @@ -1079,6 +1079,7 @@ def _init_data(self, file_name, global_data_df, test_no=None):
data.schedule_file_name = selected_global_data_df[
self.arbin_headers_global.schedule_file_name_txt
].values[0]
# TODO: convert to datetime:
data.start_datetime = selected_global_data_df[
self.arbin_headers_global.start_datetime_txt
].values[0]
Expand Down
4 changes: 2 additions & 2 deletions cellpy/readers/instruments/arbin_sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,10 +268,10 @@ def loader(self, name, **kwargs):
data.test_ID = test_id
data.test_name = name

# The following meta data is not implemented yet for SQL loader:
# The following metadata is not implemented yet for SQL loader:
data.creator = None
data.schedule_file_name = None
data.start_datetime = None
data.start_datetime = None # REMARK! convert to datetime when implementing

# Generating a FileID project:
self.generate_fid()
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/arbin_sql_7.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,6 +280,7 @@ def loader(self, name, **kwargs):
data.item_ID = None
# Implemented metadata:
data.schedule_file_name = meta_data["Schedule_File_Name"][0]
# TODO: convert to datetime:
data.start_datetime = meta_data["First_Start_DateTime"][0]
data.creator = meta_data["Creator"][0]

Expand Down
2 changes: 2 additions & 0 deletions cellpy/readers/instruments/arbin_sql_h5.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ def loader(self, name, **kwargs):
data.test_name = self.name.name
data.creator = None
data.schedule_file_name = data_dfs["info_df"]["Schedule_File_Name"].iloc[0]
# TODO: convert to datetime (note that this seems to be set also in the postprocessing)
data.start_datetime = data_dfs["info_df"]["First_Start_DateTime"].iloc[0]
data.mass = data_dfs["info_df"]["SpecificMASS"].iloc[0]
data.nom_cap = data_dfs["info_df"]["SpecificCapacity"].iloc[0]
Expand Down Expand Up @@ -220,6 +221,7 @@ def _post_process(self, data):

hdr_date_time = self.arbin_headers_normal.datetime_txt
start = data.raw[hdr_date_time].iat[0]
# TODO: convert to datetime:
data.start_datetime = start

return data
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/arbin_sql_xlsx.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,7 @@ def _post_process(self, data):
data.raw[hdr_ir] = data.raw[hdr_ir].fillna(method="bfill")

hdr_date_time = self.arbin_headers_normal.datetime_txt
# TODO: convert to datetime:
start = data.raw[hdr_date_time].iat[0]
data.start_datetime = start

Expand Down
2 changes: 2 additions & 0 deletions cellpy/readers/instruments/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -565,6 +565,7 @@ def loader(self, name: Union[str, pathlib.Path], **kwargs: str) -> core.Data:
data.test_name = meta.get("test_name", None)
data.creator = meta.get("creator", None)
data.schedule_file_name = meta.get("schedule_file_name", None)
# TODO: convert to datetime:
data.start_datetime = meta.get("start_datetime", None)

# Generating a FileID project:
Expand All @@ -579,6 +580,7 @@ def loader(self, name: Union[str, pathlib.Path], **kwargs: str) -> core.Data:
data = self._post_process(data)
data = self.identify_last_data_point(data)
if data.start_datetime is None:
# TODO: convert to datetime:
data.start_datetime = data.raw[headers_normal.datetime_txt].iat[0]

data = self.validate(data)
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/biologics_mpr.py
Original file line number Diff line number Diff line change
Expand Up @@ -460,6 +460,7 @@ def _generate_cycle_index(self):

def _generate_datetime(self):
start_date = self.mpr_settings["start_date"]
# TODO: convert to datetime:
start_datetime = self.mpr_log["Start"]
cellpy_header_txt = "datetime_txt"
date_format = "%Y-%m-%d %H:%M:%S" # without microseconds
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/ext_nda_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ def loader(self, file_name, *args, **kwargs):
channel_index = 1
creator = "no name"
schedule_file_name = "no name"
# TODO: convert to datetime:
start_datetime = "2020.02.24 14:58:00"
test_ID = 1
test_name = "no name"
Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/neware_xlsx.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,7 @@ def _post_process(self, data):
print("recalculating capacity: cap = current * time")

# hdr_date_time = self.arbin_headers_normal.datetime_txt
# TODO: convert to datetime:
# start = data.raw[hdr_date_time].iat[0]
# data.start_datetime = start

Expand Down
1 change: 1 addition & 0 deletions cellpy/readers/instruments/pec_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,6 +214,7 @@ def loader(self, file_name, bad_steps=None, **kwargs):

# --------- read raw-data (normal-data) -------------------------
self._load_pec_data(bad_steps)
# TODO: convert to datetime:
data.start_datetime = self.pec_settings["start_time"]
length_of_test = self.pec_data.shape[0]
logging.debug(f"length of test: {length_of_test}")
Expand Down

0 comments on commit 0547d40

Please sign in to comment.