Commit fbdbe7a4 authored by matthmey

Merge branch 'develop' of gitlab.ethz.ch:tec/research/stuett/software/framework/stuett

parents 6ccb52f5 757a5e4d
@@ -364,6 +364,10 @@ class SeismicSource(DataSource):
 del x.attrs["stats"]
 # x.rename({'seed_id':'channels'}) #TODO: rename seed_id to channels
+# print(x.shape)
+# TODO: same check for obspy
+if len(x["seed_id"]) != len(config["channel"]):
+    warnings.warn("Inconsistent data: Not all channels could be loaded")
 return x
 def process_seismic_data(
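For context, a minimal self-contained sketch of what the new check guards against: when fewer channels come back than were requested, `get_data` now warns instead of failing silently. The station and channel names below are made up; the sketch only assumes that `x` is an xarray `DataArray` with a `seed_id` coordinate and that `config["channel"]` lists the requested channels.

```python
import warnings

import numpy as np
import xarray as xr

config = {"channel": ["EHE", "EHN", "EHZ"]}  # three channels requested (hypothetical values)
x = xr.DataArray(
    np.zeros((2, 100)),
    dims=("seed_id", "time"),
    coords={"seed_id": ["XX.STAT..EHE", "XX.STAT..EHN"]},  # only two channels were found
)

# the added consistency check: warn when not every requested channel was loaded
if len(x["seed_id"]) != len(config["channel"]):
    warnings.warn("Inconsistent data: Not all channels could be loaded")
```

A caller that prefers a hard failure can promote this warning to an exception with `warnings.simplefilter("error")`, which is exactly what the `SegmentedDataset` change below relies on.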
@@ -1411,10 +1415,14 @@ class SegmentedDataset(Dataset):
 # we need to load every single piece to check if it is empty
 # TODO: loop through dims in batch_dim and check if they are correct
 try:
+    warnings.simplefilter("error")
     if self.get_data(slices[o[0]]).size == 0:
+        warnings.simplefilter("default")
         continue
+    warnings.simplefilter("default")
 except Exception as e:
-    print(e)
+    warnings.simplefilter("default")
+    print("Ignoring file", e)
     continue
 # TODO: maybe this can be done faster (and cleaner)
 i = o[0]
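The explicit `simplefilter("error")` / `simplefilter("default")` pairs work, but the filter has to be reset on every exit path. For comparison, a self-contained sketch of the same "skip unreadable or incomplete segments" idea using `warnings.catch_warnings()`, which restores the previous filter state automatically; `get_data` and the segment names here are stand-ins, not stuett's API.

```python
import warnings


def get_data(segment):
    # stand-in for self.get_data(slices[o[0]]): warns on incomplete data
    if segment == "incomplete":
        warnings.warn("Inconsistent data: Not all channels could be loaded")
    return [] if segment == "empty" else [1, 2, 3]


kept = []
for segment in ["ok", "empty", "incomplete"]:
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("error")  # promote warnings to exceptions inside this block
            if len(get_data(segment)) == 0:
                continue  # silently skip empty segments
    except Exception as e:
        print("Ignoring file", e)  # skip segments that raised or warned
        continue
    kept.append(segment)

print(kept)  # ['ok']
```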
@@ -1425,7 +1433,7 @@ class SegmentedDataset(Dataset):
 label = str(label)
 if label not in self.classes:
     self.classes.append(label)
 if i not in label_dict:
     label_dict[i] = {"indexers": slices[i], "labels": [label]}
 elif label not in label_dict[i]["labels"]:
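A toy, self-contained illustration of the labelling bookkeeping around this hunk (the segment indices, slices, and labels are invented, and the body of the `elif` branch is cut off in the diff, so appending the label there is an assumption):

```python
classes = []
label_dict = {}
slices = {0: {"time": slice(0, 10)}, 1: {"time": slice(10, 20)}}

for i, label in [(0, "rockfall"), (0, "noise"), (1, "noise"), (1, "noise")]:
    label = str(label)
    if label not in classes:
        classes.append(label)
    if i not in label_dict:
        # first label seen for segment i: remember its slice and start the label list
        label_dict[i] = {"indexers": slices[i], "labels": [label]}
    elif label not in label_dict[i]["labels"]:
        # assumed behaviour of the truncated elif body: collect further distinct labels
        label_dict[i]["labels"].append(label)

print(classes)     # ['rockfall', 'noise']
print(label_dict)  # segment 0 carries both labels, segment 1 only 'noise'
```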
@@ -1602,7 +1610,7 @@ class SegmentedDataset(Dataset):
 # class PytorchDataset(DataSource): # TODO: extends pytorch dataset
 # def __init__(self, source=None):
 # """ Creates a pytorch like dataset from a data source and a label source.
 # Arguments:
 # DataSource {[type]} -- [description]
 # config {dict} -- configuration for labels
...