Commit 4a98e48d authored by Aurore Sallard

Capacities of secondary locations

parent 6a1fd3da
Pipeline #96537 failed
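The central change in this commit is in the discretization solver: instead of snapping each secondary activity to the single nearest candidate destination, it queries the nearest candidates (query_size = 10) and draws one of them with probability proportional to its number of employees, so destinations with larger capacities attract more activities. A minimal standalone sketch of that sampling scheme, assuming a scikit-learn KDTree over destination coordinates and an employee-count array (all data and names below are illustrative, not taken from the pipeline):

import numpy as np
from sklearn.neighbors import KDTree

def sample_destination(tree, employees, identifiers, location, k=10):
    """Pick one of the k nearest destinations, with probability proportional
    to its number of employees (capacity)."""
    k = min(k, len(identifiers))
    # indices of the k nearest candidate destinations
    candidates = tree.query(location.reshape(1, -1), k, return_distance=False)[0]
    weights = employees[candidates].astype(float)
    weights /= weights.sum()
    return identifiers[np.random.choice(candidates, p=weights)]

# illustrative data: four destinations with coordinates and employee counts
coords = np.array([[0.0, 0.0], [100.0, 0.0], [0.0, 100.0], [5000.0, 5000.0]])
employees = np.array([5, 50, 1, 200])
identifiers = np.array([101, 102, 103, 104])
tree = KDTree(coords)
print(sample_destination(tree, employees, identifiers, np.array([10.0, 10.0]), k=3))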
# General pipeline settings
working_directory: /nas/asallard/Switzerland/cache_schools
flowchart_path: /nas/asallard/Switzerland/output_schools/flowchart.json
working_directory: /nas/asallard/Switzerland/cache_sample_secloc
flowchart_path: /nas/asallard/Switzerland/cache_170521/flowchart.json
dryrun: false
# Requested stages
@@ -9,15 +9,17 @@ run:
# - data.statpop.projections.households
# - data.statpop.scaled
# - population.matched
# - data.microcensus.households
#- data.microcensus.trips
# - population.destinations
# - synthesis.population.destinations
# - synthesis.population.spatial.secondary.locations
# - synthesis.population.destinations
#- synthesis.population.spatial.secondary.locations
- population.output
# - matsim.facilities
# - matsim.population
# - matsim.households
- matsim.run
- analysis.analysis
# - matsim.run
#- analysis.analysis
# - population.output
# These are configuration options that we use in the pipeline
@@ -27,12 +29,12 @@ config:
hot_deck_matching_runners: 2
disable_progress_bar: false
java_memory: 80G
input_downsampling: 0.25
input_downsampling: 0.0001
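# 0.0001 keeps roughly a 0.01 % population sample (the previous value, 0.25, keeps 25 %)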
enable_scaling: true
scaling_year: 2020
use_freight: true
use_detailed_activities: false
use_detailed_activities: true
hafas_date: 01.10.2018
data_path: /nas/ivtmatsim/scenarios/switzerland/data
output_path: /nas/asallard/Switzerland/output_schools
analysis_path: /nas/asallard/Switzerland/analysis_schools
output_path: /nas/asallard/Switzerland/output_sample_secloc
analysis_path: /nas/asallard/Switzerland/analysis_sample_secloc
@@ -76,7 +76,7 @@ def execute(context):
df_mz_trips.loc[df_mz_trips["wzweck1"] == 12, "purpose"] = "unknown" # Other
df_mz_trips.loc[df_mz_trips["wzweck1"] == 13, "purpose"] = "border" # Going out of country
if det_activities == "true":
if det_activities:# == "true":
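# with detailed activities enabled, recode service trips (wzweck1 == 5) as "services" and the matching shopping trips (wzweck1 == 4 with f51800a == 1) as "grocery"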
df_mz_trips.loc[df_mz_trips["wzweck1"] == 5, "purpose"] = "services"
df_mz_trips.loc[np.logical_and(df_mz_trips["wzweck1"] == 4, df_mz_trips["f51800a"] == 1), "purpose"] = "grocery"
......
@@ -33,7 +33,7 @@ def execute(context):
# 90 = arts, entertainment, leisure; 56 = gastronomy
df.loc[:, "offers_leisure"] = df["noga"].str.startswith("90") | df["noga"].str.startswith("56") | df["noga"].str.startswith("91") | df["noga"].str.startswith("55") | df["noga"].str.startswith("94") | (df["noga"] == "591400") | (df["noga"] == "920000") | (df["noga"] == "855200") | (df["noga"] == "932100") | (df["noga"] == "932900") | (df["noga"] == "855100") | df["noga"].str.startswith("931")
if det_activities == "true":
if det_activities:# == "true":
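# detailed split of the leisure destinations: culture (NOGA 90, 91 and selected individual codes) and religion (NOGA 94)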
df.loc[:, "offers_culture"] = df["noga"].str.startswith("90") | df["noga"].str.startswith("91") | (df["noga"] == "591400") | (df["noga"] == "920000") | (df["noga"] == "855200") | (df["noga"] == "932100") | (df["noga"] == "932900")
df.loc[:, "offers_religion"] = df["noga"].str.startswith("94")
@@ -44,7 +44,7 @@ def execute(context):
# 47 = retail
df.loc[:, "offers_shop"] = df["noga"].str.startswith("47")
if det_activities == "true":
if det_activities:# == "true":
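# detailed split of retail: NOGA 471-473 and 478100 count as grocery, the remaining retail codes as other shopping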
df.loc[:, "offers_grocery"] = df["noga"].str.startswith("471") | df["noga"].str.startswith("472") | df["noga"].str.startswith("473") | (df["noga"] == "478100")
df.loc[:, "offers_other(S)"] = df["offers_shop"] & np.logical_not(df["offers_grocery"])
@@ -52,22 +52,25 @@ def execute(context):
df.loc[:, "offers_volunteer"] = True
df.loc[:, "offers_outdoor"] = False
del df["noga"]
#del df["noga"]
df = spatial_utils.to_gpd(context, df, x="destination_x", y="destination_y", coord_type="facility")
max_id = np.max(df["destination_id"].values.tolist())
# Services
if det_activities == "true":
if det_activities:# == "True":
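# service destinations cover selected NOGA codes for health practices, repair, finance and insurance, real estate, rental, personal services, postal services and public administration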
df.loc[:, "offers_services"] = df["noga"].str.startswith("8690") | df["noga"].str.startswith("95") | df["noga"].str.startswith("33") | df["noga"].str.startswith("6419") | df["noga"].str.startswith("6512") | (df["noga"] == "750000") | (df["noga"] == "861001") | (df["noga"] == "861002") | (df["noga"] == "862100") | (df["noga"] == "862200") | (df["noga"] == "862300") | (df["noga"] == "772200") | (df["noga"] == "960101") | (df["noga"] == "960102") | (df["noga"] == "960201") | (df["noga"] == "960202") | (df["noga"] == "960401") | (df["noga"] == "960402") | (df["noga"] == "855300") | (df["noga"] == "531000") | (df["noga"] == "532000") | (df["noga"] == "452001") | (df["noga"] == "452002") | (df["noga"] == "649201") | (df["noga"] == "651100") | (df["noga"] == "681000") | (df["noga"] == "682001") | (df["noga"] == "682002") | (df["noga"] == "682002") | (df["noga"] == "683100") | (df["noga"] == "683200") | (df["noga"] == "691001") | (df["noga"] == "692000") | (df["noga"] == "771100") | (df["noga"] == "772100") | (df["noga"] == "772900") | (df["noga"] == "791100") | (df["noga"] == "791200") | (df["noga"] == "841100") | (df["noga"] == "842400") | (df["noga"] == "842301") | (df["noga"] == "843000")
df.loc[:, "offers_outdoor"] = False
# Outdoor points
if det_activities == "true":
outdoor_path = "/nas/asallard/Switzerland/cache_secloc/"
outdoor = gpd.read_file("%s/Outdoor_points/sample_points_clean.shp"% outdoor_path)
if det_activities:# == "True":
outdoor_path = "/nas/asallard/Switzerland/Outdoor_points"
outdoor = gpd.read_file("%s/sample_points.shp"% outdoor_path)
initial_crs = outdoor.crs
print("OUTDOOR POINTS LOADED")
outdoor["destination_id"] = np.arange(max_id + 1, max_id + 1 + len(outdoor), 1)
outdoor["destination_x"] = [p.x for p in outdoor.geometry.values]
outdoor["destination_y"] = [p.y for p in outdoor.geometry.values]
......
@@ -28,6 +28,7 @@ class CustomDiscretizationSolver(rda.DiscretizationSolver):
def __init__(self, data):
self.data = data
self.indices = {}
self.query_size = 10
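# number of nearest candidate destinations considered for each secondary activity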
for purpose, data in self.data.items():
print("Constructing spatial index for %s ..." % purpose)
@@ -38,7 +39,14 @@ class CustomDiscretizationSolver(rda.DiscretizationSolver):
discretized_identifiers = []
for location, purpose in zip(locations, problem["purposes"]):
index = self.indices[purpose].query(location.reshape(1, -1), return_distance = False)[0][0]
#index = self.indices[purpose].query(location.reshape(1, -1), return_distance = False)[0][0]
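# capacity-weighted assignment: query the nearest candidates and draw one with probability proportional to its number of employees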
distances, indices = self.indices[purpose].query(location.reshape(1, -1), self.query_size, return_distance=True)
candidates_nboemployees = self.data[purpose]["number_employees"][indices[0]]
weights = candidates_nboemployees / np.sum(candidates_nboemployees)
selector = np.random.choice(self.query_size, p=weights)
index = indices[0][selector]
discretized_identifiers.append(self.data[purpose]["identifiers"][index])
discretized_locations.append(self.data[purpose]["locations"][index])
......
@@ -47,7 +47,7 @@ def prepare_destinations(context):
det_activities = context.config("use_detailed_activities")
data = {}
if det_activities == "true":
if det_activities:# == "true":
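# with detailed activities, household home locations become candidate destinations for the "visits" purpose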
df_home = context.stage("synthesis.population.spatial.home.locations").copy()[["household_id", "geometry"]].rename({"household_id": "destination_id"}, axis = 1)
df_home.loc[:, "destination_id"] = np.array(range(M, M + len(df_home), 1))
df_home.loc[:, "offers_visits"] = True
@@ -72,9 +72,13 @@ def prepare_destinations(context):
df_destinations = pd.concat([df_destinations, df_home])
identifiers = df_destinations["destination_id"].values
locations = np.vstack(df_destinations["geometry"].apply(lambda x: np.array([x.x, x.y])).values)
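# destinations added later (e.g. home locations) may lack the offers_outdoor / offers_services flags, so fill NaN with False before building the boolean masks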
df_destinations["offers_outdoor"] = df_destinations['offers_outdoor'].fillna(False)
df_destinations["offers_services"] = df_destinations['offers_services'].fillna(False)
for purpose in ("grocery", "other(S)", "culture", "gastronomy", "religion", "sport", "other(L)", "other", "visits", "volunteer", "outdoor", "services"):
f = df_destinations["offers_%s" % purpose].values
print(list(set(f)))
data[purpose] = dict(
identifiers=identifiers[f],
locations=locations[f]
......