Old namespace cleanup
zjwilliams20 committed Oct 27, 2022
1 parent 31fc14a commit 3bbe372
Showing 9 changed files with 75 additions and 77 deletions.
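For downstream code, the practical effect of this commit is a rename of the module path and the solver entry point. A minimal before/after sketch, inferred from the hunks below; the variables problem, X0, U0, and radius are placeholders (built as in run/examples.py), not part of the diff:

# Before this commit
from dpilqr.decentralized import solve_decentralized
X, U, J, info = solve_decentralized(problem, X0, U0, radius)

# After this commit
from dpilqr.distributed import solve_distributed
X, U, J, info = solve_distributed(problem, X0, U0, radius)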
dpilqr/__init__.py (4 changes: 2 additions & 2 deletions)
@@ -8,10 +8,10 @@
     quadraticize_distance,
     quadraticize_finite_difference,
 )
-from .decentralized import (
+from .distributed import (
     define_inter_graph_threshold,
     solve_centralized,
-    solve_decentralized,
+    solve_distributed,
     solve_rhc,
 )
 from .dynamics import (
dpilqr/decentralized.py → dpilqr/distributed.py (19 changes: 9 additions & 10 deletions)
@@ -11,7 +11,6 @@
 
 import itertools
 import logging
-import multiprocessing as mp
 from time import perf_counter as pc
 
 import numpy as np
@@ -23,8 +22,8 @@
 g = 9.81
 
 
-def solve_decentralized(problem, X, U, radius, t_kill=None, pool=None, verbose=True, **kwargs):
-    """Solve the problem via decentralization into subproblems"""
+def solve_distributed(problem, X, U, radius, pool=None, verbose=True, **kwargs):
+    """Solve the problem via division into subproblems"""
 
     x_dims = problem.game_cost.x_dims
     u_dims = problem.game_cost.u_dims
@@ -64,8 +63,8 @@ def solve_decentralized(problem, X, U, radius, t_kill=None, pool=None, verbose=True, **kwargs):
             if verbose:
                 print(f"Problem {id_}: {graph[id_]}\nTook {Δt} seconds\n")
 
-            X_dec[:, i * n_states: (i + 1) * n_states] = Xi_agent
-            U_dec[:, i * n_controls: (i + 1) * n_controls] = Ui_agent
+            X_dec[:, i * n_states : (i + 1) * n_states] = Xi_agent
+            U_dec[:, i * n_controls : (i + 1) * n_controls] = Ui_agent
 
             solve_info[id_] = (Δt, graph[id_])
 
@@ -82,8 +81,8 @@ def solve_decentralized(problem, X, U, radius, t_kill=None, pool=None, verbose=True, **kwargs):
             Δt = pc() - t0
             if verbose:
                 print(f"Problem {id_}: {graph[id_]}\nTook {Δt} seconds")
-            X_dec[:, i * n_states: (i + 1) * n_states] = Xi_agent
-            U_dec[:, i * n_controls: (i + 1) * n_controls] = Ui_agent
+            X_dec[:, i * n_states : (i + 1) * n_states] = Xi_agent
+            U_dec[:, i * n_controls : (i + 1) * n_controls] = Ui_agent
 
     # NOTE: This cannot be compared to the single-processed version due to
     # multi-processing overhead.
@@ -96,7 +95,7 @@ def solve_decentralized(problem, X, U, radius, t_kill=None, pool=None, verbose=True, **kwargs):
     return X_dec, U_dec, J_full, solve_info
 
 
-def solve_rhc(  # N is the length of the prediction horizon
+def solve_rhc(
     problem,
     x0,
     N,
@@ -142,7 +141,7 @@ def predicate(x, _):
     xi = x0.reshape(1, -1)
     X = xi.copy()
     # U = np.zeros((N, n_u))
-    U = np.random.rand(N, n_u)*0.01
+    U = np.random.rand(N, n_u) * 0.01
     # U = np.tile([g, 0, 0], (N, n_agents))
     centralized_solver = ilqrSolver(problem, N)
 
@@ -164,7 +163,7 @@ def predicate(x, _):
             )
             # print(f"Shape of X at each prediction horizon is{X.shape}")
         else:
-            X, U, J, solve_info = solve_decentralized(
+            X, U, J, solve_info = solve_distributed(
                 problem, X, U, *args, verbose=False, **kwargs
            )
             # print(f"Shape of X at each prediction horizon is{X.shape}")
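Beyond the rename, the hunks above also drop the multiprocessing import and the t_kill keyword from the entry point's signature. A rough calling sketch against the new signature; the setup of problem, X, U, and radius is assumed (e.g. as in run/examples.py) and is not shown in this diff:

from dpilqr.distributed import solve_distributed

# problem, X, U, radius are assumed to be constructed elsewhere.
# Callers that previously passed t_kill= should drop that argument.
X_dec, U_dec, J_full, solve_info = solve_distributed(
    problem, X, U, radius, pool=None, verbose=False
)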
dpilqr/dynamics.py (9 changes: 4 additions & 5 deletions)
@@ -5,7 +5,6 @@
 import abc
 
 import numpy as np
-from scipy.constants import g
 from scipy.optimize import approx_fprime
 from scipy.integrate import solve_ivp
 import sympy as sym
@@ -152,8 +151,8 @@ def f(self, x, u):
         nx = self.x_dims[0]
         nu = self.u_dims[0]
         for i, model in enumerate(self.submodels):
-            xn[i * nx: (i + 1) * nx] = model.f(
-                x[i * nx: (i + 1) * nx], u[i * nu: (i + 1) * nu]
+            xn[i * nx : (i + 1) * nx] = model.f(
+                x[i * nx : (i + 1) * nx], u[i * nu : (i + 1) * nu]
             )
         return xn
 
@@ -166,8 +165,8 @@ def __call__(self, x, u):
         nx = self.x_dims[0]
         nu = self.u_dims[0]
         for i, model in enumerate(self.submodels):
-            xn[i * nx: (i + 1) * nx] = model.__call__(
-                x[i * nx: (i + 1) * nx], u[i * nu: (i + 1) * nu]
+            xn[i * nx : (i + 1) * nx] = model.__call__(
+                x[i * nx : (i + 1) * nx], u[i * nu : (i + 1) * nu]
             )
         return xn
 
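The slice arithmetic reformatted above partitions the stacked joint state and control by agent. A small self-contained illustration of that indexing (the sizes here are examples, not taken from this diff):

import numpy as np

nx, n_agents = 6, 3                                # per-agent state size, number of agents
x = np.arange(nx * n_agents)                       # stacked joint state with dummy values
blocks = [x[i * nx : (i + 1) * nx] for i in range(n_agents)]
assert all(block.size == nx for block in blocks)   # one 6-element block per agent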
dpilqr/graphics.py (2 changes: 1 addition & 1 deletion)
@@ -9,7 +9,7 @@
 import networkx as nx
 import numpy as np
 
-from decentralized.util import split_agents, compute_pairwise_distance
+from dpilqr.util import split_agents, compute_pairwise_distance
 
 
 plt.rcParams.update(
dpilqr/problem.py (4 changes: 2 additions & 2 deletions)
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-"""Logic to combine dynamics and cost in one framework to simplify decentralization"""
+"""Logic to combine dynamics and cost in one framework to simplify distribution"""
 
 from time import perf_counter as pc
 
@@ -34,7 +34,7 @@ def ids(self):
         return self.dynamics.ids.copy()
 
     def split(self, graph):
-        """Split up this centralized problem into a list of decentralized
+        """Split up this centralized problem into a list of distributed
         sub-problems.
         """
 
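The split() method touched above is what turns one centralized problem into per-agent sub-problems. A hypothetical usage sketch; the structure of graph (an interaction graph keyed by agent id, here with every agent alone in its own group) is an assumption and is not shown in this diff:

ids = [100 + i for i in range(n_agents)]   # agent ids, following run/examples.py
graph = {id_: [id_] for id_ in ids}        # assumed format: id -> list of coupled agent ids
subproblems = problem.split(graph)         # one sub-problem per entry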
run/analysis.py (2 changes: 1 addition & 1 deletion)
@@ -27,7 +27,7 @@
     QuadcopterDynamics6D,
     MultiDynamicalModel,
 )
-from dpilqr.decentralized import solve_rhc
+from dpilqr.distributed import solve_rhc
 from dpilqr.problem import ilqrProblem
 from dpilqr.util import split_agents_gen, random_setup
 
run/examples.py (68 changes: 36 additions & 32 deletions)
@@ -16,7 +16,7 @@
 import matplotlib.pyplot as plt
 
 from dpilqr import split_agents, plot_solve
-import dpilqr as dec
+import dpilqr
 import scenarios
 
 π = np.pi
@@ -30,15 +30,15 @@ def single_unicycle():
     x = np.array([-10, 10, 10, 0], dtype=float)
     x_goal = np.zeros((4, 1), dtype=float).T
 
-    dynamics = dec.UnicycleDynamics4D(dt)
+    dynamics = dpilqr.UnicycleDynamics4D(dt)
 
     Q = np.diag([1.0, 1, 0, 0])
     Qf = 1000 * np.eye(Q.shape[0])
     R = np.eye(2)
-    cost = dec.ReferenceCost(x_goal, Q, R, Qf)
+    cost = dpilqr.ReferenceCost(x_goal, Q, R, Qf)
 
-    prob = dec.ilqrProblem(dynamics, cost)
-    ilqr = dec.ilqrSolver(prob, N)
+    prob = dpilqr.ilqrProblem(dynamics, cost)
+    ilqr = dpilqr.ilqrSolver(prob, N)
     X, _, J = ilqr.solve(x)
 
     plt.clf()
@@ -54,15 +54,15 @@ def single_quad6d():
     x = np.array([2, 2, 0.5, 0, 0, 0], dtype=float)
     xf = np.zeros((6, 1), dtype=float).T
 
-    dynamics = dec.QuadcopterDynamics6D(dt)
+    dynamics = dpilqr.QuadcopterDynamics6D(dt)
 
     Q = np.eye(6)
     Qf = 100 * np.eye(Q.shape[0])
     R = np.diag([0, 1, 1])
-    cost = dec.ReferenceCost(xf, Q, R, Qf)
+    cost = dpilqr.ReferenceCost(xf, Q, R, Qf)
 
-    prob = dec.ilqrProblem(dynamics, cost)
-    ilqr = dec.ilqrSolver(prob, N)
+    prob = dpilqr.ilqrProblem(dynamics, cost)
+    ilqr = dpilqr.ilqrSolver(prob, N)
 
     X, _, J = ilqr.solve(x)
 
@@ -97,23 +97,27 @@ def two_quads_one_human():
     Rs = [R, R, R_human]
     Qfs = [Qf, Qf, Qf_human]
 
-    models = [dec.QuadcopterDynamics6D, dec.QuadcopterDynamics6D, dec.HumanDynamics6D]
+    models = [
+        dpilqr.QuadcopterDynamics6D,
+        dpilqr.QuadcopterDynamics6D,
+        dpilqr.HumanDynamics6D,
+    ]
     ids = [100 + i for i in range(n_agents)]
-    dynamics = dec.MultiDynamicalModel(
+    dynamics = dpilqr.MultiDynamicalModel(
         [model(dt, id_) for id_, model in zip(ids, models)]
     )
 
     goal_costs = [
-        dec.ReferenceCost(xf_i, Qi, Ri, Qfi, id_)
+        dpilqr.ReferenceCost(xf_i, Qi, Ri, Qfi, id_)
         for xf_i, id_, x_dim, Qi, Ri, Qfi in zip(
-            dec.split_agents_gen(xf, x_dims), ids, x_dims, Qs, Rs, Qfs
+            dpilqr.split_agents_gen(xf, x_dims), ids, x_dims, Qs, Rs, Qfs
         )
     ]
-    prox_cost = dec.ProximityCost(x_dims, radius, n_dims)
-    game_cost = dec.GameCost(goal_costs, prox_cost)
+    prox_cost = dpilqr.ProximityCost(x_dims, radius, n_dims)
+    game_cost = dpilqr.GameCost(goal_costs, prox_cost)
 
-    problem = dec.ilqrProblem(dynamics, game_cost)
-    solver = dec.ilqrSolver(problem, N)
+    problem = dpilqr.ilqrProblem(dynamics, game_cost)
+    solver = dpilqr.ilqrSolver(problem, N)
 
     U0 = np.c_[np.tile([g, 0, 0], (N, 2)), np.ones((N, n_controls))]
     X, _, J = solver.solve(x0, U0)
Expand All @@ -122,7 +126,7 @@ def two_quads_one_human():
plot_solve(X, J, xf, x_dims, True, 3)

plt.figure()
dec.plot_pairwise_distances(X, x_dims, n_dims, radius)
dpilqr.plot_pairwise_distances(X, x_dims, n_dims, radius)

plt.show()

@@ -138,7 +142,7 @@ def random_multiagent_simulation():
 
     n_d = n_dims[0]
 
-    x0, xf = dec.random_setup(
+    x0, xf = dpilqr.random_setup(
         n_agents,
         n_states,
         is_rotation=False,
@@ -151,7 +155,7 @@ def random_multiagent_simulation():
     x_dims = [n_states] * n_agents
     u_dims = [n_controls] * n_agents
 
-    dec.eyeball_scenario(x0, xf, n_agents, n_states)
+    dpilqr.eyeball_scenario(x0, xf, n_agents, n_states)
     plt.show()
 
     dt = 0.05
@@ -160,42 +164,42 @@ def random_multiagent_simulation():
     tol = 1e-6
     ids = [100 + i for i in range(n_agents)]
 
-    model = dec.UnicycleDynamics4D
-    dynamics = dec.MultiDynamicalModel([model(dt, id_) for id_ in ids])
+    model = dpilqr.UnicycleDynamics4D
+    dynamics = dpilqr.MultiDynamicalModel([model(dt, id_) for id_ in ids])
 
     Q = np.eye(4)
     R = np.eye(2)
     Qf = 1e3 * np.eye(n_states)
     radius = 0.5
 
     goal_costs = [
-        dec.ReferenceCost(xf_i, Q.copy(), R.copy(), Qf.copy(), id_)
+        dpilqr.ReferenceCost(xf_i, Q.copy(), R.copy(), Qf.copy(), id_)
         for xf_i, id_, x_dim, u_dim in zip(
-            dec.split_agents_gen(xf, x_dims), ids, x_dims, u_dims
+            dpilqr.split_agents_gen(xf, x_dims), ids, x_dims, u_dims
        )
     ]
-    prox_cost = dec.ProximityCost(x_dims, radius, n_dims)
+    prox_cost = dpilqr.ProximityCost(x_dims, radius, n_dims)
     goal_costs = [
-        dec.ReferenceCost(xf_i, Q.copy(), R.copy(), Qf.copy(), id_)
+        dpilqr.ReferenceCost(xf_i, Q.copy(), R.copy(), Qf.copy(), id_)
         for xf_i, id_ in zip(split_agents(xf.T, x_dims), ids)
     ]
-    prox_cost = dec.ProximityCost(x_dims, radius, n_dims)
-    game_cost = dec.GameCost(goal_costs, prox_cost)
+    prox_cost = dpilqr.ProximityCost(x_dims, radius, n_dims)
+    game_cost = dpilqr.GameCost(goal_costs, prox_cost)
 
-    problem = dec.ilqrProblem(dynamics, game_cost)
-    solver = dec.ilqrSolver(problem, N)
+    problem = dpilqr.ilqrProblem(dynamics, game_cost)
+    solver = dpilqr.ilqrSolver(problem, N)
 
     X, _, J = solver.solve(x0, tol=tol, t_kill=None)
 
     plt.clf()
     plot_solve(X, J, xf.T, x_dims, True, n_d)
 
     plt.figure()
-    dec.plot_pairwise_distances(X, x_dims, n_dims, radius)
+    dpilqr.plot_pairwise_distances(X, x_dims, n_dims, radius)
 
     plt.show()
 
-    dec.make_trajectory_gif(f"{n_agents}-unicycles.gif", X, xf, x_dims, radius)
+    dpilqr.make_trajectory_gif(f"{n_agents}-unicycles.gif", X, xf, x_dims, radius)
 
 
 def main():