update numpy typing to be compatible with v2.0
Limmen committed Jun 17, 2024
1 parent 9349f08 commit 25d2e4b
Showing 11 changed files with 25 additions and 25 deletions.
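
All 25 changed lines apply the same one-token migration: NumPy 2.0 removed the np.float_ alias (it was an alias for np.float64), so every npt.NDArray[np.float_] annotation becomes npt.NDArray[np.float64], while np.int_ is unchanged. A minimal sketch of the pattern, with a hypothetical function body for illustration (the repository's actual implementations are not shown in this diff):

# Hypothetical before/after sketch of the annotation change in this commit.
# NumPy 2.0 removed the np.float_ alias, so the dtype must be spelled
# np.float64 in annotations; runtime behavior is unchanged.
import numpy as np
import numpy.typing as npt


def b1(N: int) -> npt.NDArray[np.float64]:  # was: npt.NDArray[np.float_]
    """Uniform initial belief over N + 1 states (illustrative body only)."""
    return np.full(N + 1, 1.0 / (N + 1), dtype=np.float64)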
@@ -13,7 +13,7 @@ class AptGameConfig(SimulationEnvInputConfig):
     def __init__(self, env_name: str,
                  T: npt.NDArray[Any], O: npt.NDArray[np.int_], Z: npt.NDArray[Any],
                  C: npt.NDArray[Any], S: npt.NDArray[np.int_], A1: npt.NDArray[np.int_],
-                 A2: npt.NDArray[np.int_], b1: npt.NDArray[np.float_], N: int, p_a: float,
+                 A2: npt.NDArray[np.int_], b1: npt.NDArray[np.float64], N: int, p_a: float,
                  save_dir: str, checkpoint_traces_freq: int, gamma: float = 1) -> None:
         """
         Initializes the DTO

@@ -10,7 +10,7 @@ class AptGameState(JSONSerializable):
     Represents the state of the optimal APT game
     """

-    def __init__(self, b1: npt.NDArray[np.float_]) -> None:
+    def __init__(self, b1: npt.NDArray[np.float64]) -> None:
         """
         Intializes the state

@@ -47,7 +47,7 @@ def __init__(self, config: AptGameAttackerMdpConfig):
         self.reset()
         super().__init__()

-    def step(self, pi2: Union[npt.NDArray[Any], int, float, np.int_, np.float_]) \
+    def step(self, pi2: Union[npt.NDArray[Any], int, float, np.int_, np.float64]) \
             -> Tuple[npt.NDArray[Any], int, bool, bool, Dict[str, Any]]:
         """
         Takes a step in the environment by executing the given action

@@ -12,7 +12,7 @@ class AptGameUtil:
     """

     @staticmethod
-    def b1(N: int) -> npt.NDArray[np.float_]:
+    def b1(N: int) -> npt.NDArray[np.float64]:
         """
         Gets the initial belief
@@ -177,7 +177,7 @@ def sample_next_state(T: npt.NDArray[Any], s: int, a1: int, a2: int, S: npt.NDAr
         return int(np.random.choice(np.arange(0, len(S)), p=state_probs))

     @staticmethod
-    def sample_initial_state(b1: npt.NDArray[np.float_]) -> int:
+    def sample_initial_state(b1: npt.NDArray[np.float64]) -> int:
         """
         Samples the initial state
@@ -202,7 +202,7 @@ def sample_next_observation(Z: npt.NDArray[Any], s_prime: int, O: npt.NDArray[np
         return int(o)

     @staticmethod
-    def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float_], pi2: npt.NDArray[Any],
+    def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float64], pi2: npt.NDArray[Any],
                      config: AptGameConfig) -> float:
         """
         A Bayesian filter to compute the belief of player 1
@@ -236,8 +236,8 @@ def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float_], pi2:
         return b_prime_s_prime

     @staticmethod
-    def next_belief(o: int, a1: int, b: npt.NDArray[np.float_], pi2: npt.NDArray[Any],
-                    config: AptGameConfig, a2: int = 0, s: int = 0) -> npt.NDArray[np.float_]:
+    def next_belief(o: int, a1: int, b: npt.NDArray[np.float64], pi2: npt.NDArray[Any],
+                    config: AptGameConfig, a2: int = 0, s: int = 0) -> npt.NDArray[np.float64]:
         """
         Computes the next belief using a Bayesian filter

@@ -11,7 +11,7 @@ class IntrusionResponseGameStateLocal(JSONSerializable):
     with public observations)
     """

-    def __init__(self, d_b1: npt.NDArray[np.float_], a_b1: npt.NDArray[np.float_], s_1_idx: int,
+    def __init__(self, d_b1: npt.NDArray[np.float64], a_b1: npt.NDArray[np.float64], s_1_idx: int,
                  S: npt.NDArray[Any], S_A: npt.NDArray[Any], S_D: npt.NDArray[Any]) -> None:
         """
         Initializes the DTO

@@ -14,8 +14,8 @@ class LocalIntrusionResponseGameConfig(JSONSerializable):
     def __init__(self, env_name: str, T: npt.NDArray[Any], O: npt.NDArray[np.int_], Z: npt.NDArray[Any],
                  R: npt.NDArray[Any], S: npt.NDArray[np.int_], S_A: npt.NDArray[np.int_],
                  S_D: npt.NDArray[np.int_], s_1_idx: int, zones: npt.NDArray[np.int_],
-                 A1: npt.NDArray[np.int_], A2: npt.NDArray[np.int_], d_b1: npt.NDArray[np.float_],
-                 a_b1: npt.NDArray[np.float_], gamma: float,
+                 A1: npt.NDArray[np.int_], A2: npt.NDArray[np.int_], d_b1: npt.NDArray[np.float64],
+                 a_b1: npt.NDArray[np.float64], gamma: float,
                  beta: float, C_D: npt.NDArray[Any], eta: float, A_P: npt.NDArray[Any],
                  Z_D_P: npt.NDArray[Any], Z_U: npt.NDArray[Any]) -> None:
         """

@@ -140,7 +140,7 @@ def local_initial_state_idx(initial_zone: int, S: npt.NDArray[Any]) -> int:
         raise ValueError("Initial state not recognized")

     @staticmethod
-    def local_initial_state_distribution(initial_state_idx, S: npt.NDArray[Any]) -> npt.NDArray[np.float_]:
+    def local_initial_state_distribution(initial_state_idx, S: npt.NDArray[Any]) -> npt.NDArray[np.float64]:
         """
         Gets the initial state distribution
@@ -153,7 +153,7 @@ def local_initial_state_distribution(initial_state_idx, S: npt.NDArray[Any]) ->
         return rho

     @staticmethod
-    def local_initial_defender_belief(S_A: npt.NDArray[Any]) -> npt.NDArray[np.float_]:
+    def local_initial_defender_belief(S_A: npt.NDArray[Any]) -> npt.NDArray[np.float64]:
         """
         Gets the initial defender belief for a local version of the game
@@ -165,7 +165,7 @@ def local_initial_defender_belief(S_A: npt.NDArray[Any]) -> npt.NDArray[np.float
         return d_b1

     @staticmethod
-    def local_initial_attacker_belief(S_D: npt.NDArray[Any], initial_zone) -> npt.NDArray[np.float_]:
+    def local_initial_attacker_belief(S_D: npt.NDArray[Any], initial_zone) -> npt.NDArray[np.float64]:
         """
         Gets the initial attacker belief for a local version of the game
@@ -270,7 +270,7 @@ def local_workflow_utility(beta: float, reachable: bool, s: npt.NDArray[Any], in
         return beta * impact * int(not IntrusionResponseGameUtil.is_local_state_shutdown_or_redirect(s=s))

     @staticmethod
-    def constant_defender_action_costs(A1: npt.NDArray[np.int_], constant_cost: float) -> npt.NDArray[np.float_]:
+    def constant_defender_action_costs(A1: npt.NDArray[np.int_], constant_cost: float) -> npt.NDArray[np.float64]:
         """
         Returns a vector with the local defender action costs where each action has the same constant cost
@@ -297,7 +297,7 @@ def zones(num_zones: int) -> npt.NDArray[Any]:
         return np.array(list(range(1, num_zones + 1)))

     @staticmethod
-    def constant_zone_utilities(zones: npt.NDArray[np.int_], constant_utility: float) -> npt.NDArray[np.float_]:
+    def constant_zone_utilities(zones: npt.NDArray[np.int_], constant_utility: float) -> npt.NDArray[np.float64]:
         """
         Returns a vector with the zone utilities where each zone has the same constant utility
@@ -312,7 +312,7 @@ def constant_zone_utilities(zones: npt.NDArray[np.int_], constant_utility: float

     @staticmethod
     def constant_zone_detection_probabilities(zones: npt.NDArray[np.int_], constant_detection_prob: float) \
-            -> npt.NDArray[np.float_]:
+            -> npt.NDArray[np.float64]:
         """
         Returns a vector with the zone detection probabilities where each zone as the same uniform detection
         probability

@@ -14,7 +14,7 @@ def __init__(self, env_name: str,
                  T: npt.NDArray[Any], O: npt.NDArray[np.int_], Z: npt.NDArray[Any],
                  R: npt.NDArray[Any], S: npt.NDArray[np.int_], A1: npt.NDArray[np.int_],
                  A2: npt.NDArray[np.int_], L: int, R_INT: int, R_COST: int, R_SLA: int, R_ST: int,
-                 b1: npt.NDArray[np.float_],
+                 b1: npt.NDArray[np.float64],
                  save_dir: str, checkpoint_traces_freq: int, gamma: float = 1, compute_beliefs: bool = True,
                  save_trace: bool = True) -> None:
         """

@@ -10,7 +10,7 @@ class StoppingGameState(JSONSerializable):
     Represents the state of the optimal stopping game
     """

-    def __init__(self, b1: npt.NDArray[np.float_], L: int) -> None:
+    def __init__(self, b1: npt.NDArray[np.float64], L: int) -> None:
         """
         Intializes the state

@@ -48,7 +48,7 @@ def __init__(self, config: StoppingGameAttackerMdpConfig):
         self.reset()
         super().__init__()

-    def step(self, pi2: Union[npt.NDArray[Any], int, float, np.int_, np.float_]) \
+    def step(self, pi2: Union[npt.NDArray[Any], int, float, np.int_, np.float64]) \
             -> Tuple[npt.NDArray[Any], int, bool, bool, Dict[str, Any]]:
         """
         Takes a step in the environment by executing the given action

@@ -11,7 +11,7 @@ class StoppingGameUtil:
     """

     @staticmethod
-    def b1() -> npt.NDArray[np.float_]:
+    def b1() -> npt.NDArray[np.float64]:
         """
         Gets the initial belief
@@ -233,7 +233,7 @@ def sample_next_state(T: npt.NDArray[Any], l: int, s: int, a1: int, a2: int, S:
         return int(np.random.choice(np.arange(0, len(S)), p=state_probs))

     @staticmethod
-    def sample_initial_state(b1: npt.NDArray[np.float_]) -> int:
+    def sample_initial_state(b1: npt.NDArray[np.float64]) -> int:
         """
         Samples the initial state
@@ -264,7 +264,7 @@ def sample_next_observation(Z: npt.NDArray[Any], s_prime: int, O: npt.NDArray[np
         return int(o)

     @staticmethod
-    def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float_], pi2: npt.NDArray[Any], l: int,
+    def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float64], pi2: npt.NDArray[Any], l: int,
                      config: StoppingGameConfig) -> float:
         """
         A Bayesian filter to compute the belief of player 1
@@ -302,8 +302,8 @@ def bayes_filter(s_prime: int, o: int, a1: int, b: npt.NDArray[np.float_], pi2:
         return float(b_prime_s_prime)

     @staticmethod
-    def next_belief(o: int, a1: int, b: npt.NDArray[np.float_], pi2: npt.NDArray[Any],
-                    config: StoppingGameConfig, l: int, a2: int = 0, s: int = 0) -> npt.NDArray[np.float_]:
+    def next_belief(o: int, a1: int, b: npt.NDArray[np.float64], pi2: npt.NDArray[Any],
+                    config: StoppingGameConfig, l: int, a2: int = 0, s: int = 0) -> npt.NDArray[np.float64]:
         """
         Computes the next belief using a Bayesian filter
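
A quick sanity check of why the rename is needed (a sketch, assuming NumPy >= 2.0 is installed):

# Under NumPy >= 2.0, accessing the removed alias raises AttributeError,
# while np.float64 continues to work as before.
import numpy as np

try:
    np.float_
except AttributeError as e:
    print(e)  # e.g. "np.float_ was removed in the NumPy 2.0 release. Use np.float64 instead."

print(np.float64(1.0))  # unaffected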
