Commit 19a81b5 (0 parents)
Showing 9 changed files with 759 additions and 0 deletions.
@@ -0,0 +1 @@
log/
@@ -0,0 +1,6 @@
[submodule "commons"]
	path = commons
	url = git@github.com:hiwonjoon/tf-boilerplate.git
[submodule "libs/flann"]
	path = libs/flann
	url = git@github.com:hiwonjoon/flann
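Note (not part of the commit): since commons and libs/flann are registered as git submodules here, a fresh checkout also needs `git submodule update --init --recursive` (or a clone with `--recursive`) to fetch their contents.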
@@ -0,0 +1,4 @@
# Neural Episodic Control

TensorFlow implementation of Neural Episodic Control (Pritzel et al., 2017).
@@ -0,0 +1,122 @@
import collections
import os
import pickle
import numpy as np
from pyflann import FLANN
# ngtpy is buggy. (incremental remove and add is fragile)
#import ngtpy

class FastDictionary(object):
    def __init__(self,maxlen):
        self.flann = FLANN()

        self.counter = 0

        self.contents_lookup = {} #{oid: (e,q)}
        self.p_queue = collections.deque() #eviction queue: FIFO of oids, oldest first
        self.maxlen = maxlen

    def save(self,dir,fname,it=None):
        fname = f'{fname}' if it is None else f'{fname}-{it}'

        with open(os.path.join(dir,fname),'wb') as f:
            pickle.dump((self.contents_lookup,self.p_queue,self.maxlen),f)

    def restore(self,fname):
        with open(fname,'rb') as f:
            _contents_lookup, _p_queue, maxlen = pickle.load(f)

        assert self.maxlen == maxlen, (self.maxlen,maxlen)

        new_oid_lookup = {}
        E = []
        for oid,(e,q) in _contents_lookup.items():
            E.append(e)

            new_oid, self.counter = self.counter, self.counter+1

            new_oid_lookup[oid] = new_oid
            self.contents_lookup[new_oid] = (e,q)

        # Rebuild KD-Tree
        self.flann.build_index(np.array(E))

        # Rebuild eviction queue, remapping old oids to the new ones
        while len(_p_queue) > 0:
            oid = _p_queue.popleft()

            if not oid in new_oid_lookup:
                continue
            self.p_queue.append(new_oid_lookup[oid])

    def add(self,E,Contents):
        assert not np.isnan(E).any(), ('NaN Detected in Add',np.argwhere(np.isnan(E)))
        assert len(E) == len(Contents)

        if self.counter == 0:
            self.flann.build_index(E)
        else:
            self.flann.add_points(E)
        Oid, self.counter = np.arange(self.counter,self.counter+len(E)), self.counter + len(E)

        for oid,content in zip(Oid,Contents):
            self.contents_lookup[oid] = content
            self.p_queue.append(oid)

            if len(self.contents_lookup) > self.maxlen:
                while not self.p_queue[0] in self.contents_lookup:
                    self.p_queue.popleft() #invalidated items due to update, so just pop.

                old_oid = self.p_queue.popleft()

                self.flann.remove_point(old_oid)
                del self.contents_lookup[old_oid]

    def query_knn(self,E,K=100):
        assert not np.isnan(E).any(), ('NaN Detected in Querying',np.argwhere(np.isnan(E)))

        flatten = False
        if E.ndim == 1:
            E = E[None]
            flatten = True

        Oids, _ = self.flann.nn_index(E,num_neighbors=K)
        NN_E = np.zeros((len(E),K,E.shape[1]),np.float32)
        NN_Q = np.zeros((len(E),K),np.float32)

        for b,oids in enumerate(Oids):
            for k,oid in enumerate(oids):
                e,q = self.contents_lookup[oid]

                NN_E[b,k] = e
                NN_Q[b,k] = q

        if flatten:
            return Oids, NN_E[0], NN_Q[0]
        else:
            return Oids, NN_E, NN_Q

    def update(self,Oid,E,Contents):
        """
        Basically, this is the same as remove & add.
        It only manages the eviction queue more carefully, since deleting an item
        from the middle of the queue is not trivial.
        """
        assert not np.isnan(E).any(), ('NaN Detected in Updating',np.argwhere(np.isnan(E)))
        assert len(np.unique(Oid)) == len(Oid)

        # add new embeddings
        self.flann.add_points(E)
        NewOid, self.counter = np.arange(self.counter,self.counter+len(E)), self.counter + len(E)

        for oid,new_oid,content in zip(Oid,NewOid,Contents):
            self.contents_lookup[new_oid] = content
            self.p_queue.append(new_oid)

            # delete from kd-tree
            self.flann.remove_point(oid)
            # delete from contents_lookup
            del self.contents_lookup[oid]
            # The stale oid cannot be removed from p_queue here; it is handled in the add op.

if __name__ == "__main__":
    pass
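As a quick orientation (not part of the commit), a minimal usage sketch of the FastDictionary above. It assumes pyflann is importable; the 4-dimensional embeddings and the (embedding, Q-value) contents are purely illustrative.

import numpy as np

# Hypothetical example: a small episodic memory of (embedding, Q-value) pairs.
memory = FastDictionary(maxlen=1000)

E = np.random.randn(32, 4).astype(np.float32)                  # 32 embeddings of size 4
Contents = [(e, np.float32(q)) for e, q in zip(E, np.random.rand(32))]
memory.add(E, Contents)

# Query the 5 nearest stored embeddings for a single vector.
oids, nn_e, nn_q = memory.query_knn(E[0], K=5)
print(nn_e.shape, nn_q.shape)                                  # (5, 4) and (5,)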
@@ -0,0 +1,249 @@
# Code from openai/baselines
# https://raw.githubusercontent.com/openai/baselines/master/baselines/common/atari_wrappers.py

import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)

class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)

class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)

class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs

class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

class ClipRewardEnv(gym.RewardWrapper):
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)

class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True):
        """Warp frames to 84x84 as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = width
        self.height = height
        self.grayscale = grayscale
        if self.grayscale:
            self.observation_space = spaces.Box(low=0, high=255,
                shape=(self.height, self.width, 1), dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(low=0, high=255,
                shape=(self.height, self.width, 3), dtype=np.uint8)

    def observation(self, frame):
        if self.grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        if self.grayscale:
            frame = np.expand_dims(frame, -1)
        return frame

class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))

class ScaledFloatFrame(gym.ObservationWrapper):
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # careful! This undoes the memory optimization, use
        # with smaller replay buffers only.
        return np.array(observation).astype(np.float32) / 255.0

class LazyFrames(object):
    def __init__(self, frames):
        """This object ensures that common frames between the observations are only stored once.
        It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
        buffers.
        This object should only be converted to numpy array before being passed to the model.
        You'd not believe how complex the previous solution was."""
        self._frames = frames
        self._out = None

    def _force(self):
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        if dtype is not None:
            out = out.astype(dtype)
        return out

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

def make_atari(env_id):
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    return env

def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
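As a quick orientation (not part of the commit), a minimal sketch of how these wrappers are typically composed. It assumes gym with the Atari ROMs is installed; 'BreakoutNoFrameskip-v4' is only an example environment id.

import numpy as np

# Assumes make_atari and wrap_deepmind from the file above are importable.
env = make_atari('BreakoutNoFrameskip-v4')       # NoopResetEnv + MaxAndSkipEnv
env = wrap_deepmind(env, frame_stack=True)       # episodic life, warp to 84x84, clip rewards, stack 4 frames

ob = env.reset()
ob, reward, done, info = env.step(env.action_space.sample())
print(np.array(ob).shape)                        # (84, 84, 4): grayscale frames stacked on the last axis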