diff --git a/.gitignore b/.gitignore
index 676b1b8a046b56d3b9d61d5c5077d8970967284b..0461dbe45d3188e3efef3997829f4fdf113c6ab7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 planarEnvs.egg-info/
+*.swp
diff --git a/examples/mobile_base.py b/examples/mobile_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b24b4f192238ccf367f36fc764ae5c53c369589
--- /dev/null
+++ b/examples/mobile_base.py
@@ -0,0 +1,28 @@
+import gym
+import mobileBase
+import time
+
+
+def main():
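+    # Create the velocity-controlled mobile base environment with a 10 ms timestep.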
+    dt = 0.01
+    env = gym.make('mobile-base-vel-v0', dt=dt)
+    defaultAction = [0.8]  # constant velocity command within the [-1, 1] action limits
+    n_episodes = 1
+    n_steps = 1000
+    cumReward = 0.0
+    for e in range(n_episodes):
+        ob = env.reset()
+        print("Starting episode")
+        for i in range(n_steps):
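+            # Sleep for one timestep so the rendered rollout plays back in roughly real time.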
+            time.sleep(dt)
+            # Use the constant default action; swap in env.action_space.sample() for random commands.
+            action = defaultAction
+            env.render()
+            ob, reward, done, info = env.step(action)
+            cumReward += reward
+            if done:
+                break
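+        print("Cumulative reward: %1.2f" % cumReward)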
+
+
+if __name__ == '__main__':
+    main()
diff --git a/mobileBase/.gitignore b/mobileBase/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..c18dd8d83ceed1806b50b0aaa46beb7e335fff13
--- /dev/null
+++ b/mobileBase/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/mobileBase/__init__.py b/mobileBase/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdf06cc2b7e871f2bcf09e40389f92dd346cf667
--- /dev/null
+++ b/mobileBase/__init__.py
@@ -0,0 +1,5 @@
+from gym.envs.registration import register
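+# Register the environment so it can be created with gym.make('mobile-base-vel-v0').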
+register(
+    id='mobile-base-vel-v0',
+    entry_point='mobileBase.envs:MobileBaseVelEnv'
+)
diff --git a/mobileBase/envs/.gitignore b/mobileBase/envs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..c18dd8d83ceed1806b50b0aaa46beb7e335fff13
--- /dev/null
+++ b/mobileBase/envs/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/mobileBase/envs/__init__.py b/mobileBase/envs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b800ff45daaccaca9397fc2e53266c1ea409ed
--- /dev/null
+++ b/mobileBase/envs/__init__.py
@@ -0,0 +1 @@
+from mobileBase.envs.vel import MobileBaseVelEnv
diff --git a/mobileBase/envs/vel.py b/mobileBase/envs/vel.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb4d84e5d85ab7466b99d2402d794297d39f434b
--- /dev/null
+++ b/mobileBase/envs/vel.py
@@ -0,0 +1,110 @@
+import numpy as np
+from numpy import sin, cos, pi
+
+from scipy.integrate import odeint
+
+from gym import core, spaces
+from gym.utils import seeding
+
+
+class MobileBaseVelEnv(core.Env):
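+    """A 1-DoF mobile base moving along a line, driven by a commanded forward velocity."""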
+
+    metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 15}
+
+    BASE_HEIGHT = 1.0  # [m] height of the rendered base
+    LINK_MASS_BASE = 500.0  # [kg] mass of the base
+
+    MAX_VEL_BASE = 1.0
+    MAX_POS_BASE = 5.0
+
+    actionlimits = [np.array([-MAX_VEL_BASE]), np.array([MAX_VEL_BASE])]
+
+    def __init__(self, dt=0.01):
+        self.viewer = None
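+        # Observation: [base position, base velocity]; action: commanded base velocity.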
+        high = np.array(
+            [
+                self.MAX_POS_BASE,
+                self.MAX_VEL_BASE
+            ],
+            dtype=np.float64,
+        )
+        low = -high
+        self.observation_space = spaces.Box(low=low, high=high, dtype=np.float64)
+        self.action_space = spaces.Box(
+            low=self.actionlimits[0], high=self.actionlimits[1], dtype=np.float64
+        )
+        self.state = None
+        self._dt = dt
+        self.seed()
+
+    def seed(self, seed=None):
+        self.np_random, seed = seeding.np_random(seed)
+        return [seed]
+
+    def reset(self):
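+        # Reset the base to the origin with zero velocity.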
+        self.state = np.zeros(2)
+        return self._get_ob()
+
+    def step(self, a):
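+        # Integrate the commanded velocity over one timestep and store [position, velocity].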
+        self.action = a
+        ns = self.integrate()
+        self.state = np.append(ns, a)
+        terminal = self._terminal()
+        reward = -1.0 if not terminal else 0.0
+        return (self._get_ob(), reward, terminal, {})
+
+    def _get_ob(self):
+        return self.state
+
+    def _terminal(self):
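+        # The episode terminates once the base leaves the allowed position range.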
+        if self.state[0] > self.MAX_POS_BASE or self.state[0] < -self.MAX_POS_BASE:
+            return True
+        return False
+
+    def continuous_dynamics(self, x, t):
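+        # Velocity control: the time derivative of the position is the commanded velocity.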
+        return self.action
+
+    def integrate(self):
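+        # Advance the base position by one timestep dt using scipy's odeint.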
+        x0 = self.state[0]
+        t = np.arange(0, 2 * self._dt, self._dt)
+        ynext = odeint(self.continuous_dynamics, x0, t)
+        return ynext[1]
+
+    def render(self, mode="human"):
+        from gym.envs.classic_control import rendering
+
+        s = self.state
+
+        if self.viewer is None:
+            self.viewer = rendering.Viewer(500, 500)
+            bound = self.MAX_POS_BASE + 1.0
+            self.viewer.set_bounds(-bound, bound, -bound, bound)
+
+        if s is None:
+            return None
+
+        p0 = [s[0], 0.5 * self.BASE_HEIGHT]
+        tf0 = rendering.Transform(rotation=0.0, translation=p0)
+
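+        # Draw the ground line and the base as a unit square at the current position.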
+        self.viewer.draw_line((-5.5, 0), (5.5, 0))
+
+        l, r, t, b = -0.5, 0.5, 0.5, -0.5
+        link = self.viewer.draw_polygon([(l, b), (l, t), (r, t), (r, b)])
+        link.set_color(0, 0.8, 0.8)
+        link.add_attr(tf0)
+
+        return self.viewer.render(return_rgb_array=mode == "rgb_array")
+
+    def close(self):
+        if self.viewer:
+            self.viewer.close()
+            self.viewer = None