Fix CI for macos-latest (#91)
* fix ci macos
* is_success consistency
* signature for compute_reward
* update actions versions
* try another ci
* secret token
* build only pr
* upload only py3.10
qgallouedec authored Jun 10, 2024
1 parent 51fb901 commit ede15d7
Showing 7 changed files with 55 additions and 45 deletions.
76 changes: 43 additions & 33 deletions .github/workflows/build.yml
```diff
@@ -1,71 +1,81 @@
 name: build
 
-on: [push, pull_request]
+on: [pull_request]
 
 jobs:
   test:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-latest, macos-13, windows-latest]
         python-version: ['3.7', '3.8', '3.9', '3.10']
+        exclude:
+          # Exclude the combination of macOS-latest and Python 3.7 as arm64 doesn't support Python 3.7
+          - os: macos-latest
+            python-version: '3.7'
     steps:
-      - uses: actions/checkout@v3
+      # Check out the repository code
+      - uses: actions/checkout@v4
 
+      # Set up the specified Python version
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
 
+      # Install dependencies
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -e .
           pip install pytest-cov
+
+      # Run tests with pytest and generate coverage report
       - name: Test with pytest
         run: |
           pytest --cov=./ --cov-report=xml
+
+      # Additional steps only for ubuntu-latest and Python 3.10
+      # Upload the coverage report as an artifact
       - name: Save coverage report
-        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
+        uses: actions/upload-artifact@v4
         with:
           name: coverage-report
           path: ./coverage.xml
 
-  upload-coverage:
-    needs: test
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Download coverage reports
-        uses: actions/download-artifact@v2
-        with:
-          name: coverage-report
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v3
-        with:
-          directory: ./coverage/reports/
-          fail_ci_if_error: true
-          files: ./coverage.xml
-          name: codecov-umbrella
-          verbose: true
-
-  codestyle_type_and_doc:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python 3.8
-        uses: actions/setup-python@v4
-        with:
-          python-version: 3.8
-      - name: Install dependencies
+      # Install development dependencies
+      - name: Install dev dependencies
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
         run: |
           python -m pip install --upgrade pip
           pip install -e .[develop]
+
+      # Run Pytype for type checking
       - name: Pytype
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
         run: |
           pytype panda_gym
+
+      # Check code style with black and isort
       - name: Check codestyle
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
         run: |
           black -l 127 --check panda_gym test
           isort -l 127 --profile black --check panda_gym test
+
+      # Build documentation
       - name: Make docs
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
         run: |
           make html
+
+      # Upload coverage to Codecov
+      - name: Upload coverage to Codecov
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
+        uses: codecov/codecov-action@v4
+        with:
+          files: ./coverage.xml
+          fail_ci_if_error: true
+          name: codecov-umbrella
+          verbose: true
+          token: ${{ secrets.CODECOV_TOKEN }} # required
```
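Note on the matrix change: `macos-latest` now resolves to arm64 runners, which ship no Python 3.7 build, so the commit adds `macos-13` (x86_64) alongside it and excludes the one impossible cell. A minimal sketch, not part of the commit, with values copied from the workflow above, of how the cross product plus `exclude` plays out:

```python
from itertools import product

# Matrix values copied from the workflow above.
oses = ["ubuntu-latest", "macos-latest", "macos-13", "windows-latest"]
pythons = ["3.7", "3.8", "3.9", "3.10"]
excluded = {("macos-latest", "3.7")}  # arm64 runners ship no Python 3.7

# GitHub Actions expands the matrix as a cross product, then drops excludes.
jobs = [(os_, py) for os_, py in product(oses, pythons) if (os_, py) not in excluded]

print(len(jobs))  # 15 jobs: 16 combinations minus the single exclusion
```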
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/flip.py
```diff
@@ -78,11 +78,11 @@ def _sample_object(self) -> Tuple[np.ndarray, np.ndarray]:
         object_rotation = np.zeros(3)
         return object_position, object_rotation
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = angle_distance(achieved_goal, desired_goal)
         return np.array(d < self.distance_threshold, dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = angle_distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array(d > self.distance_threshold, dtype=np.float32)
```
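This signature change (repeated in the five task files below) aligns both methods with Gymnasium's `GoalEnv.compute_reward(achieved_goal, desired_goal, info)` contract, whose callers, notably HER-style goal relabeling, evaluate whole batches of goals at once. A self-contained sketch of the sparse branch; the `distance` helper is reimplemented here for illustration and `0.05` is an assumed threshold:

```python
from typing import Any, Dict

import numpy as np


def distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Euclidean distance over the last axis, mirroring panda_gym's distance helper.
    return np.linalg.norm(a - b, axis=-1)


def compute_reward(
    achieved_goal: np.ndarray,
    desired_goal: np.ndarray,
    info: Dict[str, Any] = {},  # mutable default kept only to mirror the commit's signature
    distance_threshold: float = 0.05,  # assumed value, illustrative only
) -> np.ndarray:
    # Sparse reward: 0.0 within the threshold, -1.0 otherwise, elementwise.
    d = distance(achieved_goal, desired_goal)
    return -np.array(d > distance_threshold, dtype=np.float32)


# HER-style relabeling scores a whole batch of (achieved, desired) pairs at once.
achieved = np.zeros((256, 3))
desired = np.random.uniform(-0.1, 0.1, size=(256, 3))
print(compute_reward(achieved, desired).shape)  # (256,)
```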
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/pick_and_place.py
```diff
@@ -83,11 +83,11 @@ def _sample_object(self) -> np.ndarray:
         object_position += noise
         return object_position
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         return np.array(d < self.distance_threshold, dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array(d > self.distance_threshold, dtype=np.float32)
```
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/push.py
```diff
@@ -85,11 +85,11 @@ def _sample_object(self) -> np.ndarray:
         object_position += noise
         return object_position
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         return np.array(d < self.distance_threshold, dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array(d > self.distance_threshold, dtype=np.float32)
```
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/reach.py
```diff
@@ -52,11 +52,11 @@ def _sample_goal(self) -> np.ndarray:
         goal = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
         return goal
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         return np.array(d < self.distance_threshold, dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array(d > self.distance_threshold, dtype=np.float32)
```
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/slide.py
```diff
@@ -89,11 +89,11 @@ def _sample_object(self) -> np.ndarray:
         object_position += noise
         return object_position
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         return np.array(d < self.distance_threshold, dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array(d > self.distance_threshold, dtype=np.float32)
```
4 changes: 2 additions & 2 deletions panda_gym/envs/tasks/stack.py
```diff
@@ -117,12 +117,12 @@ def _sample_objects(self) -> Tuple[np.ndarray, np.ndarray]:
         # if distance(object1_position, object2_position) > 0.1:
         return object1_position, object2_position
 
-    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> np.ndarray:
+    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         # must be vectorized !!
         d = distance(achieved_goal, desired_goal)
         return np.array((d < self.distance_threshold), dtype=bool)
 
-    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> np.ndarray:
+    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
         d = distance(achieved_goal, desired_goal)
         if self.reward_type == "sparse":
             return -np.array((d > self.distance_threshold), dtype=np.float32)
```
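The `# must be vectorized !!` comment above is why `is_success` returns an array rather than a plain bool: replay-buffer relabeling checks success over batches of goal pairs. A small sketch under the same assumptions as the previous example (the `0.05` threshold is again illustrative):

```python
import numpy as np


def is_success(achieved_goal: np.ndarray, desired_goal: np.ndarray, distance_threshold: float = 0.05) -> np.ndarray:
    # Broadcasts over the last axis: one bool per goal pair, never a single Python bool.
    d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
    return np.array(d < distance_threshold, dtype=bool)


# Works on a single pair...
print(is_success(np.zeros(3), np.array([0.0, 0.0, 0.01])))  # True (0-d bool array)
# ...and on a batch, which is what goal relabeling relies on.
batch = is_success(np.zeros((4, 3)), np.full((4, 3), 0.1))
print(batch.shape, batch.dtype)  # (4,) bool
```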
