
Commit df66320

ssiegel authored and fabclmnt committed
fix: fix CI pipeline
1 parent 64ddc0d commit df66320


6 files changed: +8 -8 lines changed

.github/workflows/pull-request.yml (+2 -1)

@@ -26,7 +26,8 @@ jobs:
     steps:
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.head_ref }}
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.ref }}
           fetch-depth: 0
           persist-credentials: false
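This checkout change presumably fixes the pipeline for pull requests opened from forks: `repository: ${{ github.event.pull_request.head.repo.full_name }}` clones the PR's head repository (the fork) and `ref: ${{ github.event.pull_request.head.ref }}` checks out the source branch there, whereas the old `ref: ${{ github.head_ref }}` made actions/checkout look for that branch in the base repository, where it does not exist for fork PRs.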

File renamed without changes.

requirements-test.txt (+1)

@@ -4,6 +4,7 @@ codecov
 pytest-cov
 pytest-spark
 nbval
+ipython<9
 pyarrow
 twine>=3.1.1
 kaggle

src/ydata_profiling/model/alerts.py (+2 -1)

@@ -1,7 +1,8 @@
 """Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant
 values, high correlations)."""
+
 from enum import Enum, auto, unique
-from typing import Any, Dict, List, Optional, Set
+from typing import Dict, List, Optional, Set

 import numpy as np
 import pandas as pd

src/ydata_profiling/model/pandas/summary_pandas.py (+1 -5)

@@ -17,11 +17,7 @@


 def _is_cast_type_defined(typeset: VisionsTypeset, series: str) -> bool:
-    return (
-        isinstance(typeset, ProfilingTypeSet)
-        and typeset.type_schema
-        and series in typeset.type_schema
-    )
+    return isinstance(typeset, ProfilingTypeSet) and series in typeset.type_schema


 @describe_1d.register
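A minimal sketch of the simplified check, assuming (as the one-liner implies) that a ProfilingTypeSet always carries a type_schema mapping, so the separate truthiness guard is redundant; the two classes below are illustrative stand-ins, not the real visions/ydata_profiling types:

# Illustrative stand-ins for visions.VisionsTypeset and ProfilingTypeSet;
# the real classes live in visions and ydata_profiling.model.typeset.
class VisionsTypeset:
    pass


class ProfilingTypeSet(VisionsTypeset):
    def __init__(self, type_schema=None):
        # Assumption behind the simplified check: type_schema is always a
        # dict (possibly empty), never None.
        self.type_schema = type_schema or {}


def _is_cast_type_defined(typeset: VisionsTypeset, series: str) -> bool:
    # True only for ProfilingTypeSet instances whose user-supplied
    # type_schema explicitly names this column.
    return isinstance(typeset, ProfilingTypeSet) and series in typeset.type_schema


ts = ProfilingTypeSet({"age": "Numeric"})
print(_is_cast_type_defined(ts, "age"))   # True
print(_is_cast_type_defined(ts, "name"))  # False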

src/ydata_profiling/utils/common.py (+2 -1)

@@ -1,4 +1,5 @@
 """Common util functions (e.g. missing in Python)."""
+
 import collections.abc
 import contextlib
 import os
@@ -165,7 +166,7 @@ def calculate_nrows(df):
             ).collect()[0]
             * n_partitions
         )
-    except:
+    except Exception:
         nrows = 0

     return nrows
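The second hunk swaps a bare `except:` for `except Exception:`: the bare form also swallows KeyboardInterrupt and SystemExit, while `except Exception:` catches only ordinary errors. A minimal sketch of the same fallback pattern, using a hypothetical spark_row_count stand-in for the real Spark call:

def spark_row_count() -> int:
    # Hypothetical stand-in for the Spark-based row count in calculate_nrows;
    # raise to simulate the Spark call failing.
    raise ValueError("no Spark context available")


def calculate_nrows_sketch() -> int:
    try:
        nrows = spark_row_count()
    except Exception:
        # Catches ordinary failures and falls back to 0, but lets
        # KeyboardInterrupt and SystemExit propagate (a bare `except:` would not).
        nrows = 0
    return nrows


print(calculate_nrows_sketch())  # 0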
