
Commit 2702bf9

Enable ruff S113 rule (TheAlgorithms#11375)
* Enable ruff S113 rule
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  For more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

1 parent 7b88e15 · commit 2702bf9

36 files changed: +68 -46 lines
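Ruff's S113 rule ("Probable use of requests call without timeout") flags requests calls that omit a timeout, because such calls can block indefinitely if the remote host never responds. Every change in this commit applies the same fix: pass an explicit timeout argument (10 seconds throughout). A minimal before/after sketch of the pattern, using an illustrative URL rather than one taken from the repository:

    import requests

    # Before: flagged by S113 -- the call may hang forever if the server stalls
    data = requests.get("https://example.com/data.json").json()

    # After: the call raises requests.exceptions.Timeout after ~10 seconds instead of hanging
    response = requests.get("https://example.com/data.json", timeout=10)
    response.raise_for_status()
    data = response.json()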

machine_learning/linear_regression.py (+2 -1)

@@ -19,7 +19,8 @@ def collect_dataset():
     """
     response = requests.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
-        "master/Week1/ADRvsRating.csv"
+        "master/Week1/ADRvsRating.csv",
+        timeout=10,
     )
     lines = response.text.splitlines()
     data = []

pyproject.toml (-1)

@@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
   "RUF002", # Docstring contains ambiguous {}. Did you mean {}?
   "RUF003", # Comment contains ambiguous {}. Did you mean {}?
   "S101", # Use of `assert` detected -- DO NOT FIX
-  "S113", # Probable use of requests call without timeout -- FIX ME
   "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME
   "SLF001", # Private member accessed: `_Iterator` -- FIX ME
   "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX

scripts/validate_solutions.py (+1 -1)

@@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers).json()
+    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (

web_programming/co2_emission.py (+2 -2)

@@ -11,13 +11,13 @@
 
 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL).json()["data"][0]
+    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]
 
 
 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"]
+    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
 
 
 if __name__ == "__main__":

web_programming/covid_stats_via_xpath.py (+3 -1)

@@ -18,7 +18,9 @@ class CovidData(NamedTuple):
 
 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
-    return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str))
+    return CovidData(
+        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+    )
 
 
 fmt = """Total COVID-19 cases in the world: {}

web_programming/crawl_google_results.py (+1 -1)

@@ -8,7 +8,7 @@
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random})
+    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file:  # only for knowing the class
         for data in res.iter_content(10000):

web_programming/crawl_google_scholar_citation.py (+3 -1)

@@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str:
     """
    Return the citation number.
     """
-    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+    )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
     return anchors[2].get_text()

web_programming/currency_converter.py (+1 -1)

@@ -176,7 +176,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params).json()
+    res = requests.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]
web_programming/current_stock_price.py (+3 -1)

@@ -4,7 +4,9 @@
 
 def stock_price(symbol: str = "AAPL") -> str:
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text
+    yahoo_finance_source = requests.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")
     specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"})

web_programming/current_weather.py (+2 -2)

@@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]:
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
         response_openweathermap = requests.get(
-            OPENWEATHERMAP_URL_BASE, params=params_openweathermap
+            OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
         response_weatherstack = requests.get(
-            WEATHERSTACK_URL_BASE, params=params_weatherstack
+            WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})
     if not weather_data:

web_programming/daily_horoscope.py (+1 -1)

@@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url).content, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text
web_programming/download_images_from_google_query.py (+3 -1)

@@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }
 
-    html = requests.get("https://www.google.com/search", params=params, headers=headers)
+    html = requests.get(
+        "https://www.google.com/search", params=params, headers=headers, timeout=10
+    )
     soup = BeautifulSoup(html.text, "html.parser")
     matched_images_data = "".join(
         re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))

web_programming/emails_from_url.py (+2 -2)

@@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
 
     try:
         # Open URL
-        r = requests.get(url)
+        r = requests.get(url, timeout=10)
 
         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
             # open URL.
             # read = requests.get(link)
             try:
-                read = requests.get(link)
+                read = requests.get(link, timeout=10)
                 # Get the valid email.
                 emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                 # If not in list then append it.

web_programming/fetch_anime_and_play.py (+5 -3)

@@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list:
     search_url = f"{BASE_URL}/search/{anime_name}"
 
     response = requests.get(
-        search_url, headers={"UserAgent": UserAgent().chrome}
+        search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )  # request the url.
 
     # Is the response ok?
@@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
 
     request_url = f"{BASE_URL}{episode_endpoint}"
 
-    response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome})
+    response = requests.get(
+        url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
+    )
     response.raise_for_status()
 
     soup = BeautifulSoup(response.text, "html.parser")
@@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
     episode_page_url = f"{BASE_URL}{episode_endpoint}"
 
     response = requests.get(
-        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
+        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()
web_programming/fetch_bbc_news.py (+1 -1)

@@ -7,7 +7,7 @@
 
 def fetch_bbc_news(bbc_news_api_key: str) -> None:
     # fetching a list of articles in json format
-    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
+    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
     # each article in the list is a dict
     for i, article in enumerate(bbc_news_page["articles"], 1):
         print(f"{i}.) {article['title']}")

web_programming/fetch_github_info.py (+1 -1)

@@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]:
         "Authorization": f"token {auth_token}",
         "Accept": "application/vnd.github.v3+json",
     }
-    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
+    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()
 
 
 if __name__ == "__main__":  # pragma: no cover

web_programming/fetch_jobs.py (+3 -1)

@@ -13,7 +13,9 @@
 
 
 def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
-    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(url + location, timeout=10).content, "html.parser"
+    )
     # This attribute finds out all the specifics listed in a job
     for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
         job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()

web_programming/fetch_quotes.py (+2 -2)

@@ -14,11 +14,11 @@
 
 
 def quote_of_the_day() -> list:
-    return requests.get(API_ENDPOINT_URL + "/today").json()
+    return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json()
 
 
 def random_quotes() -> list:
-    return requests.get(API_ENDPOINT_URL + "/random").json()
+    return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()
 
 
 if __name__ == "__main__":

web_programming/fetch_well_rx_price.py (+1 -1)

@@ -42,7 +42,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None:
         return None
 
     request_url = BASE_URL.format(drug_name, zip_code)
-    response = get(request_url)
+    response = get(request_url, timeout=10)
 
     # Is the response ok?
     response.raise_for_status()

web_programming/get_amazon_product_data.py (+3 -1)

@@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame:
         ),
         "Accept-Language": "en-US, en;q=0.5",
     }
-    soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml")
+    soup = BeautifulSoup(
+        requests.get(url, headers=header, timeout=10).text, features="lxml"
+    )
     # Initialize a Pandas dataframe with the column titles
     data_frame = DataFrame(
         columns=[

web_programming/get_imdb_top_250_movies_csv.py (+1 -1)

@@ -8,7 +8,7 @@
 
 def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
     url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
-    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
     titles = soup.find_all("td", attrs="titleColumn")
     ratings = soup.find_all("td", class_="ratingColumn imdbRating")
     return {

web_programming/get_ip_geolocation.py (+1 -1)

@@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str:
     url = f"https://ipinfo.io/{ip_address}/json"
 
     # Send a GET request to the API
-    response = requests.get(url)
+    response = requests.get(url, timeout=10)
 
     # Check if the HTTP request was successful
     response.raise_for_status()

web_programming/get_top_billionaires.py (+1 -1)

@@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]:
     Returns:
         List of top 10 realtime billionaires data.
     """
-    response_json = requests.get(API_URL).json()
+    response_json = requests.get(API_URL, timeout=10).json()
     return [
         {
             "Name": person["personName"],

web_programming/get_top_hn_posts.py (+2 -2)

@@ -5,15 +5,15 @@
 
 def get_hackernews_story(story_id: str) -> dict:
     url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
-    return requests.get(url).json()
+    return requests.get(url, timeout=10).json()
 
 
 def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
     """
     Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
     """
     url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
-    story_ids = requests.get(url).json()[:max_stories]
+    story_ids = requests.get(url, timeout=10).json()[:max_stories]
     return [get_hackernews_story(story_id) for story_id in story_ids]
web_programming/giphy.py (+1 -1)

@@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
     """
     formatted_query = "+".join(query.split())
     url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
-    gifs = requests.get(url).json()["data"]
+    gifs = requests.get(url, timeout=10).json()["data"]
     return [gif["url"] for gif in gifs]
web_programming/instagram_crawler.py (+1 -1)

@@ -39,7 +39,7 @@ def get_json(self) -> dict:
         """
         Return a dict of user information
         """
-        html = requests.get(self.url, headers=headers).text
+        html = requests.get(self.url, headers=headers, timeout=10).text
         scripts = BeautifulSoup(html, "html.parser").find_all("script")
         try:
             return extract_user_profile(scripts[4])

web_programming/instagram_pic.py (+2 -2)

@@ -15,7 +15,7 @@ def download_image(url: str) -> str:
         A message indicating the result of the operation.
     """
     try:
-        response = requests.get(url)
+        response = requests.get(url, timeout=10)
         response.raise_for_status()
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {url}: {e!r}"
@@ -30,7 +30,7 @@ def download_image(url: str) -> str:
         return f"Image URL not found in meta tag {image_meta_tag}."
 
     try:
-        image_data = requests.get(image_url).content
+        image_data = requests.get(image_url, timeout=10).content
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {image_url}: {e!r}"
     if not image_data:

web_programming/instagram_video.py (+2 -2)

@@ -5,8 +5,8 @@
 
 def download_video(url: str) -> bytes:
     base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
-    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
-    return requests.get(video_url).content
+    video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"]
+    return requests.get(video_url, timeout=10).content
 
 
 if __name__ == "__main__":

web_programming/nasa_data.py (+3 -3)

@@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict:
     Get your API Key from: https://api.nasa.gov/
     """
     url = "https://api.nasa.gov/planetary/apod"
-    return requests.get(url, params={"api_key": api_key}).json()
+    return requests.get(url, params={"api_key": api_key}, timeout=10).json()
 
 
 def save_apod(api_key: str, path: str = ".") -> dict:
     apod_data = get_apod_data(api_key)
     img_url = apod_data["url"]
     img_name = img_url.split("/")[-1]
-    response = requests.get(img_url, stream=True)
+    response = requests.get(img_url, stream=True, timeout=10)
 
     with open(f"{path}/{img_name}", "wb+") as img_file:
         shutil.copyfileobj(response.raw, img_file)
@@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict:
     Get the data of a particular query from NASA archives
     """
     url = "https://images-api.nasa.gov/search"
-    return requests.get(url, params={"q": query}).json()
+    return requests.get(url, params={"q": query}, timeout=10).json()
 
 
 if __name__ == "__main__":

web_programming/open_google_results.py (+1)

@@ -16,6 +16,7 @@
     res = requests.get(
         url,
         headers={"User-Agent": str(UserAgent().random)},
+        timeout=10,
     )
 
     try:

web_programming/random_anime_character.py (+4 -2)

@@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None:
     """
     Saves the image of anime character
     """
-    image = requests.get(image_url, headers=headers)
+    image = requests.get(image_url, headers=headers, timeout=10)
     with open(image_title, "wb") as file:
         file.write(image.content)
 
@@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]:
     """
     Returns the Title, Description, and Image Title of a random anime character .
     """
-    soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(URL, headers=headers, timeout=10).text, "html.parser"
+    )
     title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
     image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]
     description = soup.find("p", id="description").get_text()

web_programming/recaptcha_verification.py (+3 -1)

@@ -56,7 +56,9 @@ def login_using_recaptcha(request):
     client_key = request.POST.get("g-recaptcha-response")
 
     # post recaptcha response to Google's recaptcha api
-    response = requests.post(url, data={"secret": secret_key, "response": client_key})
+    response = requests.post(
+        url, data={"secret": secret_key, "response": client_key}, timeout=10
+    )
     # if the recaptcha api verified our keys
     if response.json().get("success", False):
         # authenticate the user
