@@ -35,7 +35,7 @@
 }
 
 
-def cleanup_cache(token: str):
+def cleanup_cache(token: str, cache_dir: str):
     # Retrieve the size per model for all models used in the CI.
     size_per_model = {}
     extension_per_model = {}
@@ -74,7 +74,7 @@ def cleanup_cache(token: str):
     total_required_size = sum(size_per_model.values())
     print(f"Total required disk: {total_required_size:.2f} GB")
 
-    cached_dir = huggingface_hub.scan_cache_dir()
+    cached_dir = huggingface_hub.scan_cache_dir(cache_dir)
 
     cache_size_per_model = {}
     cached_required_size_per_model = {}
@@ -121,7 +121,7 @@ def cleanup_cache(token: str):
 
         print("Removing", largest_model_id)
         for sha in cached_shas_per_model[largest_model_id]:
-            huggingface_hub.scan_cache_dir().delete_revisions(sha).execute()
+            huggingface_hub.scan_cache_dir(cache_dir).delete_revisions(sha).execute()
 
         del cache_size_per_model[largest_model_id]
 
@@ -135,10 +135,11 @@ def cleanup_cache(token: str):
     parser.add_argument(
         "--token", help="Hugging Face Hub token.", required=True, type=str
     )
+    parser.add_argument("--cache-dir", help="Hub cache path.", required=True, type=str)
     args = parser.parse_args()
 
     start = time.time()
-    extension_per_model = cleanup_cache(args.token)
+    extension_per_model = cleanup_cache(args.token, args.cache_dir)
     end = time.time()
 
     print(f"Cache cleanup done in {end - start:.2f}s")
@@ -153,6 +154,7 @@ def cleanup_cache(token: str):
             revision=revision,
             token=args.token,
             allow_patterns=f"*{extension_per_model[model_id]}",
+            cache_dir=args.cache_dir,
         )
 
     end = time.time()