Remove cached and exported models after conversion.
app.py CHANGED
@@ -1,10 +1,11 @@
 import gradio as gr
 import json
+import shutil
 import subprocess
 import urllib.parse
 from pathlib import Path
 
-from huggingface_hub import hf_hub_download, HfApi
+from huggingface_hub import hf_hub_download, HfApi, scan_cache_dir
 from coremltools import ComputeUnit
 from coremltools.models.utils import _is_macos, _macos_version
 
@@ -201,7 +202,6 @@ def on_model_change(model):
     )
 
 
-
 def convert_model(preprocessor, model, model_coreml_config,
                   compute_units, precision, tolerance, output,
                   use_past=False, seq2seq=None,
@@ -252,6 +252,26 @@ def push_to_hub(destination, directory, task, precision, token=None):
     return get_pr_url(HfApi(token=token), destination, commit_message)
 
 
+def cleanup(model_id, exported):
+    if exported:
+        shutil.rmtree(exported)
+
+    # We remove the model from the huggingface cache, so it will have to be downloaded again
+    # if the user wants to convert it for a different task or precision.
+    # Alternatively, we could remove models older than 1 day or so.
+    cache_info = scan_cache_dir()
+    try:
+        repo = next(repo for repo in cache_info.repos if repo.repo_id==model_id)
+    except StopIteration:
+        # The model was not in the cache!
+        return
+
+    if repo is not None:
+        for revision in repo.revisions:
+            delete_strategy = cache_info.delete_revisions(revision.commit_hash)
+            delete_strategy.execute()
+
+
 def convert(model_id, task,
             compute_units, precision, tolerance, framework,
             push_destination, destination_model, token,
@@ -270,8 +290,8 @@ def convert(model_id, task,
         return error_str("Please provide a token to push to the Hub.", open_discussion=False)
 
     # TODO: support legacy format
-
-    output =
+    exported_base = Path("exported")/model_id
+    output = exported_base/"coreml"/task
     output.mkdir(parents=True, exist_ok=True)
     output = output/f"{precision}_model.mlpackage"
 
@@ -324,16 +344,17 @@ def convert(model_id, task,
         )
 
         progress(0.7, "Uploading model to Hub")
-        pr_url = push_to_hub(destination_model,
+        pr_url = push_to_hub(destination_model, exported_base, task, precision, token=token)
        progress(1, "Done")
 
+        cleanup(model_id, exported_base)
+
         did_validate = _is_macos() and _macos_version() >= (12, 0)
         result = f"""### Successfully converted!
 We opened a PR to add the Core ML weights to the model repo. Please, view and merge the PR [here]({pr_url}).
 
 {f"**Note**: model could not be automatically validated as this Space is not running on macOS." if not did_validate else ""}
 """
-
         return result
     except Exception as e:
         return error_str(e, model=model_id, task=task, framework=framework, compute_units=compute_units, precision=precision, tolerance=tolerance)
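For reference, the cache-pruning pattern that the new `cleanup` function introduces can be exercised on its own. The sketch below is a minimal standalone example, not part of the commit: the model id is a hypothetical placeholder, and it batches all commit hashes into a single `delete_revisions` call instead of deleting one revision at a time as `cleanup` does. `scan_cache_dir`, `delete_revisions`, and `expected_freed_size_str` are the actual `huggingface_hub` cache APIs the commit relies on.

```python
from huggingface_hub import scan_cache_dir

MODEL_ID = "distilbert-base-uncased"  # hypothetical placeholder, not from the commit

cache_info = scan_cache_dir()  # scan the local Hugging Face hub cache
for repo in cache_info.repos:
    if repo.repo_id == MODEL_ID:
        # Gather every cached revision of this repo and delete them in one pass.
        hashes = [rev.commit_hash for rev in repo.revisions]
        strategy = cache_info.delete_revisions(*hashes)
        print(f"About to free {strategy.expected_freed_size_str}")
        strategy.execute()
```

Batching the hashes lets `expected_freed_size_str` report the total space that will be reclaimed before anything is deleted; the per-revision loop in `cleanup` has the same effect, just without that aggregate report.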