Feature/import keywords as tags (#1170)

* feat: import original keywords as tags

* remove cached env

* Update frontend api types

* fix: Issues with previous tag scraping implementation

* Update category handling in backend

* Update backend tests to include group_id

* Correct type check

* Update create-url interface

* Improve tag cleaner list support

* remove builtin name shadowing

* update type annotations

* test include tags scraper

* implement scraper context for optional data

* re-add cache venv

* use continue instead of break

* remove test callback

Co-authored-by: Miroito <alban.vachette@gmail.com>
This commit is contained in:
Hayden
2022-04-23 12:23:12 -08:00
committed by GitHub
parent 75c535fb2e
commit c696dee320
27 changed files with 281 additions and 61 deletions

View File

@@ -0,0 +1,49 @@
from dataclasses import dataclass
from pydantic import UUID4
from slugify import slugify
from mealie.repos.repository_factory import AllRepositories
from mealie.schema.recipe import TagOut
from mealie.schema.recipe.recipe_category import TagSave
class NoContextException(Exception):
    """Raised when scraped extras are consumed without a ScraperContext."""
@dataclass(slots=True)
class ScraperContext:
    """Request-scoped data needed to persist optional scraped extras.

    Bundles the acting user, their group, and a repository accessor so
    that consumers (e.g. ScrapedExtras.use_tags) can read and write
    group-scoped records without reaching into the request directly.
    """

    # ID of the user who initiated the scrape
    user_id: UUID4
    # Group the scraped recipe (and any created tags) belongs to
    group_id: UUID4
    # Repository factory used for database access
    repos: AllRepositories
class ScrapedExtras:
    """Holds optional data captured during scraping (currently tag names)
    until a ScraperContext is available to persist them."""

    def __init__(self) -> None:
        # Raw tag names collected from the scraped page; empty until set_tags
        self._tags: list[str] = []

    def set_tags(self, tags: list[str]) -> None:
        """Replace the stored tag names with *tags*."""
        self._tags = tags

    def use_tags(self, ctx: ScraperContext) -> list[TagOut]:
        """Resolve the stored tag names into tag records for the context's group.

        Existing tags (matched by slug) are reused; missing ones are
        created. Returns an empty list when no tags were captured.
        """
        if not self._tags:
            return []

        tag_repo = ctx.repos.tags.by_group(ctx.group_id)

        def _resolve(name: str) -> TagOut:
            # Reuse an existing tag when its slug already exists in the group
            if existing := tag_repo.get_one(slugify(name), "slug"):
                return existing
            # Otherwise persist a new tag under the context's group
            return tag_repo.create(TagSave(name=name, group_id=ctx.group_id))

        return [_resolve(name) for name in self._tags]