init

tests/conftest.py (new file, 299 lines)
@@ -0,0 +1,299 @@
import concurrent.futures
import time
from contextlib import contextmanager
from dataclasses import dataclass

import aqt.operations.note
import pytest
import anki.collection
from _pytest.monkeypatch import MonkeyPatch  # noqa
from pytest_anki._launch import anki_running, temporary_user  # noqa
from waitress import wasyncore

from plugin import AnkiConnect, anki_version
from plugin.edit import Edit
from plugin.util import DEFAULT_CONFIG

try:
    from PyQt6 import QtTest
except ImportError:
    from PyQt5 import QtTest


ac = AnkiConnect()


# wait for n seconds, while events are being processed
def wait(seconds):
    milliseconds = int(seconds * 1000)
    QtTest.QTest.qWait(milliseconds)  # noqa


def wait_until(booleanish_function, at_most_seconds=30):
    deadline = time.time() + at_most_seconds

    while time.time() < deadline:
        if booleanish_function():
            return
        wait(0.01)

    raise Exception(f"Function {booleanish_function} never once returned "
                    f"a positive value in {at_most_seconds} seconds")


def delete_model(model_name):
    model = ac.collection().models.byName(model_name)
    ac.collection().models.remove(model["id"])


def close_all_dialogs_and_wait_for_them_to_run_closing_callbacks():
    aqt.dialogs.closeAll(onsuccess=lambda: None)
    wait_until(aqt.dialogs.allClosed)


def get_dialog_instance(name):
    return aqt.dialogs._dialogs[name][1]  # noqa


# waitress is a WSGI server that Anki starts to serve css etc to its web views.
# it seems to have a race condition issue;
# the main loop thread is trying to `select.select` the sockets
# which a worker thread is closing because of a dead connection.
# this is especially pronounced in tests,
# as we open and close windows rapidly--and so web views and their connections.
# this small patch makes waitress skip actually closing the sockets
# (unless the server is shutting down--if it is, loop exceptions are ignored).
# while the unclosed sockets might accumulate,
# this should not pose an issue in test environment.
# see https://github.com/Pylons/waitress/issues/374
@contextmanager
def waitress_patched_to_prevent_it_from_dying():
    original_close = wasyncore.dispatcher.close
    sockets_that_must_not_be_garbage_collected = []  # lists are thread-safe

    def close(self):
        if not aqt.mw.mediaServer.is_shutdown:
            sockets_that_must_not_be_garbage_collected.append(self.socket)
            self.socket = None
        original_close(self)

    with MonkeyPatch().context() as monkey:
        monkey.setattr(wasyncore.dispatcher, "close", close)
        yield


@contextmanager
def anki_patched_to_prevent_backups():
    with MonkeyPatch().context() as monkey:
        if anki_version < (2, 1, 50):
            monkey.setitem(aqt.profiles.profileConf, "numBackups", 0)
        else:
            monkey.setattr(anki.collection.Collection, "create_backup",
                           lambda *args, **kwargs: True)
        yield


@contextmanager
def empty_anki_session_started():
    with waitress_patched_to_prevent_it_from_dying():
        with anki_patched_to_prevent_backups():
            with anki_running(
                qtbot=None,  # noqa
                enable_web_debugging=False,
                profile_name="test_user",
            ) as session:
                yield session


@contextmanager
def profile_created_and_loaded(session):
    with temporary_user(session.base, "test_user", "en_US"):
        with session.profile_loaded():
            yield session


@contextmanager
def anki_connect_config_loaded(session, web_bind_port):
    with session.addon_config_created(
        package_name="plugin",
        default_config=DEFAULT_CONFIG,
        user_config={**DEFAULT_CONFIG, "webBindPort": web_bind_port}
    ):
        yield


@contextmanager
def current_decks_and_models_etc_preserved():
    deck_names_before = ac.deckNames()
    model_names_before = ac.modelNames()

    try:
        yield
    finally:
        deck_names_after = ac.deckNames()
        model_names_after = ac.modelNames()

        deck_names_to_delete = {*deck_names_after} - {*deck_names_before}
        model_names_to_delete = {*model_names_after} - {*model_names_before}

        ac.deleteDecks(decks=deck_names_to_delete, cardsToo=True)
        for model_name in model_names_to_delete:
            delete_model(model_name)

        ac.guiDeckBrowser()


@dataclass
class Setup:
    deck_id: int
    note1_id: int
    note2_id: int
    note1_card_ids: "list[int]"
    note2_card_ids: "list[int]"
    card_ids: "list[int]"


def set_up_test_deck_and_test_model_and_two_notes():
    ac.createModel(
        modelName="test_model",
        inOrderFields=["field1", "field2"],
        cardTemplates=[
            {"Front": "{{field1}}", "Back": "{{field2}}"},
            {"Front": "{{field2}}", "Back": "{{field1}}"}
        ],
        css="* {}",
    )

    deck_id = ac.createDeck("test_deck")

    note1_id = ac.addNote(dict(
        deckName="test_deck",
        modelName="test_model",
        fields={"field1": "note1 field1", "field2": "note1 field2"},
        tags={"tag1"},
    ))

    note2_id = ac.addNote(dict(
        deckName="test_deck",
        modelName="test_model",
        fields={"field1": "note2 field1", "field2": "note2 field2"},
        tags={"tag2"},
    ))

    note1_card_ids = ac.findCards(query=f"nid:{note1_id}")
    note2_card_ids = ac.findCards(query=f"nid:{note2_id}")
    card_ids = ac.findCards(query="deck:test_deck")

    return Setup(
        deck_id=deck_id,
        note1_id=note1_id,
        note2_id=note2_id,
        note1_card_ids=note1_card_ids,
        note2_card_ids=note2_card_ids,
        card_ids=card_ids,
    )
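
# Note, derived from the helper above: the model has two card templates and two
# notes are added to it, so "test_deck" ends up with 2 x 2 = 4 cards. This is why
# tests elsewhere (e.g. test_findCards in tests/test_cards.py) assert lengths of 4.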


#############################################################################


def pytest_addoption(parser):
    parser.addoption("--tear-down-profile-after-each-test",
                     action="store_true",
                     default=True)
    parser.addoption("--no-tear-down-profile-after-each-test", "-T",
                     action="store_false",
                     dest="tear_down_profile_after_each_test")


def pytest_report_header(config):
    if config.option.forked:
        return "test isolation: perfect; each test is run in a separate process"
    if config.option.tear_down_profile_after_each_test:
        return "test isolation: good; user profile is torn down after each test"
    else:
        return "test isolation: poor; only newly created decks and models " \
               "are cleaned up between tests"


@pytest.fixture(autouse=True)
def run_background_tasks_on_main_thread(request, monkeypatch):  # noqa
    """
    Makes background operations such as card deletion execute on main thread
    and execute the callback immediately
    """
    def run_in_background(task, on_done=None, kwargs=None):
        future = concurrent.futures.Future()

        try:
            future.set_result(task(**kwargs if kwargs is not None else {}))
        except BaseException as e:
            future.set_exception(e)

        if on_done is not None:
            on_done(future)

    monkeypatch.setattr(aqt.mw.taskman, "run_in_background", run_in_background)
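
# With the patch above in place, anything scheduled via
# `aqt.mw.taskman.run_in_background(task, on_done)` runs synchronously, so its
# effects are visible on the very next line; the note-deletion helper in
# tests/test_edit.py relies on this to observe the Edit dialog reacting at once.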


# don't use run_background_tasks_on_main_thread for tests that don't run Anki
def pytest_generate_tests(metafunc):
    if (
        run_background_tasks_on_main_thread.__name__ in metafunc.fixturenames
        and session_scope_empty_session.__name__ not in metafunc.fixturenames
    ):
        metafunc.fixturenames.remove(run_background_tasks_on_main_thread.__name__)


@pytest.fixture(scope="session")
def session_scope_empty_session():
    with empty_anki_session_started() as session:
        yield session


@pytest.fixture(scope="session")
def session_scope_session_with_profile_loaded(session_scope_empty_session):
    with profile_created_and_loaded(session_scope_empty_session):
        yield session_scope_empty_session


@pytest.fixture
def session_with_profile_loaded(session_scope_empty_session, request):
    """
    Like anki_session fixture from pytest-anki, but:
      * Default profile is loaded
      * It's relying on session-wide app instance so that
        it can be used without forking every test;
        this can be useful to speed up tests and also
        to examine Anki's stdout/stderr, which is not visible with forking.
      * If command line option --no-tear-down-profile-after-each-test is passed,
        only the newly created decks and models are deleted.
        Otherwise, the profile is completely torn down after each test.
        Tearing down the profile is significantly slower.
    """
    if request.config.option.tear_down_profile_after_each_test:
        with profile_created_and_loaded(session_scope_empty_session):
            yield session_scope_empty_session
    else:
        session = request.getfixturevalue(
            session_scope_session_with_profile_loaded.__name__
        )
        with current_decks_and_models_etc_preserved():
            yield session


@pytest.fixture
def setup(session_with_profile_loaded):
    """
    Like session_with_profile_loaded, but also:
      * Added are:
          * A deck `test_deck`
          * A model `test_model` with fields `field1` and `field2`
            and two cards per note
          * Two notes with two valid cards each using the above deck and model
      * Edit dialog is registered with dialog manager
      * Any dialogs, if open, are safely closed on exit
    """
    Edit.register_with_anki()
    yield set_up_test_deck_and_test_model_and_two_notes()
    close_all_dialogs_and_wait_for_them_to_run_closing_callbacks()

tests/test_cards.py (new executable file, 93 lines)
@@ -0,0 +1,93 @@
import pytest
from anki.errors import NotFoundError  # noqa

from conftest import ac


def test_findCards(setup):
    card_ids = ac.findCards(query="deck:test_deck")
    assert len(card_ids) == 4


class TestEaseFactors:
    def test_setEaseFactors(self, setup):
        result = ac.setEaseFactors(cards=setup.card_ids, easeFactors=[4200] * 4)
        assert result == [True] * 4

    def test_setEaseFactors_with_invalid_card_id(self, setup):
        result = ac.setEaseFactors(cards=[123], easeFactors=[4200])
        assert result == [False]

    def test_getEaseFactors(self, setup):
        ac.setEaseFactors(cards=setup.card_ids, easeFactors=[4200] * 4)
        result = ac.getEaseFactors(cards=setup.card_ids)
        assert result == [4200] * 4

    def test_getEaseFactors_with_invalid_card_id(self, setup):
        assert ac.getEaseFactors(cards=[123]) == [None]


class TestSuspending:
    def test_suspend(self, setup):
        assert ac.suspend(cards=setup.card_ids) is True

    def test_suspend_fails_with_incorrect_id(self, setup):
        with pytest.raises(NotFoundError):
            assert ac.suspend(cards=[123])

    def test_areSuspended_returns_False_for_regular_cards(self, setup):
        result = ac.areSuspended(cards=setup.card_ids)
        assert result == [False] * 4

    def test_areSuspended_returns_True_for_suspended_cards(self, setup):
        ac.suspend(setup.card_ids)
        result = ac.areSuspended(cards=setup.card_ids)
        assert result == [True] * 4


def test_areDue_returns_True_for_new_cards(setup):
    result = ac.areDue(cards=setup.card_ids)
    assert result == [True] * 4


def test_getIntervals(setup):
    ac.getIntervals(cards=setup.card_ids, complete=False)
    ac.getIntervals(cards=setup.card_ids, complete=True)


def test_cardsToNotes(setup):
    result = ac.cardsToNotes(cards=setup.card_ids)
    assert {*result} == {setup.note1_id, setup.note2_id}


class TestCardInfo:
    def test_with_valid_ids(self, setup):
        result = ac.cardsInfo(cards=setup.card_ids)
        assert [item["cardId"] for item in result] == setup.card_ids

    def test_with_incorrect_id(self, setup):
        result = ac.cardsInfo(cards=[123])
        assert result == [{}]


def test_forgetCards(setup):
    ac.forgetCards(cards=setup.card_ids)


def test_relearnCards(setup):
    ac.relearnCards(cards=setup.card_ids)


class TestAnswerCards:
    def test_answerCards(self, setup):
        ac.scheduler().reset()
        answers = [
            {"cardId": a, "ease": b} for a, b in zip(setup.card_ids, [2, 1, 4, 3])
        ]
        result = ac.answerCards(answers)
        assert result == [True] * 4

    def test_answerCards_with_invalid_card_id(self, setup):
        ac.scheduler().reset()
        result = ac.answerCards([{"cardId": 123, "ease": 2}])
        assert result == [False]

tests/test_decks.py (new executable file, 74 lines)
@@ -0,0 +1,74 @@
import pytest

from conftest import ac


def test_deckNames(session_with_profile_loaded):
    result = ac.deckNames()
    assert result == ["Default"]


def test_deckNamesAndIds(session_with_profile_loaded):
    result = ac.deckNamesAndIds()
    assert result == {"Default": 1}


def test_createDeck(session_with_profile_loaded):
    ac.createDeck("foo")
    assert {*ac.deckNames()} == {"Default", "foo"}


def test_changeDeck(setup):
    ac.changeDeck(cards=setup.card_ids, deck="bar")
    assert "bar" in ac.deckNames()


def test_deleteDeck(setup):
    before = ac.deckNames()
    ac.deleteDecks(decks=["test_deck"], cardsToo=True)
    after = ac.deckNames()
    assert {*before} - {*after} == {"test_deck"}


def test_deleteDeck_must_be_called_with_cardsToo_set_to_True_on_later_api(setup):
    with pytest.raises(Exception):
        ac.deleteDecks(decks=["test_deck"])
    with pytest.raises(Exception):
        ac.deleteDecks(decks=["test_deck"], cardsToo=False)


def test_getDeckConfig(session_with_profile_loaded):
    result = ac.getDeckConfig(deck="Default")
    assert result["name"] == "Default"


def test_saveDeckConfig(session_with_profile_loaded):
    config = ac.getDeckConfig(deck="Default")
    result = ac.saveDeckConfig(config=config)
    assert result is True


def test_setDeckConfigId(session_with_profile_loaded):
    result = ac.setDeckConfigId(decks=["Default"], configId=1)
    assert result is True


def test_cloneDeckConfigId(session_with_profile_loaded):
    result = ac.cloneDeckConfigId(cloneFrom=1, name="test")
    assert isinstance(result, int)


def test_removeDeckConfigId(session_with_profile_loaded):
    new_config_id = ac.cloneDeckConfigId(cloneFrom=1, name="test")
    assert ac.removeDeckConfigId(configId=new_config_id) is True


def test_removeDeckConfigId_fails_with_invalid_id(session_with_profile_loaded):
    new_config_id = ac.cloneDeckConfigId(cloneFrom=1, name="test")
    assert ac.removeDeckConfigId(configId=new_config_id) is True
    assert ac.removeDeckConfigId(configId=new_config_id) is False


def test_getDeckStats(session_with_profile_loaded):
    result = ac.getDeckStats(decks=["Default"])
    assert list(result.values())[0]["name"] == "Default"

tests/test_edit.py (new file, 253 lines)
@@ -0,0 +1,253 @@
from dataclasses import dataclass
from unittest.mock import MagicMock

import aqt.operations.note
import pytest

from conftest import get_dialog_instance, wait_until
from plugin.edit import Edit, DecentPreviewer, history, DOMAIN_PREFIX


NOTHING = object()


class Value:
    def __init__(self):
        self.value = NOTHING

    def set(self, value):
        self.value = value

    def has_been_set(self):
        return self.value is not NOTHING


@dataclass
class JavascriptDialogButtonManipulator:
    dialog: ...

    def eval_js(self, js):
        evaluation_result = Value()
        self.dialog.editor.web.evalWithCallback(js, evaluation_result.set)
        wait_until(evaluation_result.has_been_set)
        return evaluation_result.value

    def wait_until_toolbar_buttons_are_ready(self):
        ready_flag = Value()
        self.dialog.editor._links["set_ready_flag"] = ready_flag.set  # noqa
        self.dialog.run_javascript_after_toolbar_ready("pycmd('set_ready_flag');")
        wait_until(ready_flag.has_been_set)

    # preview button doesn't have an id, so find by label
    def click_preview_button(self):
        self.eval_js("""
            document.evaluate("//button[text()='Preview']", document)
                .iterateNext()
                .click()
        """)

    def click_button(self, button_id):
        self.eval_js(f"""
            document.getElementById("{DOMAIN_PREFIX}{button_id}").click()
        """)

    def is_button_disabled(self, button_id):
        return self.eval_js(f"""
            document.getElementById("{DOMAIN_PREFIX}{button_id}").disabled
        """)


##############################################################################


def test_edit_dialog_opens(setup):
    Edit.open_dialog_and_show_note_with_id(setup.note1_id)


def test_edit_dialog_opens_only_once(setup):
    dialog1 = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
    dialog2 = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
    assert dialog1 is dialog2


def test_edit_dialog_fails_to_open_with_invalid_note(setup):
    with pytest.raises(Exception):
        Edit.open_dialog_and_show_note_with_id(123)


class TestBrowser:
    @staticmethod
    def get_selected_card_ids():
        return get_dialog_instance("Browser").table.get_selected_card_ids()

    def test_dialog_opens(self, setup):
        dialog = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        dialog.show_browser()

    def test_selects_cards_of_last_note(self, setup):
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        Edit.open_dialog_and_show_note_with_id(setup.note2_id).show_browser()

        assert {*self.get_selected_card_ids()} == {*setup.note2_card_ids}

    def test_selects_cards_of_note_before_last_after_previous_button_pressed(self, setup):
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        dialog = Edit.open_dialog_and_show_note_with_id(setup.note2_id)

        def verify_that_the_table_shows_note2_cards_then_note1_cards():
            get_dialog_instance("Browser").table.select_all()
            assert {*self.get_selected_card_ids()[:2]} == {*setup.note2_card_ids}
            assert {*self.get_selected_card_ids()[2:]} == {*setup.note1_card_ids}

        dialog.show_previous()
        dialog.show_browser()
        assert {*self.get_selected_card_ids()} == {*setup.note1_card_ids}
        verify_that_the_table_shows_note2_cards_then_note1_cards()

        dialog.show_next()
        dialog.show_browser()
        assert {*self.get_selected_card_ids()} == {*setup.note2_card_ids}
        verify_that_the_table_shows_note2_cards_then_note1_cards()


class TestPreviewDialog:
    def test_opens(self, setup):
        edit_dialog = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        edit_dialog.show_preview()

    @pytest.fixture
    def dialog(self, setup):
        edit_dialog = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        preview_dialog: DecentPreviewer = edit_dialog.show_preview()

        def press_next_button(times=0):
            for _ in range(times):
                preview_dialog._last_render = 0  # render without delay
                preview_dialog._on_next()

        preview_dialog.press_next_button = press_next_button

        yield preview_dialog

    @pytest.mark.parametrize(
        "next_button_presses, current_card, "
        "showing_question_only, previous_enabled, next_enabled",
        [
            pytest.param(0, 0, True, False, True,
                         id="next button pressed 0 times; first card, question"),
            pytest.param(1, 0, False, True, True,
                         id="next button pressed 1 time; first card, answer"),
            pytest.param(2, 1, True, True, True,
                         id="next button pressed 2 times; second card, question"),
            pytest.param(3, 1, False, True, False,
                         id="next button pressed 3 times; second card, answer"),
            pytest.param(4, 1, False, True, False,
                         id="next button pressed 4 times; second card still, answer"),
        ]
    )
    def test_navigation(self, dialog, next_button_presses, current_card,
                        showing_question_only, previous_enabled, next_enabled):
        dialog.press_next_button(times=next_button_presses)
        assert dialog.adapter.current == current_card
        assert dialog.showing_question_and_can_show_answer() is showing_question_only
        assert dialog._should_enable_prev() is previous_enabled
        assert dialog._should_enable_next() is next_enabled


class TestButtons:
    @pytest.fixture
    def manipulator(self, setup):
        dialog = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        return JavascriptDialogButtonManipulator(dialog)

    def test_preview_button_can_be_clicked(self, manipulator, monkeypatch):
        monkeypatch.setattr(manipulator.dialog, "show_preview", MagicMock())
        manipulator.wait_until_toolbar_buttons_are_ready()
        manipulator.click_preview_button()
        wait_until(lambda: manipulator.dialog.show_preview.call_count == 1)

    def test_addon_buttons_can_be_clicked(self, manipulator):
        manipulator.wait_until_toolbar_buttons_are_ready()
        manipulator.click_button(button_id="browse")
        wait_until(lambda: get_dialog_instance("Browser") is not None)

    def test_addon_buttons_get_disabled_enabled(self, setup, manipulator):
        Edit.open_dialog_and_show_note_with_id(setup.note2_id)
        manipulator.wait_until_toolbar_buttons_are_ready()
        assert manipulator.is_button_disabled("previous") is False
        assert manipulator.is_button_disabled("next") is True


class TestHistory:
    @pytest.fixture(autouse=True)
    def cleanup(self):
        history.note_ids = []

    def test_single_note(self, setup):
        assert history.note_ids == []
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        assert history.note_ids == [setup.note1_id]

    def test_two_notes(self, setup):
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        Edit.open_dialog_and_show_note_with_id(setup.note2_id)
        assert history.note_ids == [setup.note1_id, setup.note2_id]

    def test_old_note_reopened(self, setup):
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        Edit.open_dialog_and_show_note_with_id(setup.note2_id)
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        assert history.note_ids == [setup.note2_id, setup.note1_id]

    def test_navigation(self, setup):
        dialog = Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        Edit.open_dialog_and_show_note_with_id(setup.note2_id)

        dialog.show_previous()
        assert dialog.note.id == setup.note1_id

        dialog.show_previous()
        assert dialog.note.id == setup.note1_id

        dialog.show_next()
        assert dialog.note.id == setup.note2_id

        dialog.show_next()
        assert dialog.note.id == setup.note2_id


class TestNoteDeletionElsewhere:
    @pytest.fixture
    def delete_note(self, run_background_tasks_on_main_thread):
        """
        Yields a function that accepts a single note id and deletes the note,
        running the required hooks in sync
        """
        return (
            lambda note_id: aqt.operations.note
                .remove_notes(parent=None, note_ids=[note_id])  # noqa
                .run_in_background()
        )

    @staticmethod
    def edit_dialog_is_open():
        return aqt.dialogs._dialogs[Edit.dialog_registry_tag][1] is not None  # noqa

    @pytest.fixture
    def dialog(self, setup):
        Edit.open_dialog_and_show_note_with_id(setup.note1_id)
        yield Edit.open_dialog_and_show_note_with_id(setup.note2_id)

    def test_one_of_the_history_notes_is_deleted_and_dialog_stays(self,
            setup, dialog, delete_note):
        assert dialog.note.id == setup.note2_id

        delete_note(setup.note2_id)
        assert self.edit_dialog_is_open()
        assert dialog.note.id == setup.note1_id

    def test_all_of_the_history_notes_are_deleted_and_dialog_closes(self,
            setup, dialog, delete_note):
        delete_note(setup.note1_id)
        delete_note(setup.note2_id)
        assert not self.edit_dialog_is_open()

tests/test_graphical.py (new executable file, 139 lines)
@@ -0,0 +1,139 @@
import pytest
from unittest import mock

from conftest import ac, anki_version, wait_until, \
    close_all_dialogs_and_wait_for_them_to_run_closing_callbacks, \
    get_dialog_instance


def test_guiBrowse(setup):
    ac.guiBrowse()


def test_guiDeckBrowser(setup):
    ac.guiDeckBrowser()


# todo executing this test without running background tasks on main thread
# rarely causes media server (`aqt.mediasrv`) to fail:
# its `run` method raises OSError: invalid file descriptor.
# this can cause other tests to fail to tear down;
# particularly, any dialogs with editor may fail to close
# due to their trying to save the note first, which is done via web view,
# which fails to complete due to corrupt media server. investigate?
def test_guiCheckDatabase(setup, run_background_tasks_on_main_thread):
    ac.guiCheckDatabase()


def test_guiDeckOverview(setup):
    assert ac.guiDeckOverview(name="test_deck") is True


def test_guiImportFile(setup):
    if anki_version >= (2, 1, 52):
        with mock.patch('aqt.import_export.importing.prompt_for_file_then_import') as mock_prompt_for_file_then_import:
            mock_prompt_for_file_then_import.return_value = True
            ac.guiImportFile()


class TestAddCards:
    note = {
        "deckName": "test_deck",
        "modelName": "Basic",
        "fields": {"Front": "new front1", "Back": "new back1"},
        "tags": ["tag1"]
    }

    # an actual small image, you can see it if you run the test with GUI
    # noinspection SpellCheckingInspection
    base64_gif = "R0lGODlhBQAEAHAAACwAAAAABQAEAIH///8AAAAAAAAAAAACB0QMqZcXDwoAOw=="

    picture = {
        "picture": [
            {
                "data": base64_gif,
                "filename": "smiley.gif",
                "fields": ["Front"],
            }
        ]
    }

    @staticmethod
    def click_on_add_card_dialog_save_button():
        dialog = get_dialog_instance("AddCards")
        dialog.addButton.click()

    # todo previously, these tests were verifying
    # that the return value of `guiAddCards` is `int`.
    # while it is indeed `int`, on modern Anki it is also always a `0`,
    # so we consider it useless. update documentation?
    def test_without_note(self, setup):
        ac.guiAddCards()

    def test_with_note(self, setup):
        ac.guiAddCards(note=self.note)
        self.click_on_add_card_dialog_save_button()
        close_all_dialogs_and_wait_for_them_to_run_closing_callbacks()

        assert len(ac.findCards(query="new")) == 1

    def test_with_note_and_a_picture(self, setup):
        ac.guiAddCards(note={**self.note, **self.picture})
        self.click_on_add_card_dialog_save_button()
        close_all_dialogs_and_wait_for_them_to_run_closing_callbacks()

        assert len(ac.findCards(query="new")) == 1
        assert ac.retrieveMediaFile(filename="smiley.gif") == self.base64_gif


class TestReviewActions:
    @pytest.fixture
    def reviewing_started(self, setup):
        assert ac.guiDeckReview(name="test_deck") is True

    def test_startCardTimer(self, reviewing_started):
        assert ac.guiStartCardTimer() is True

    def test_guiShowQuestion(self, reviewing_started):
        assert ac.guiShowQuestion() is True
        assert ac.reviewer().state == "question"

    def test_guiShowAnswer(self, reviewing_started):
        assert ac.guiShowAnswer() is True
        assert ac.reviewer().state == "answer"

    def test_guiAnswerCard(self, reviewing_started):
        ac.guiShowAnswer()
        reviews_before = ac.cardReviews(deck="test_deck", startID=0)
        assert ac.guiAnswerCard(ease=4) is True

        reviews_after = ac.cardReviews(deck="test_deck", startID=0)
        assert len(reviews_after) == len(reviews_before) + 1

    def test_guiUndo(self, reviewing_started):
        ac.guiShowAnswer()
        reviews_before = ac.cardReviews(deck="test_deck", startID=0)
        assert ac.guiAnswerCard(ease=4) is True

        reviews_after_answer = ac.cardReviews(deck="test_deck", startID=0)
        assert len(reviews_after_answer) == len(reviews_before) + 1

        assert ac.guiUndo() is True

        reviews_after_undo = ac.cardReviews(deck="test_deck", startID=0)
        assert len(reviews_after_undo) == len(reviews_before)


class TestSelectedNotes:
    def test_with_valid_deck_query(self, setup):
        ac.guiBrowse(query="deck:test_deck")
        wait_until(ac.guiSelectedNotes)
        assert ac.guiSelectedNotes()[0] in {setup.note1_id, setup.note2_id}

    def test_with_invalid_deck_query(self, setup):
        ac.guiBrowse(query="deck:test_deck")
        wait_until(ac.guiSelectedNotes)

        ac.guiBrowse(query="deck:invalid")
        wait_until(lambda: not ac.guiSelectedNotes())

tests/test_media.py (new executable file, 56 lines)
@@ -0,0 +1,56 @@
import base64
import os.path

from conftest import ac


FILENAME = "_test.txt"
BASE64_DATA_1 = base64.b64encode(b"test 1").decode("ascii")
BASE64_DATA_2 = base64.b64encode(b"test 2").decode("ascii")


def store_one_media_file():
    return ac.storeMediaFile(filename=FILENAME, data=BASE64_DATA_1)


def store_two_media_files():
    filename_1 = ac.storeMediaFile(filename=FILENAME, data=BASE64_DATA_1)
    filename_2 = ac.storeMediaFile(filename=FILENAME, data=BASE64_DATA_2,
                                   deleteExisting=False)
    return filename_1, filename_2


##############################################################################


def test_storeMediaFile_one_file(session_with_profile_loaded):
    filename_1 = store_one_media_file()
    assert FILENAME == filename_1


def test_storeMediaFile_two_files_with_the_same_name(session_with_profile_loaded):
    filename_1, filename_2 = store_two_media_files()
    assert FILENAME == filename_1 != filename_2


def test_retrieveMediaFile(session_with_profile_loaded):
    store_one_media_file()
    result = ac.retrieveMediaFile(filename=FILENAME)
    assert result == BASE64_DATA_1


def test_getMediaFilesNames(session_with_profile_loaded):
    filenames = store_two_media_files()
    result = ac.getMediaFilesNames(pattern="_tes*.txt")
    assert {*filenames} == {*result}


def test_deleteMediaFile(session_with_profile_loaded):
    filename_1, filename_2 = store_two_media_files()
    ac.deleteMediaFile(filename=filename_1)
    assert ac.retrieveMediaFile(filename=filename_1) is False
    assert ac.getMediaFilesNames(pattern="_tes*.txt") == [filename_2]


def test_getMediaDirPath(session_with_profile_loaded):
    assert os.path.isdir(ac.getMediaDirPath())

tests/test_misc.py (new executable file, 77 lines)
@@ -0,0 +1,77 @@
import os

import aqt
import pytest

from conftest import ac, anki_connect_config_loaded, \
    set_up_test_deck_and_test_model_and_two_notes, \
    current_decks_and_models_etc_preserved, wait


# version is retrieved from config
def test_version(session_with_profile_loaded):
    with anki_connect_config_loaded(
        session=session_with_profile_loaded,
        web_bind_port=0,
    ):
        assert ac.version() == 6


def test_reloadCollection(setup):
    ac.reloadCollection()


def test_apiReflect(setup):
    result = ac.apiReflect(
        scopes=["actions", "invalidType"],
        actions=["apiReflect", "invalidMethod"]
    )
    assert result == {
        "scopes": ["actions"],
        "actions": ["apiReflect"]
    }


class TestProfiles:
    def test_getProfiles(self, session_with_profile_loaded):
        result = ac.getProfiles()
        assert result == ["test_user"]

    # waiting a little while gets rid of the cryptic warning:
    # Qt warning: QXcbConnection: XCB error: 8 (BadMatch), sequence: 658,
    # resource id: 2097216, major code: 42 (SetInputFocus), minor code: 0
    def test_loadProfile(self, session_with_profile_loaded):
        aqt.mw.unloadProfileAndShowProfileManager()
        wait(0.1)
        ac.loadProfile(name="test_user")


class TestExportImport:
    # since Anki 2.1.50, exporting media for some wild reason
    # will change the current working directory, which then gets removed.
    # see `exporting.py`, ctrl-f `os.chdir(self.mediaDir)`
    @pytest.fixture(autouse=True)
    def current_working_directory_preserved(self):
        cwd = os.getcwd()
        yield

        try:
            os.getcwd()
        except FileNotFoundError:
            os.chdir(cwd)

    def test_exportPackage(self, session_with_profile_loaded, setup):
        filename = session_with_profile_loaded.base + "/export.apkg"
        ac.exportPackage(deck="test_deck", path=filename)

    def test_importPackage(self, session_with_profile_loaded):
        filename = session_with_profile_loaded.base + "/export.apkg"

        with current_decks_and_models_etc_preserved():
            set_up_test_deck_and_test_model_and_two_notes()
            ac.exportPackage(deck="test_deck", path=filename)

        with current_decks_and_models_etc_preserved():
            assert "test_deck" not in ac.deckNames()
            ac.importPackage(path=filename)
            assert "test_deck" in ac.deckNames()

tests/test_models.py (new executable file, 295 lines)
@@ -0,0 +1,295 @@
from conftest import ac
from plugin import anki_version


def test_modelNames(setup):
    result = ac.modelNames()
    assert "test_model" in result


def test_modelNamesAndIds(setup):
    result = ac.modelNamesAndIds()
    assert isinstance(result["test_model"], int)


def test_modelFieldNames(setup):
    result = ac.modelFieldNames(modelName="test_model")
    assert result == ["field1", "field2"]


def test_modelFieldDescriptions(setup):
    result = ac.modelFieldDescriptions(modelName="test_model")
    assert result == ["", ""]


def test_modelFieldFonts(setup):
    result = ac.modelFieldFonts(modelName="test_model")
    assert result == {
        "field1": {
            "font": "Arial",
            "size": 20,
        },
        "field2": {
            "font": "Arial",
            "size": 20,
        },
    }


def test_modelFieldsOnTemplates(setup):
    result = ac.modelFieldsOnTemplates(modelName="test_model")
    assert result == {
        "Card 1": [["field1"], ["field2"]],
        "Card 2": [["field2"], ["field1"]],
    }


class TestCreateModel:
    createModel_kwargs = {
        "modelName": "test_model_foo",
        "inOrderFields": ["field1", "field2"],
        "cardTemplates": [{"Front": "{{field1}}", "Back": "{{field2}}"}],
    }

    def test_createModel_without_css(self, session_with_profile_loaded):
        ac.createModel(**self.createModel_kwargs)

    def test_createModel_with_css(self, session_with_profile_loaded):
        ac.createModel(**self.createModel_kwargs, css="* {}")


class TestStyling:
    def test_modelStyling(self, setup):
        result = ac.modelStyling(modelName="test_model")
        assert result == {"css": "* {}"}

    def test_updateModelStyling(self, setup):
        ac.updateModelStyling(model={
            "name": "test_model",
            "css": "* {color: red;}"
        })

        assert ac.modelStyling(modelName="test_model") == {
            "css": "* {color: red;}"
        }


class TestModelTemplates:
    def test_modelTemplates(self, setup):
        result = ac.modelTemplates(modelName="test_model")
        assert result == {
            "Card 1": {"Front": "{{field1}}", "Back": "{{field2}}"},
            "Card 2": {"Front": "{{field2}}", "Back": "{{field1}}"}
        }

    def test_updateModelTemplates(self, setup):
        ac.updateModelTemplates(model={
            "name": "test_model",
            "templates": {"Card 1": {"Front": "{{field1}}", "Back": "foo"}}
        })

        assert ac.modelTemplates(modelName="test_model") == {
            "Card 1": {"Front": "{{field1}}", "Back": "foo"},
            "Card 2": {"Front": "{{field2}}", "Back": "{{field1}}"}
        }


def test_findAndReplaceInModels(setup):
    ac.findAndReplaceInModels(
        modelName="test_model",
        findText="}}",
        replaceText="}}!",
        front=True,
        back=False,
        css=False,
    )

    ac.findAndReplaceInModels(
        modelName="test_model",
        findText="}}",
        replaceText="}}?",
        front=True,
        back=True,
        css=False,
    )

    ac.findAndReplaceInModels(
        modelName="test_model",
        findText="}",
        replaceText="color: blue;}",
        front=False,
        back=False,
        css=True,
    )

    assert ac.modelTemplates(modelName="test_model") == {
        "Card 1": {"Front": "{{field1}}?!", "Back": "{{field2}}?"},
        "Card 2": {"Front": "{{field2}}?!", "Back": "{{field1}}?"}
    }

    assert ac.modelStyling(modelName="test_model") == {
        "css": "* {color: blue;}"
    }


class TestModelTemplateManipulation:
    def test_modelTemplateRename(self, setup):
        ac.modelTemplateRename(
            modelName="test_model",
            oldTemplateName="Card 1",
            newTemplateName="Card 1 Renamed",
        )

        result = ac.modelTemplates(modelName="test_model")
        assert result == {
            "Card 1 Renamed": {"Front": "{{field1}}", "Back": "{{field2}}"},
            "Card 2": {"Front": "{{field2}}", "Back": "{{field1}}"}
        }

    def test_modelTemplateReposition(self, setup):
        # There currently isn't a way to test for order, so this is just a
        # smoke test for now
        ac.modelTemplateReposition(
            modelName="test_model",
            templateName="Card 1",
            index=1,
        )

    def test_modelTemplateAdd(self, setup):
        ac.modelTemplateAdd(
            modelName="test_model",
            template={
                "Name": "Card 3",
                "Front": "{{field1}} Card 3",
                "Back": "{{field2}}",
            }
        )

        result = ac.modelTemplates(modelName="test_model")
        assert result == {
            "Card 1": {"Front": "{{field1}}", "Back": "{{field2}}"},
            "Card 2": {"Front": "{{field2}}", "Back": "{{field1}}"},
            "Card 3": {"Front": "{{field1}} Card 3", "Back": "{{field2}}"},
        }

    def test_modelTemplateRemove(self, setup):
        ac.modelTemplateRemove(
            modelName="test_model",
            templateName="Card 2"
        )

        result = ac.modelTemplates(modelName="test_model")
        assert result == {
            "Card 1": {"Front": "{{field1}}", "Back": "{{field2}}"},
        }


class TestModelFieldNames:
    def test_modelFieldRename(self, setup):
        ac.modelFieldRename(
            modelName="test_model",
            oldFieldName="field1",
            newFieldName="foo",
        )

        result = ac.modelFieldNames(modelName="test_model")
        assert result == ["foo", "field2"]

    def test_modelFieldReposition(self, setup):
        ac.modelFieldReposition(
            modelName="test_model",
            fieldName="field1",
            index=2,
        )

        result = ac.modelFieldNames(modelName="test_model")
        assert result == ["field2", "field1"]

    def test_modelFieldAdd(self, setup):
        ac.modelFieldAdd(
            modelName="test_model",
            fieldName="Foo",
        )

        result = ac.modelFieldNames(modelName="test_model")
        assert result == ["field1", "field2", "Foo"]

    def test_modelFieldAdd_with_index(self, setup):
        ac.modelFieldAdd(
            modelName="test_model",
            fieldName="Foo",
            index=1,
        )

        result = ac.modelFieldNames(modelName="test_model")
        assert result == ["field1", "Foo", "field2"]

    def test_modelFieldRemove(self, setup):
        # makes sure that the front template always has a field,
        # and makes sure that the front template of the cards are not the same
        ac.updateModelTemplates(model={
            "name": "test_model",
            "templates": {"Card 1": {"Front": "{{field2}} {{field2}}", "Back": "foo"}}
        })

        ac.modelFieldRemove(
            modelName="test_model",
            fieldName="field1",
        )

        result = ac.modelFieldNames(modelName="test_model")
        assert result == ["field2"]

    def test_modelFieldSetFont(self, setup):
        ac.modelFieldSetFont(
            modelName="test_model",
            fieldName="field1",
            font="Courier",
        )

        result = ac.modelFieldFonts(modelName="test_model")
        assert result == {
            "field1": {
                "font": "Courier",
                "size": 20,
            },
            "field2": {
                "font": "Arial",
                "size": 20,
            },
        }

    def test_modelFieldSetFontSize(self, setup):
        ac.modelFieldSetFontSize(
            modelName="test_model",
            fieldName="field2",
            fontSize=16,
        )

        result = ac.modelFieldFonts(modelName="test_model")
        assert result == {
            "field1": {
                "font": "Arial",
                "size": 20,
            },
            "field2": {
                "font": "Arial",
                "size": 16,
            },
        }

    def test_modelFieldSetDescription(self, setup):
        set_desc = ac.modelFieldSetDescription(
            modelName="test_model",
            fieldName="field1",
            description="test description",
        )

        result = ac.modelFieldDescriptions(modelName="test_model")

        if anki_version < (2, 1, 50):
            assert not set_desc
            assert result == ["", ""]
        else:
            assert set_desc
            assert result == ["test description", ""]

tests/test_notes.py (new executable file, 163 lines)
@@ -0,0 +1,163 @@
import pytest
from anki.errors import NotFoundError  # noqa

from conftest import ac


def make_note(*, front="front1", allow_duplicates=False):
    note = {
        "deckName": "test_deck",
        "modelName": "Basic",
        "fields": {"Front": front, "Back": "back1"},
        "tags": ["tag1"],
    }

    if allow_duplicates:
        return {**note, "options": {"allowDuplicate": True}}
    else:
        return note


##############################################################################


class TestNoteAddition:
    def test_addNote(self, setup):
        result = ac.addNote(note=make_note())
        assert isinstance(result, int)

    def test_addNote_will_not_allow_duplicates_by_default(self, setup):
        ac.addNote(make_note())
        with pytest.raises(Exception, match="it is a duplicate"):
            ac.addNote(make_note())

    def test_addNote_will_allow_duplicates_if_options_say_aye(self, setup):
        ac.addNote(make_note())
        ac.addNote(make_note(allow_duplicates=True))

    def test_addNotes(self, setup):
        result = ac.addNotes(notes=[
            make_note(front="foo"),
            make_note(front="bar"),
            make_note(front="foo"),
        ])

        assert len(result) == 3
        assert isinstance(result[0], int)
        assert isinstance(result[1], int)
        assert result[2] is None

    def test_bug164(self, setup):
        note = {
            "deckName": "test_deck",
            "modelName": "Basic",
            "fields": {"Front": " Whitespace\n", "Back": ""},
            "options": {"allowDuplicate": False, "duplicateScope": "deck"}
        }

        ac.addNote(note=note)
        with pytest.raises(Exception, match="it is a duplicate"):
            ac.addNote(note=note)


def test_notesInfo(setup):
    result = ac.notesInfo(notes=[setup.note1_id])
    assert len(result) == 1
    assert result[0]["noteId"] == setup.note1_id
    assert result[0]["tags"] == ["tag1"]
    assert result[0]["fields"]["field1"]["value"] == "note1 field1"


class TestTags:
    def test_addTags(self, setup):
        ac.addTags(notes=[setup.note1_id], tags="tag2")
        tags = ac.notesInfo(notes=[setup.note1_id])[0]["tags"]
        assert {*tags} == {"tag1", "tag2"}

    def test_getTags(self, setup):
        result = ac.getTags()
        assert {*result} == {"tag1", "tag2"}

    def test_removeTags(self, setup):
        ac.removeTags(notes=[setup.note2_id], tags="tag2")
        assert ac.notesInfo(notes=[setup.note2_id])[0]["tags"] == []

    def test_replaceTags(self, setup):
        ac.replaceTags(notes=[setup.note1_id, 123],
                       tag_to_replace="tag1", replace_with_tag="foo")
        notes_info = ac.notesInfo(notes=[setup.note1_id])
        assert notes_info[0]["tags"] == ["foo"]

    def test_replaceTagsInAllNotes(self, setup):
        ac.replaceTagsInAllNotes(tag_to_replace="tag1", replace_with_tag="foo")
        notes_info = ac.notesInfo(notes=[setup.note1_id])
        assert notes_info[0]["tags"] == ["foo"]

    def test_clearUnusedTags(self, setup):
        ac.removeTags(notes=[setup.note2_id], tags="tag2")
        ac.clearUnusedTags()
        assert ac.getTags() == ["tag1"]

    def test_updateNoteTags_and_getNoteTags(self, setup):
        ac.updateNoteTags(note=setup.note1_id, tags="footag")
        assert ac.getNoteTags(note=setup.note1_id) == ["footag"]
        ac.updateNoteTags(note=setup.note1_id, tags=["foo", "bar", "baz"])
        assert len(ac.getNoteTags(note=setup.note1_id)) == 3


class TestUpdateNoteFields:
    def test_updateNoteFields(self, setup):
        new_fields = {"field1": "foo", "field2": "bar"}
        good_note = {"id": setup.note1_id, "fields": new_fields}
        ac.updateNoteFields(note=good_note)
        notes_info = ac.notesInfo(notes=[setup.note1_id])
        assert notes_info[0]["fields"]["field2"]["value"] == "bar"

    def test_updateNoteFields_will_not_update_invalid_notes(self, setup):
        bad_note = {"id": 123, "fields": make_note()["fields"]}
        with pytest.raises(NotFoundError):
            ac.updateNoteFields(note=bad_note)


class TestUpdateNote:
    def test_updateNote(self, setup):
        new_fields = {"field1": "frontbar", "field2": "backbar"}
        new_tags = ["foobar"]
        good_note = {"id": setup.note1_id, "fields": new_fields, "tags": new_tags}
        ac.updateNote(note=good_note)
        notes_info = ac.notesInfo(notes=[setup.note1_id])
        assert notes_info[0]["fields"]["field2"]["value"] == "backbar"
        assert notes_info[0]["tags"] == ["foobar"]

    def test_updateNote_requires_either_fields_or_tags(self, setup):
        with pytest.raises(Exception, match="ust provide"):
            ac.updateNote(note={"id": setup.note1_id})


class TestCanAddNotes:
    foo_bar_notes = [make_note(front="foo"), make_note(front="bar")]

    def test_canAddNotes(self, setup):
        result = ac.canAddNotes(notes=self.foo_bar_notes)
        assert result == [True, True]

    def test_canAddNotes_will_not_add_duplicates_if_options_do_not_say_aye(self, setup):
        ac.addNotes(notes=self.foo_bar_notes)
        notes = [
            make_note(front="foo"),
            make_note(front="baz"),
            make_note(front="foo", allow_duplicates=True)
        ]
        result = ac.canAddNotes(notes=notes)
        assert result == [False, True, True]


def test_findNotes(setup):
    result = ac.findNotes(query="deck:test_deck")
    assert {*result} == {setup.note1_id, setup.note2_id}


def test_deleteNotes(setup):
    ac.deleteNotes(notes=[setup.note1_id, setup.note2_id])
    result = ac.findNotes(query="deck:test_deck")
    assert result == []

tests/test_server.py (new file, 198 lines)
@@ -0,0 +1,198 @@
import json
import multiprocessing
import time
import urllib.error
import urllib.request
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial

import pytest
from pytest_anki._launch import anki_running  # noqa
from pytest_anki._util import find_free_port  # noqa

from plugin import AnkiConnect
from tests.conftest import wait_until, \
    empty_anki_session_started, \
    anki_connect_config_loaded, \
    profile_created_and_loaded


@contextmanager
def function_running_in_a_process(context, function):
    process = context.Process(target=function)
    process.start()

    try:
        yield process
    finally:
        process.join()


# todo stop the server?
@contextmanager
def anki_connect_web_server_started():
    plugin = AnkiConnect()
    plugin.startWebServer()
    yield plugin


@dataclass
class Client:
    port: int

    @staticmethod
    def make_request(action, **params):
        return {"action": action, "params": params, "version": 6}

    def send_request(self, action, **params):
        request_data = self.make_request(action, **params)
        json_bytes = json.dumps(request_data).encode("utf-8")
        return json.loads(self.send_bytes(json_bytes))

    def send_bytes(self, bytes, headers={}):  # noqa
        request_url = f"http://localhost:{self.port}"
        request = urllib.request.Request(request_url, bytes, headers)
        response = urllib.request.urlopen(request).read()
        return response

    def wait_for_web_server_to_come_live(self, at_most_seconds=30):
        deadline = time.time() + at_most_seconds

        while time.time() < deadline:
            try:
                self.send_request("version")
                return
            except urllib.error.URLError:
                time.sleep(0.01)

        raise Exception(f"Anki-Connect web server did not come live "
                        f"in {at_most_seconds} seconds")


# spawning requires a top-level function for pickling
def external_anki_entry_function(web_bind_port, exit_event):
    with empty_anki_session_started() as session:
        with anki_connect_config_loaded(session, web_bind_port):
            with anki_connect_web_server_started():
                with profile_created_and_loaded(session):
                    wait_until(exit_event.is_set)


@contextmanager
def external_anki_running(process_run_method):
    context = multiprocessing.get_context(process_run_method)
    exit_event = context.Event()
    web_bind_port = find_free_port()
    function = partial(external_anki_entry_function, web_bind_port, exit_event)

    with function_running_in_a_process(context, function) as process:
        client = Client(port=web_bind_port)
        client.wait_for_web_server_to_come_live()

        try:
            yield client
        finally:
            exit_event.set()

    assert process.exitcode == 0


# if a Qt app was already launched in current process,
# launching a new Qt app, even from grounds up, fails or hangs.
# of course, this includes forked processes. therefore,
# * if launching without --forked, use the `spawn` process run method;
# * otherwise, use the `fork` method, as it is significantly faster.
# with --forked, each test has its fixtures assembled inside the fork,
# which means that when the test begins, Qt was never started in the fork.
@pytest.fixture(scope="module")
def external_anki(request):
    """
    Runs Anki in an external process, with the plugin loaded and started.
    On exit, neatly ends the process and makes sure its exit code is 0.
    Yields a client that can send web requests to the external process.
    """
    with external_anki_running(
        "fork" if request.config.option.forked else "spawn"
    ) as client:
        yield client


##############################################################################


def test_successful_request(external_anki):
    response = external_anki.send_request("version")
    assert response == {"error": None, "result": 6}


def test_can_handle_multiple_requests(external_anki):
    assert external_anki.send_request("version") == {"error": None, "result": 6}
    assert external_anki.send_request("version") == {"error": None, "result": 6}


def test_multi_request(external_anki):
    version_request = Client.make_request("version")
    response = external_anki.send_request("multi", actions=[version_request] * 3)
    assert response == {
        "error": None,
        "result": [{"error": None, "result": 6}] * 3
    }


def test_request_with_empty_body_returns_version_banner(external_anki):
    response = json.loads(external_anki.send_bytes(b""))
    assert response == {
        "apiVersion": "AnkiConnect v.6"
    }


def test_failing_request_due_to_bad_arguments(external_anki):
    response = external_anki.send_request("addNote", bad="request")
    assert response["result"] is None
    assert "unexpected keyword argument" in response["error"]


def test_failing_request_due_to_anki_raising_exception(external_anki):
    response = external_anki.send_request("suspend", cards=[-123])
    assert response["result"] is None
    assert "Card was not found" in response["error"]


def test_failing_request_due_to_bad_encoding(external_anki):
    response = json.loads(external_anki.send_bytes(b"\xe7\x8c"))
    assert response["result"] is None
    assert "can't decode" in response["error"]


def test_failing_request_due_to_bad_json(external_anki):
    response = json.loads(external_anki.send_bytes(b'{1: 2}'))
    assert response["result"] is None
    assert "in double quotes" in response["error"]


def test_failing_request_due_to_json_root_not_being_an_object(external_anki):
    response = json.loads(external_anki.send_bytes(b"1.2"))
    assert response["result"] is None
    assert "is not of type 'object'" in response["error"]


def test_failing_request_due_to_json_missing_wanted_properties(external_anki):
    response = json.loads(external_anki.send_bytes(b"{}"))
    assert response["result"] is None
    assert "'action' is a required property" in response["error"]


def test_failing_request_due_to_json_properties_being_of_wrong_types(external_anki):
    response = json.loads(external_anki.send_bytes(b'{"action": 1}'))
    assert response["result"] is None
    assert "1 is not of type 'string'" in response["error"]


def test_403_in_case_of_disallowed_origin(external_anki):
    with pytest.raises(urllib.error.HTTPError, match="403"):  # good request/json
        json_bytes = json.dumps(Client.make_request("version")).encode("utf-8")
        external_anki.send_bytes(json_bytes, headers={b"origin": b"foo"})

    with pytest.raises(urllib.error.HTTPError, match="403"):  # bad json
        external_anki.send_bytes(b'{1: 2}', headers={b"origin": b"foo"})

tests/test_stats.py (new executable file, 46 lines)
@@ -0,0 +1,46 @@
from conftest import ac


def test_getNumCardsReviewedToday(setup):
    result = ac.getNumCardsReviewedToday()
    assert isinstance(result, int)


def test_getNumCardsReviewedByDay(setup):
    result = ac.getNumCardsReviewedByDay()
    assert isinstance(result, list)


def test_getCollectionStatsHTML(setup):
    result = ac.getCollectionStatsHTML()
    assert isinstance(result, str)


class TestReviews:
    def test_zero_reviews_for_a_new_deck(self, setup):
        assert ac.cardReviews(deck="test_deck", startID=0) == []
        assert ac.getLatestReviewID(deck="test_deck") == 0

    def test_some_reviews_for_a_reviewed_deck(self, setup):
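        # Judging from the assertion at the end of this test, each review tuple is
        # (id, cardID, usn, ease, ivl, lastIvl, factor, time, type), with id also
        # serving as the value returned by getLatestReviewID below.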
        ac.insertReviews(reviews=[
            (456, setup.card_ids[0], -1, 3, 4, -60, 2500, 6157, 0),
            (789, setup.card_ids[1], -1, 1, -60, -60, 0, 4846, 0)
        ])

        assert len(ac.cardReviews(deck="test_deck", startID=0)) == 2
        assert ac.getLatestReviewID(deck="test_deck") == 789
        assert ac.getReviewsOfCards(cards=[setup.card_ids[0]]) == \
            {
                setup.card_ids[0]: [
                    {
                        "id": 456,
                        "usn": -1,
                        "ease": 3,
                        "ivl": 4,
                        "lastIvl": -60,
                        "factor": 2500,
                        "time": 6157,
                        "type": 0,
                    }
                ]
            }