Skip to content

Commit

Permalink
Made it possible to turn off LLM rerank (#190)
Browse files · Browse the repository at this point in the history
  • Loading branch information
whitead authored Aug 29, 2023
1 parent 76ecaf4 commit e5c1721
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 3 deletions.
12 changes: 10 additions & 2 deletions paperqa/docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,11 @@ def delete(
self.deleted_dockeys.add(dockey)

async def adoc_match(
self, query: str, k: int = 25, get_callbacks: CallbackFactory = lambda x: None
self,
query: str,
k: int = 25,
rerank: Optional[bool] = None,
get_callbacks: CallbackFactory = lambda x: None,
) -> Set[DocKey]:
"""Return a list of dockeys that match the query."""
if self.doc_index is None:
Expand All @@ -310,7 +314,11 @@ async def adoc_match(
return set()
# this only works for gpt-4 (in my testing)
try:
if cast(BaseLanguageModel, self.llm).model_name.startswith("gpt-4"):
if (
rerank is None
and cast(BaseLanguageModel, self.llm).model_name.startswith("gpt-4")
or rerank is True
):
chain = make_chain(
self.prompts.select,
cast(BaseLanguageModel, self.llm),
Expand Down
2 changes: 1 addition & 1 deletion paperqa/version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "3.8.0"
__version__ = "3.9.0"

0 comments on commit e5c1721

Please sign in to comment.