# graphgen/models/llm/ollama_client.py
# TODO: implement ollama client
from typing import Any, List, Optional

from graphgen.bases import BaseLLMClient, Token


class OllamaClient(BaseLLMClient):
    """LLM client backed by a local Ollama server (stub, not yet implemented)."""

    async def generate_answer(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> str:
        # Should return the model's completion for `text`, optionally
        # conditioned on the prior turns in `history`.
        raise NotImplementedError

    async def generate_topk_per_token(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> List[Token]:
        # Should return, for each generated token, its top-k candidate tokens.
        raise NotImplementedError

    async def generate_inputs_prob(
        self, text: str, history: Optional[List[str]] = None, **extra: Any
    ) -> List[Token]:
        # Should return per-token probabilities for the given input text.
        raise NotImplementedError