
Commit

Merge pull request #880 from cheshire-cat-ai/develop
1.7.1
pieroit authored Aug 7, 2024
2 parents 4fe0738 + 8d9fdc0 commit f14f2c0
Showing 49 changed files with 634 additions and 965 deletions.
6 changes: 6 additions & 0 deletions .env.example
@@ -6,6 +6,12 @@
# Decide to use https / wss secure protocols
# CCAT_CORE_USE_SECURE_PROTOCOLS=true

# Uvicorn and FastAPI operating behind https proxy
# CCAT_HTTPS_PROXY_MODE=true

# Comma separated list of IPs to trust with proxy headers. A wildcard '*' means always trust.
# CCAT_CORS_FORWARDED_ALLOW_IPS="*"

# Protect endpoints with an access token
# CCAT_API_KEY=meow
# CCAT_API_KEY_WS=meow2
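The two new variables cover deployments where core runs behind a reverse proxy. As a rough, hedged sketch of what they map to (uvicorn's proxy_headers and forwarded_allow_ips options; the app path and startup wiring below are illustrative assumptions, not the actual core startup code):

import os
import uvicorn

# Assumption: CCAT_* variables are plain strings; core's own env helper may differ.
proxy_mode = os.getenv("CCAT_HTTPS_PROXY_MODE", "false") == "true"
forwarded_allow_ips = os.getenv("CCAT_CORS_FORWARDED_ALLOW_IPS", "*")

uvicorn.run(
    "cat.startup:cheshire_cat_api",  # hypothetical app path, for illustration only
    host="0.0.0.0",
    port=80,
    proxy_headers=proxy_mode,                 # trust X-Forwarded-* headers from the proxy
    forwarded_allow_ips=forwarded_allow_ips,  # proxy IPs to trust ('*' trusts any)
)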
4 changes: 2 additions & 2 deletions .github/workflows/pr.yml
@@ -23,8 +23,8 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Cat up
run: docker-compose up -d
run: docker compose up -d
- name: Test
run: docker exec cheshire_cat_core python -m pytest --color=yes .
- name: Cat down
run: docker-compose down
run: docker compose down
11 changes: 11 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,16 @@
# Changelog

## 1.7.1 ( 2024-08-01 )

New in version 1.7

- User system
- permission system
- JWT support
- Custom Auth support
- White Rabbit
- StrayCat.classify

## 1.5.0 ( 2024-03-07 )

New in version 1.5.0: **Sir Punctilious Cat, the IV**!
4 changes: 2 additions & 2 deletions compose.yml
@@ -6,8 +6,8 @@ services:
context: ./core
container_name: cheshire_cat_core
# Uncomment the two lines below to use your .env (see .env.example)
# env_file:
# - .env
#env_file:
# - .env
ports:
- ${CCAT_CORE_PORT:-1865}:80
- 5678:5678 # only for development purposes (take away in production)
3 changes: 3 additions & 0 deletions core/cat/agents/__init__.py
@@ -0,0 +1,3 @@
from .base_agent import BaseAgent, AgentOutput

__all__ = ['BaseAgent', 'AgentOutput']
26 changes: 2 additions & 24 deletions core/cat/agents/base_agent.py
@@ -1,12 +1,11 @@
from typing import List
from abc import ABC, abstractmethod

from langchain_core.utils import get_colored_text

from cat.utils import BaseModelDict

class AgentOutput(BaseModelDict):
output: str
output: str | None = None
intermediate_steps: List = []
return_direct: bool = False

@@ -15,25 +14,4 @@ class BaseAgent(ABC):

@abstractmethod
async def execute(*args, **kwargs) -> AgentOutput:
pass

# TODO: this is here to debug langchain, take it away
def _log_prompt(self, langchain_prompt, title):
print("\n")
print(get_colored_text(f"==================== {title} ====================", "green"))
for m in langchain_prompt.messages:
print(get_colored_text(type(m).__name__, "green"))
print(m.content)
print(get_colored_text("========================================", "green"))
return langchain_prompt

# TODO: this is here to debug langchain, take it away
def _log_output(self, langchain_output, title):
print("\n")
print(get_colored_text(f"==================== {title} ====================", "blue"))
if hasattr(langchain_output, 'content'):
print(langchain_output.content)
else:
print(langchain_output)
print(get_colored_text("========================================", "blue"))
return langchain_output
pass
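With output now optional (str | None, defaulting to None), an empty AgentOutput() is a legitimate "nothing to return" result, and custom agents only need the re-exported cat.agents names. A minimal sketch of the interface, assuming a stray session object as in the built-in agents (the agent itself is hypothetical):

from cat.agents import BaseAgent, AgentOutput

class EchoAgent(BaseAgent):
    # Illustrative agent, not part of the codebase.

    async def execute(self, stray) -> AgentOutput:
        user_text = stray.working_memory.user_message_json.text
        if not user_text:
            return AgentOutput()  # output defaults to None, so an empty result is valid
        return AgentOutput(
            output=f"You said: {user_text}",
            return_direct=True,
        )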
38 changes: 38 additions & 0 deletions core/cat/agents/form_agent.py
@@ -0,0 +1,38 @@
import traceback
from cat.experimental.form import CatFormState
from cat.agents import BaseAgent, AgentOutput
from cat.log import log

class FormAgent(BaseAgent):

async def execute(self, stray) -> AgentOutput:

# get active form from working memory
active_form = stray.working_memory.active_form

if not active_form:
# no active form
return AgentOutput()
elif active_form._state == CatFormState.CLOSED:
# form is closed, delete it from working memory
stray.working_memory.active_form = None
return AgentOutput()
else:
# continue form
try:
form_output = active_form.next() # form should be async and should be awaited
return AgentOutput(
output=form_output["output"],
return_direct=True, # we assume forms always do a return_direct
intermediate_steps=[
((active_form.name, ""), form_output["output"])
]
)

except Exception as e:
log.error(e)
traceback.print_exc()
return AgentOutput()



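FormAgent only drives whatever CatForm instance is active in working memory; forms themselves are declared by plugins. A hedged reminder of that plugin side (names and fields are illustrative; see the forms documentation for the authoritative API):

from pydantic import BaseModel
from cat.experimental.form import form, CatForm

class PizzaOrder(BaseModel):
    pizza_type: str
    address: str

@form
class PizzaForm(CatForm):
    description = "Pizza order"
    model_class = PizzaOrder
    start_examples = ["I want to order a pizza"]
    stop_examples = ["forget the pizza"]
    ask_confirm = True

    def submit(self, form_data) -> dict:
        # form_data holds the validated PizzaOrder fields as a dict
        return {"output": f"Pizza {form_data['pizza_type']} on its way to {form_data['address']}"}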
13 changes: 8 additions & 5 deletions core/cat/agents/main_agent.py
@@ -8,7 +8,7 @@
from cat.looking_glass import prompts
from cat.utils import verbal_timedelta, BaseModelDict
from cat.env import get_env
from cat.agents.base_agent import BaseAgent, AgentOutput
from cat.agents import BaseAgent, AgentOutput
from cat.agents.memory_agent import MemoryAgent
from cat.agents.procedures_agent import ProceduresAgent

@@ -51,8 +51,10 @@ async def execute(self, stray) -> AgentOutput:
fast_reply = self.mad_hatter.execute_hook(
"agent_fast_reply", fast_reply, cat=stray
)
if len(fast_reply.keys()) > 0:
if isinstance(fast_reply, AgentOutput):
return fast_reply
if isinstance(fast_reply, dict) and "output" in fast_reply:
return AgentOutput(**fast_reply)

# obtain prompt parts from plugins
prompt_prefix = self.mad_hatter.execute_hook(
Expand Down Expand Up @@ -114,14 +116,15 @@ def format_agent_input(self, stray):
)

# format conversation history to be inserted in the prompt
# conversation_history_formatted_content = stray.stringify_chat_history()
# TODOV2: take away
conversation_history_formatted_content = stray.stringify_chat_history()

return BaseModelDict(**{
"input": stray.working_memory.user_message_json.text, # TODO: deprecate, since it is included in chat history
"episodic_memory": episodic_memory_formatted_content,
"declarative_memory": declarative_memory_formatted_content,
# "chat_history": conversation_history_formatted_content,
"tools_output": "",
"input": stray.working_memory.user_message_json.text, # TODOV2: take away
"chat_history": conversation_history_formatted_content, # TODOV2: take away
})

def agent_prompt_episodic_memories(
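With this change, agent_fast_reply can short-circuit the whole pipeline by returning either an AgentOutput or a plain dict that contains an "output" key; anything else falls through to the normal flow. A plugin-side sketch (the hook name and signature come from the codebase, the reply logic is purely illustrative):

from cat.mad_hatter.decorators import hook
from cat.agents import AgentOutput

@hook
def agent_fast_reply(fast_reply, cat):
    user_text = cat.working_memory.user_message_json.text

    # Returning an AgentOutput (or {"output": ...}) skips memory and procedures;
    # returning fast_reply unchanged keeps the default behavior.
    if "ping" in user_text.lower():
        return AgentOutput(output="pong", return_direct=True)
    return fast_reply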
29 changes: 18 additions & 11 deletions core/cat/agents/memory_agent.py
@@ -5,33 +5,40 @@
from langchain_core.output_parsers.string import StrOutputParser

from cat.looking_glass.callbacks import NewTokenHandler, ModelInteractionHandler
from cat.agents.base_agent import BaseAgent, AgentOutput
from cat.agents import BaseAgent, AgentOutput
from cat import utils


class MemoryAgent(BaseAgent):

async def execute(self, stray, prompt_prefix, prompt_suffix) -> AgentOutput:

final_prompt = ChatPromptTemplate(

prompt_variables = stray.working_memory.agent_input.model_dump()
sys_prompt = prompt_prefix + prompt_suffix

# ensure prompt variables and placeholders match
prompt_variables, sys_prompt = utils.match_prompt_variables(prompt_variables, sys_prompt)

prompt = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(
template=prompt_prefix + prompt_suffix
template=sys_prompt
),
*(stray.langchainfy_chat_history()),
]
)

memory_chain = (
final_prompt
| RunnableLambda(lambda x: self._log_prompt(x, "MAIN PROMPT"))
chain = (
prompt
| RunnableLambda(lambda x: utils.langchain_log_prompt(x, "MAIN PROMPT"))
| stray._llm
| RunnableLambda(lambda x: self._log_output(x, "MAIN PROMPT OUTPUT"))
| RunnableLambda(lambda x: utils.langchain_log_output(x, "MAIN PROMPT OUTPUT"))
| StrOutputParser()
)

output = memory_chain.invoke(
output = chain.invoke(
# convert to dict before passing to langchain
# TODO: ensure dict keys and prompt placeholders map, so there are no issues on mismatches
stray.working_memory.agent_input.model_dump(),
prompt_variables,
config=RunnableConfig(callbacks=[NewTokenHandler(stray), ModelInteractionHandler(stray, self.__class__.__name__)])
)

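The memory chain now dumps the agent input once, reconciles it with the prompt placeholders via utils.match_prompt_variables, and taps the chain with utils.langchain_log_prompt / langchain_log_output instead of the old BaseAgent debug methods. A self-contained sketch of that LCEL logging-tap pattern, with a stand-in lambda in place of stray._llm and a print helper in place of the log utilities:

from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers.string import StrOutputParser

def log_passthrough(value, title):
    # Stand-in for utils.langchain_log_prompt / langchain_log_output
    print(f"==== {title} ====\n{value}\n")
    return value

prompt = ChatPromptTemplate(
    messages=[SystemMessagePromptTemplate.from_template("You are the Cheshire Cat. {context}")]
)

fake_llm = RunnableLambda(lambda prompt_value: "We're all mad here.")  # stand-in for stray._llm

chain = (
    prompt
    | RunnableLambda(lambda x: log_passthrough(x, "MAIN PROMPT"))
    | fake_llm
    | RunnableLambda(lambda x: log_passthrough(x, "MAIN PROMPT OUTPUT"))
    | StrOutputParser()
)

print(chain.invoke({"context": "Alice is asking for directions."}))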
