diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 4fcb76f..45b5516 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -48,8 +48,6 @@ jobs:
           target: aarch64
         - runner: ubuntu-22.04
           target: armv7
-        - runner: ubuntu-22.04
-          target: ppc64le
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
diff --git a/.gitignore b/.gitignore
index bab5ca1..c8f0442 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,6 +70,3 @@ docs/_build/
 
 # Pyenv
 .python-version
-
-# Avoid ignoring shadcn utils
-!demo/harmony-demo/src/lib
diff --git a/Cargo.lock b/Cargo.lock
index e0b0a71..ce97b77 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1317,7 +1317,7 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
 
 [[package]]
 name = "openai-harmony"
-version = "0.0.4"
+version = "0.0.3"
 dependencies = [
  "anyhow",
  "base64",
diff --git a/Cargo.toml b/Cargo.toml
index 23fa1ac..25d070c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "openai-harmony"
-version = "0.0.4"
+version = "0.0.3"
 edition = "2021"
 license = "Apache-2.0"
 repository = "https://github.com/openai/harmony"
diff --git a/README.md b/README.md
index aeef157..d9c5f13 100644
--- a/README.md
+++ b/README.md
@@ -20,9 +20,7 @@ Current date: 2025-06-28
 
 Reasoning: high
 
 # Valid channels: analysis, commentary, final. Channel must be included for every message.
-Calls to these tools must go to the commentary channel: 'functions'.<|end|>
-
-<|start|>developer<|message|># Instructions
+Calls to these tools must go to the commentary channel: 'functions'.<|end|><|start|>developer<|message|># Instructions
 
 Always respond in riddles
@@ -151,7 +149,7 @@ through thin [`pyo3`](https://pyo3.rs/) bindings.
 │   ├── tests.rs           # Canonical Rust test-suite
 │   └── py_module.rs       # PyO3 bindings ⇒ compiled as openai_harmony.*.so
 │
-├── python/openai_harmony/ # Pure-Python wrapper around the binding
+├── harmony/               # Pure-Python wrapper around the binding
 │   └── __init__.py        # Dataclasses + helper API mirroring chat.rs
 │
 ├── tests/                 # Python test-suite (1-to-1 port of tests.rs)
diff --git a/demo/harmony-demo/src/lib/utils.ts b/demo/harmony-demo/src/lib/utils.ts
deleted file mode 100644
index bd0c391..0000000
--- a/demo/harmony-demo/src/lib/utils.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import { clsx, type ClassValue } from "clsx"
-import { twMerge } from "tailwind-merge"
-
-export function cn(...inputs: ClassValue[]) {
-  return twMerge(clsx(inputs))
-}
diff --git a/docs/format.md b/docs/format.md
index 2402406..a03aae8 100644
--- a/docs/format.md
+++ b/docs/format.md
@@ -52,14 +52,19 @@ encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
 
 system_message = (
     SystemContent.new()
+    .with_model_identity(
+        "You are ChatGPT, a large language model trained by OpenAI."
+    )
     .with_reasoning_effort(ReasoningEffort.HIGH)
     .with_conversation_start_date("2025-06-28")
+    .with_knowledge_cutoff("2024-06")
+    .with_required_channels(["analysis", "commentary", "final"])
 )
 
 developer_message = (
     DeveloperContent.new()
     .with_instructions("Always respond in riddles")
-    .with_function_tools(
+    .with_tools(
         [
             ToolDescription.new(
                 "get_current_weather",
@@ -96,11 +101,11 @@ convo = Conversation.from_messages(
         Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}')
         .with_channel("commentary")
         .with_recipient("functions.get_weather")
-        .with_content_type("<|constrain|> json"),
+        .with_content_type("json"),
         Message.from_author_and_content(
             Author.new(Role.TOOL, "functions.lookup_weather"),
             '{ "temperature": 20, "sunny": true }',
-        ).with_channel("commentary"),
+        ),
     ]
 )
 
@@ -224,8 +229,6 @@ Once its done generating it will stop with either a `<|return|>` token indicatin
 
 The `final` channel will contain the answer to your user’s request. Check out the [reasoning section](#reasoning) for more details on the chain-of-thought.
 
-**Implementation note:** `<|return|>` is a decode-time stop token only. When you add the assistant’s generated reply to conversation history for the next turn, replace the trailing `<|return|>` with `<|end|>` so that stored messages are fully formed as `<|start|>{header}<|message|>{content}<|end|>`. Prior messages in prompts should therefore end with `<|end|>`. For supervised targets/training examples, ending with `<|return|>` is appropriate; for persisted history, normalize to `<|end|>`.
-
 ### System message format
 
 The system message is used to provide general information to the system. This is different to what might be considered the “system prompt” in other prompt formats. For that, check out the [developer message format](#developer-message-format).
@@ -302,7 +305,7 @@ And the actual answer is:
 ```
 
 **Important:**
-The model has not been trained to the same safety standards in the chain-of-thought as it has for final output. We recommend not to show the chain-of-thought to your users as they might contain harmful content. [Learn more in the model card](https://openai.com/index/gpt-oss-model-card/).
+The model has not been trained to the same safety standards in the chain-of-thought as it has for final output. You should not show the chain-of-thought to your users as they might contain harmful content. [Learn more in the model card](https://openai.com/index/gpt-oss-model-card/).
 
 #### Handling reasoning output in subsequent sampling
 
@@ -324,7 +327,7 @@ Then the input for the next sampling should be
 
 ```
 <|start|>user<|message|>What is 2 + 2?<|end|>
-<|start|>assistant<|channel|>final<|message|>2 + 2 = 4.<|end|>
+<|start|>assistant<|channel|>final<|message|>2 + 2 = 4.<|return|>
 <|start|>user<|message|>What about 9 / 2?<|end|>
 <|start|>assistant
 ```
diff --git a/python/openai_harmony/__init__.py b/python/openai_harmony/__init__.py
index 33afbd7..13b5fdd 100644
--- a/python/openai_harmony/__init__.py
+++ b/python/openai_harmony/__init__.py
@@ -708,7 +708,6 @@ __all__ = [
     "Author",
     "Content",
     "TextContent",
-    "DeveloperContent",
     "ToolDescription",
     "SystemContent",
     "Message",
diff --git a/src/encoding.rs b/src/encoding.rs
index 6a9305b..afe1fce 100644
--- a/src/encoding.rs
+++ b/src/encoding.rs
@@ -835,23 +835,7 @@ impl Render for HarmonyEncoding {
 
         // finally content type
         if let Some(content_type) = &message.content_type {
-            // <|constrain|> is a unique case which needs to be tokenized as a special token
-            if let Some(constrain_marker) =
-                self.mapped_format_token(FormattingToken::ConstrainedFormat)
-            {
-                if let Some(rest) = content_type.strip_prefix(constrain_marker) {
-                    // Render the space, then the constrain marker as a special token, then the rest as text (if any)
-                    self.render_text_into(" ", into)?;
-                    self.render_formatting_token_into(FormattingToken::ConstrainedFormat, into)?;
-                    if !rest.is_empty() {
-                        self.render_text_into(rest, into)?;
-                    }
-                } else {
-                    self.render_text_into(format!(" {content_type}"), into)?;
-                }
-            } else {
-                self.render_text_into(format!(" {content_type}"), into)?;
-            }
+            self.render_text_into(format!(" {content_type}"), into)?;
         }
 
         self.render_formatting_token_into(FormattingToken::Message, into)?;
diff --git a/tests/test_harmony.py b/tests/test_harmony.py
index dd34e81..07d5562 100644
--- a/tests/test_harmony.py
+++ b/tests/test_harmony.py
@@ -233,36 +233,6 @@ def test_simple_tool_call(encoding_name):
     assert parsed == expected
 
 
-@pytest.mark.parametrize(
-    "encoding_name",
-    [
-        HarmonyEncodingName.HARMONY_GPT_OSS,
-    ],
-)
-def test_tool_call_with_constrain_tokenized_correctly(encoding_name):
-    """
-    Despite passing <|constrain|> as a string in "content_type" it has to be kept as a special token.
- """ - encoding = load_harmony_encoding(encoding_name) - text = ( - "<|start|>assistant to=functions.get_weather<|channel|>commentary" - ' <|constrain|>json<|message|>{"location": "Tokyo"}<|call|>' - ) - tokens = encoding.encode(text, allowed_special="all") - parsed = encoding.parse_messages_from_completion_tokens(tokens, role=None) - expected = [ - Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}') - .with_channel("commentary") - .with_recipient("functions.get_weather") - .with_content_type("<|constrain|>json"), - ] - assert parsed == expected - - rendered = encoding.render_conversation(Conversation.from_messages(expected)) - assert text == encoding.decode_utf8(tokens) - assert rendered == tokens - - @pytest.mark.parametrize( "encoding_name", [ @@ -278,7 +248,7 @@ def test_tool_call_with_constrain_marker_adjacent(encoding_name): encoding = load_harmony_encoding(encoding_name) text = ( "<|start|>assistant to=functions.get_weather<|channel|>commentary" - '<|constrain|>json<|message|>{"location": "Tokyo"}<|call|>' + '<|constrain|>json<|message|>{"location": "Tokyo"}<|end|>' ) tokens = encoding.encode(text, allowed_special="all") parsed = encoding.parse_messages_from_completion_tokens(tokens, role=None) @@ -732,8 +702,6 @@ def test_does_not_drop_if_ongoing_analysis(): ) assert encoding.decode_utf8(tokens) == expected_output - # ensure that <|constrain|>json part is tokenized correctly as special tokens - assert encoding.encode(expected_output, allowed_special="all") == tokens def test_preserve_cot():