Calling LLM APIs for Conversation with Python

Sun 17/12/2025

I. OpenAI (GPT)

1. Install Package

bash

pip install openai
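
Note: the OpenAI() client used below requires the v1 SDK (openai>=1.0); if an older version is installed, upgrade it with pip install --upgrade openai.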

2. Configure API Key and Base URL

python

OPENAI_API_KEY = "sk-xxxxx"
OPENAI_BASE_URL = "https://api.openai.com/v1"
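
For anything beyond a quick test, avoid hardcoding the key in source. The example in step 3 reads both values from a config.cfg file instead; a minimal sketch of that file (the [default] section and key names match the configparser calls below, and the ALIYUN_* entries are used in part II):

ini

[default]
OPENAI_API_KEY = sk-xxxxx
OPENAI_BASE_URL = https://api.openai.com/v1
ALIYUN_API_KEY = sk-xxxxx
ALIYUN_MODEL = qwen-plus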

3. API Call Example

python

from flask import Flask, jsonify
from openai import OpenAI
import configparser

app = Flask(__name__)

# Read configuration
config = configparser.ConfigParser()
config.read("config.cfg", encoding="utf-8")

OPENAI_API_KEY = config.get("default", "OPENAI_API_KEY", fallback=None)
OPENAI_BASE_URL = config.get("default", "OPENAI_BASE_URL", fallback=None)

# Initialize OpenAI client
client = OpenAI(
    api_key=OPENAI_API_KEY,
    base_url=OPENAI_BASE_URL
)

@app.route("/gpt_test")
def gpt_test():
    """
    Simple GPT call that returns an answer to a fixed question.
    """
    if not OPENAI_API_KEY:
        return jsonify({"error": "OPENAI_API_KEY not configured"}), 500

    try:
        # Using chat.completions.create style
        resp = client.chat.completions.create(
            model="gpt-4.1-mini",  # Or any available model like gpt-4.1, gpt-4o, etc.
            messages=[
                {"role": "system", "content": "You are a concise assistant."},
                {"role": "user", "content": "Briefly introduce yourself in one sentence."},
            ],
        )
        answer = resp.choices[0].message.content
        return jsonify({"answer": answer})
    except Exception as e:
        print("GPT call exception:", repr(e))
        return jsonify({"error": str(e)}), 500
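
To try the route locally, add the standard Flask entry point at the bottom of the file and open the URL (a minimal sketch; 5000 is Flask's default development port):

python

if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production
    app.run(debug=True)

With the server running, http://127.0.0.1:5000/gpt_test should return a JSON body like {"answer": "..."}.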

II. Alibaba Tongyi (Qwen)

1. Install Official SDK

bash

pip install dashscope

2. Basic API Call
The dashscope.Generation.call method is largely compatible with OpenAI's chat message format. The route below delegates to a chat_with_model helper, which is implemented in section 4; an OpenAI-compatible alternative is sketched after the model list.

python

ALIYUN_API_KEY = config.get("default", "ALIYUN_API_KEY", fallback=None)

@app.route("/llm_test/")
def llm_test():
    """Test conversation with the LLM."""
    try:
        messages = [
            {'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': 'Who are you?'}
        ]
        answer = chat_with_model(messages)
        return jsonify({"answer": answer})
    except Exception as e:
        print("LLM error:", repr(e))
        return jsonify({"error": str(e)}), 500

Available Model IDs:

  • qwen3-max
  • qwen-plus
  • qwen-turbo
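
Because of the compatibility noted above, Qwen models can also be called with the OpenAI SDK from part I by pointing it at DashScope's OpenAI-compatible endpoint. A minimal sketch (the base_url is DashScope's documented compatible-mode address; available model IDs may vary by account and region):

python

from openai import OpenAI

qwen_client = OpenAI(
    api_key=ALIYUN_API_KEY,
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

resp = qwen_client.chat.completions.create(
    model="qwen-plus",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who are you?"},
    ],
)
print(resp.choices[0].message.content)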

3. Using Prompts from Files

If you keep prompts in files, e.g., doc-llm-latest.md under app/prompt_store/:

First, read the prompt content as a string:

python

from pathlib import Path

# Directory where run.py is located
BASE_DIR = Path(__file__).resolve().parent
PROMPT_DIR = BASE_DIR / "app" / "prompt_store"
PROMPT_LATEST_FILE = PROMPT_DIR / "doc-llm-latest.md"

def load_latest_prompt() -> str | None:
    """Read content from doc-llm-latest.md."""
    try:
        with PROMPT_LATEST_FILE.open("r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        print(f"[WARN] Prompt file not found: {PROMPT_LATEST_FILE}")
        return None
    except Exception as e:
        print(f"[ERROR] Failed to read prompt: {e!r}")
        return None

Then, incorporate it into the message format:

python

@app.route("/llm_with_prompt/")
def llm_with_prompt():
    """Converse with LLM using the latest prompt."""
    prompt = load_latest_prompt()
    if not prompt:
        return jsonify({"error": "No prompt available"}), 500

    try:
        messages = [
            {
                'role': 'system',
                'content': prompt
            },
            {
                'role': 'user',
                'content': "Please summarize the core objective of this document testing specification in one or two sentences."
            }
        ]
        answer = chat_with_model(messages)
        return jsonify({"answer": answer})
    except Exception as e:
        print("LLM with prompt error:", repr(e))
        return jsonify({"error": str(e)}), 500
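
Note that load_latest_prompt() re-reads the file on every request, which keeps prompt edits live without restarting the server at the cost of one disk read per call. If the prompt is large or the route is hot, consider caching the string and reloading only when the file changes.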

4. Encapsulating LLM Conversation Functionality

We can encapsulate the LLM conversation logic in a reusable helper module:

python

# Unified management of LLM calls with configuration
import configparser
from pathlib import Path
import dashscope

# NOTE: parent.parent assumes this module lives one level below the
# project root (e.g., inside app/); adjust to match your layout.
BASE_DIR = Path(__file__).resolve().parent.parent
CONFIG_FILE = BASE_DIR / "config.cfg"

config = configparser.ConfigParser()
config.read(CONFIG_FILE, encoding="utf-8")

ALIYUN_API_KEY = config.get("default", "ALIYUN_API_KEY", fallback=None)
ALIYUN_MODEL = config.get("default", "ALIYUN_MODEL", fallback="qwen-plus")  # default model if unset

def init_llm():
    """Called once during Flask startup to set api_key."""
    if not ALIYUN_API_KEY:
        print("[WARN] No ALIYUN_API_KEY configured in config.cfg")
    dashscope.api_key = ALIYUN_API_KEY

def chat_with_model(messages: list[dict]) -> str:
    """Call the LLM for one conversation turn.

    Args:
        messages (list[dict]): Message list; format follows the OpenAI Chat API.

    Returns:
        str: Model response content.
    """
    if not ALIYUN_API_KEY:
        raise ValueError("No ALIYUN_API_KEY configured")

    response = dashscope.Generation.call(
        model=ALIYUN_MODEL,
        messages=messages,
        # Without result_format="message", DashScope returns plain text in
        # output.text instead of the OpenAI-style output.choices structure.
        result_format="message",
    )
    print(f"raw response: {response}")
    if response.status_code != 200:
        raise RuntimeError(f"LLM call failed: {response.code} - {response.message}")
    answer = response["output"]["choices"][0]["message"]["content"]
    return answer
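
A sketch of how the helper module might be wired into the Flask app (the module name app.llm_client is hypothetical; use whatever path matches your project):

python

# run.py (hypothetical layout: helper module saved as app/llm_client.py)
from flask import Flask, jsonify
from app.llm_client import init_llm, chat_with_model

app = Flask(__name__)
init_llm()  # set dashscope.api_key once at startup

@app.route("/ping_llm/")
def ping_llm():
    answer = chat_with_model([
        {"role": "user", "content": "Reply with the single word 'pong'."}
    ])
    return jsonify({"answer": answer})

if __name__ == "__main__":
    app.run(debug=True)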