﻿AIプラットフォームとライブラリによる生成AIプログラミング


掲載リスト




リスト1-1
print("Welcome to Colaboratory!")




リスト1-2
# prompt: 100以下の素数を全て表示する


def is_prime(n):
  """Return True if n is a prime number, False otherwise."""
  if n <= 1:
    return False
  # Trial division up to sqrt(n) is sufficient.
  for i in range(2, int(n**0.5) + 1):
    if n % i == 0:
      return False
  return True


# range(101) so the bound 100 itself is checked ("100以下" means up to and
# including 100). 100 is not prime, so the printed output is unchanged.
primes = [i for i in range(101) if is_prime(i)]
print(primes)




リスト2-1
{
  "name": "ChatML",
  "inference_params": {
    "input_prefix": "<|im_end|>\n<|im_start|>user\n",
    "input_suffix": "<|im_end|>\n<|im_start|>assistant\n",
    "antiprompt": [
      "<|im_start|>",
      "<|im_end|>"
    ],
    "pre_prompt_prefix": "<|im_start|>system\n",
    "pre_prompt_suffix": "",
    "pre_prompt": "Perform the task to the best of your ability."
  }
}




リスト2-2
from openai import OpenAI


client = OpenAI(
  base_url="http://localhost:1234/v1",
  api_key="not-needed")


prompt = input("prompt:")


completion = client.chat.completions.create(
  model="local-model",
  messages=[
    {"role": "user", "content": prompt}
  ],
)


print(f"Result: {completion.choices[0].message.content}")




リスト2-3
curl http://localhost:1234/v1/chat/completions -H "Content-Type: application/json" -d '{ "messages": [{"role":"user","content":"こんにちは。"}]}'


 いくつもの値が組み合わせられているため非常にわかりにくくなっていますね。改行して見やすくすると以下のようになります。


curl http://localhost:1234/v1/chat/completions ⏎
  -H "Content-Type: application/json" ⏎
  -d '{ ⏎
    "messages": [⏎
      {"role":"user","content":"こんにちは。"} ⏎
    ]⏎
  }'




リスト2-4
from openai import OpenAI


client = OpenAI(
  base_url="http://localhost:1234/v1",
  api_key="not-needed")


prompt = input("prompt:")


completion = client.chat.completions.create(
  model="local-model",
  messages=[
    {"role": "user", "content": prompt}
  ],
  temperature=0.7,
  max_tokens=100,
  top_p=0.1,
  presence_penalty=1.0,
  n=1,
  stop=["！","？"],
  stream=False
)
l = len(completion.choices)
for n in range(l):
  print(f"Result: {completion.choices[n].message.content}")




リスト2-5
from openai import OpenAI


client = OpenAI(
  base_url="http://localhost:1234/v1",
  api_key="not-needed")


messages = []


while True:
  prompt = input("prompt: ")


  if prompt:
    messages.append({
      "role": "user",
      "content": prompt
    })
    completion = client.chat.completions.create(
      model="local-model",
      messages=messages,
      temperature=0.7,
      max_tokens=100
    )
    message = completion.choices[0].message
    print(f"Result: {message.content.strip()}")
    messages.append({
      "role":"assistant",
      "content":message.content
    })
  else:
    print("これで終わりです。さようなら！")
    break




リスト2-6
from openai import OpenAI


client = OpenAI(
  base_url="http://localhost:1234/v1",
  api_key="not-needed")


prompt = input("prompt: ")


completion = client.chat.completions.create(
  model="local-model",
  messages=[
    {
      "role": "user",
      "content": prompt
    }
  ],
  temperature=0.7,
  max_tokens=300,
  stream=True
)
print("Result:")
for chunk in completion:
  if chunk.choices[0].delta.content:
    print(chunk.choices[0].delta.content, end="")




リスト3-1
!pip install replicate --q




リスト3-2
from getpass import getpass
import os


REPLICATE_API_TOKEN = getpass()
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN




リスト3-3
import replicate


prompt = "" # @param {type:"string"}


output = replicate.run(
  "meta/llama-2-7b",
  input={
    "prompt": prompt,
  }
)
print(output)




リスト3-4
"".join(output)




リスト3-5
prompt = "" # @param {type:"string"}


output = replicate.run(
  "ai-forever/kandinsky-2.2:ea1addaab376f4dc227f5368bbd8e⏎
    ff901820fd1cc14ed8cad63b29249e9d463",
  input={
    "prompt": prompt
  }
)
print(output)




リスト3-6
from IPython.display import Image


Image(url=output[0], width=512, height=512)


（3-47：生成されたイメージが表示される。）




リスト3-7
prompt = "" # @param {type:"string"}


output = replicate.run(
  "meta/musicgen:b05b1dff1d8c6dc63d14b0cdb42135378⏎
    dcb87f6373b0d3d341ede46e59e2b38",
  input={
    "prompt": prompt
  }
)
output




リスト3-8
from IPython.display import Audio


Audio(output)




リスト3-9
import replicate


prompt = "" # @param {type:"string"}


for event in replicate.stream(
  "meta/llama-2-7b",
  input={
    "prompt": prompt
  },
):
  print(str(event), end="")




リスト3-10
prompt = "" # @param {type:"string"}


model = replicate.models.get("ai-forever/kandinsky-2.2")
version = model.versions.get("ea1addaab376f4dc227f5368bbd8eff⏎
  901820fd1cc14ed8cad63b29249e9d463")
prediction = replicate.predictions.create(
  version=version,
  input={
    "prompt":prompt
  },
)
prediction 




リスト3-11
import sys
from IPython.display import Image


prediction.reload()


if prediction.status != "succeeded":
  print(prediction.status)
  sys.exit(prediction.status)


Image(url=prediction.output[0], width=512, height=512)




リスト3-12
import replicate


# Register a new model on Replicate. Replace each 《…》 placeholder with a
# real value before running.
# Fixed: the original had a quote on the wrong side of the closing bracket
# (name="《モデル名"》",), which is a SyntaxError.
model = replicate.models.create(
  owner="《アカウント名》",
  name="《モデル名》",
  visibility="《PublicまたはPrivate》",
  hardware="《ハードウェア名》"
)
model




リスト3-13
import replicate


model = replicate.models.get("《アカウント名》/《モデル名》")
model




リスト3-14
import replicate


training = replicate.trainings.create(
  version="meta/llama-2-7b:73001d654114dad81ec65da3⏎
    b834e2f691af1e1526453189b7bf36fb3f32d0f9",
  input={
    "train_data": "https://……学習データのURL……",
    "num_train_epochs": 3
  },
  destination="《アカウント名》/《モデル名》"
)
training




リスト3-15
import replicate


prompt = "" # @param {type: "string"}


output = replicate.run(
  "《アカウント名》/《モデル名》:《バージョン名》",
  input={
    "prompt": prompt
  }
)
"".join(output)




リスト3-16
!curl -s -X DELETE \
  -H "Authorization: Token 《APIトークン》\
  " https://api.replicate.com/v1/models/《アカウント名》/《モデル名》/versions/《バージョン》




リスト4-1
!curl \
  -H "Authorization: Bearer 《アクセストークン》" \
  -H "Content-Type: application/json" \
  -d '{"inputs": "こんにちは。"}' \
  https://api-inference.huggingface.co/models/openai-community/gpt2




リスト4-2
import requests


ACCESS_TOKEN = "《アクセストークン》"
API_BASE = "https://api-inference.huggingface.co/models"
API_URL = f"{API_BASE}/openai-community/gpt2"


headers = {
  "Authorization": f"Bearer {ACCESS_TOKEN}",
  "Content-Type": "application/json"
}


def query(payload):
  response = requests.post(
    API_URL, 
    headers=headers, 
    json=payload)
  return response.json()




リスト4-3
prompt = "" # @param {type:"string"}


API_URL = f"{API_BASE}/openai-community/gpt2"


output = query({
  "inputs": prompt
})
output[0]["generated_text"]




リスト4-4
prompt = "" # @param {type:"string"}


API_URL = f"{API_BASE}/rinna/japanese-gpt2-medium"


output = query({
  "inputs": prompt
})
output[0]["generated_text"]




リスト4-5
!pip install transformers --q -U




リスト4-6
from transformers import pipeline


pipe = pipeline("text-generation")




リスト4-7
prompt = "" # @param {type:"string"}
pipe(prompt)




リスト4-8
pipe = pipeline("text-classification")




リスト4-9
pipe = pipeline("translation_en_to_fr")




リスト4-10
pipe = pipeline("text-generation", model="rinna/japanese-gpt2-medium")




リスト4-11
pipe = pipeline("translation_ja_to_en", 
  model="facebook/mbart-large-50-many-to-many-mmt")




リスト4-12
pipe = pipeline(
  "text-generation", 
  model="rinna/japanese-gpt2-medium",
  temperature=0.75,
  top_p=0.75)




リスト4-13
prompt = "" # @param {type:"string"}
pipe(prompt,
  max_length=50,
  num_return_sequences=3)




リスト4-14
from transformers import AutoTokenizer


tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-medium")
tokenizer




リスト4-15
from transformers import AutoModelForCausalLM


model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")
model




リスト4-16
prompt = "" # @param {type:"string"}


input_ids = tokenizer.encode(prompt, return_tensors='pt')
gen_tokens = model.generate(input_ids, max_length=50)
generated = tokenizer.batch_decode(gen_tokens)
generated




リスト4-17
!pip install diffusers --q
!pip install torch --q




リスト4-18
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
import torch


model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(
  model_id, 
  subfolder="scheduler")


pipe = StableDiffusionPipeline.from_pretrained(
  model_id, 
  scheduler=scheduler)


pipe = pipe.to("cuda")




リスト4-19
prompt = "" # @param {type:"string"}
image = pipe(prompt).images[0]
image




リスト5-1
curl https://openrouter.ai/api/v1/chat/completions  -H "Content-Type: application/json" -H "Authorization: Bearer 《APIキー》"  -d '{ "model":"openchat/openchat-7b:free", "messages":[ {"role": "user", "content": "あなたは誰？"}  ] }'




リスト5-2
!curl https://openrouter.ai/api/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer 《APIキー》" \
  -d '{ \
  "model":"openchat/openchat-7b:free", \
  "messages":[ \
    {"role": "user", "content": "あなたは誰？"} \
  ] \
}'




リスト5-3
OPENROUTER_API_KEY = "《APIキー》"




リスト5-4
import requests
import json


prompt = "" # @param {type:"string"}


response = requests.post(
  url="https://openrouter.ai/api/v1/chat/completions",
  headers={
    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
    "Content-Type": "application/json"
  },
  data=json.dumps({
    "model": "openchat/openchat-7b:free",
    "messages": [
      {"role": "user", "content": prompt}
    ]
  })
)
response




リスト5-5
json_data = response.json()
json_data['choices'][0]["message"]




リスト5-6
!pip install openai --q -U




リスト5-7
from openai import OpenAI


client = OpenAI(
  base_url="https://openrouter.ai/api/v1",
  api_key=OPENROUTER_API_KEY
)
client




リスト5-8
prompt = "" # @param {type:"string"}


completion = client.chat.completions.create(
  model="openchat/openchat-7b:free",
  messages=[
    {
      "role": "user",
      "content": prompt
    },
  ],
)
completion.choices[0].message.content




リスト5-9
import requests
import json


prompt = "" # @param {type:"string"}
response = requests.post(
  url="https://openrouter.ai/api/v1/chat/completions",
  headers={
    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
  },
  data=json.dumps({
    "model": "openchat/openchat-7b:free",
    "max_tokens":300,
    "temperature":0.7,
    "top_p":0.7,
    "messages": [
      {"role": "user", "content": prompt}
    ]
  })
)
json_data = response.json()
json_data['choices'][0]["message"]["content"]




リスト5-10
from openai import OpenAI


prompt = "" # @param {type:"string"}


# gets API Key from environment variable OPENAI_API_KEY
client = OpenAI(
  base_url="https://openrouter.ai/api/v1",
  api_key=OPENROUTER_API_KEY
)


completion = client.chat.completions.create(
  model="openchat/openchat-7b:free",
  messages=[
    {
      "role": "user",
      "content": prompt,
    },
  ],
  max_tokens=300,
  temperature=0.7,
  top_p=0.7
)
completion.choices[0].message.content




リスト5-11
import requests
import json


prompt = "こんにちは。あなたは誰？" # @param {type:"string"}
response = requests.post(
  url="https://openrouter.ai/api/v1/chat/completions",
  headers={
    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
    
  },
  data=json.dumps({
    "model": [
      "openchat/openchat-7b:free",
      "mistralai/mistral-7b-instruct:free",
      "huggingfaceh4/zephyr-7b-beta:free"
    ],
    "route": "fallback",
    "messages": [
      {"role": "user", "content": prompt}
    ]
  })
)
response.json()




リスト5-12
import time
from openai import OpenAI


# gets API Key from environment variable OPENAI_API_KEY
client = OpenAI(
  base_url="https://openrouter.ai/api/v1",
  api_key=OPENROUTER_API_KEY
)


completion = client.chat.completions.create(
  model="openchat/openchat-7b:free",
  messages=[
    {
      "role": "user",
      "content": "こんにちは。あなたは誰？",
    },
  ],
  stream=True
)
for item in completion:
  time.sleep(0.1)
  print(item.choices[0].delta.content, end="")




リスト6-1
!curl -X POST "https://api.together.xyz/inference" \
  -H "Authorization: Bearer 《APIキー》" \
  -H "Content-Type: application/json" \
  -d '{"model": "togethercomputer/StripedHyena-Nous-7B", ⏎
    "prompt": "Q: あなたは誰ですか。\nA:"}'




リスト6-2
TOGETHER_API = "《APIキー》"




リスト6-3
import requests
import json


prompt = "" # @param {type:"string"}
model_id = "togethercomputer/StripedHyena-Nous-7B"


response = requests.post(
  url="https://api.together.xyz/inference",
  headers={
    "Authorization": f"Bearer {TOGETHER_API}",
    "Content-Type": "application/json"
  },
  data=json.dumps({
    "model": model_id,
    "prompt": prompt
  })
)
response




リスト6-4
import json


# Extract the generated text from the Together inference response.
# Fixed the misspelled variable name ("resopnse_json").
response_json = response.json()
response_json['choices'][0]['text']




リスト6-5
response = requests.post(
  url="https://api.together.xyz/v1/completions",
  headers={
    "Authorization": f"Bearer {TOGETHER_API}",
    "Content-Type": "application/json"
  },
  data=json.dumps({
    "model": model_id,
    "prompt": prompt,
    "max_tokens": 200,
    "temperature": 0.7,
    "top_p": 0.5,
    "top_k": 30
  })
)




リスト6-6
import requests
import json


prompt = "" # @param {type:"string"}
model_id = "togethercomputer/StripedHyena-Nous-7B"


response = requests.post(
  url="https://api.together.xyz/v1/chat/completions",
  headers={
    "Authorization": f"Bearer {TOGETHER_API}",
    "Content-Type": "application/json"
  },
  data=json.dumps({
    "model": model_id,
    "messages": [
      {"role":"user", "content":prompt}
    ]
  })
)
response




リスト6-7
import json


# Extract the assistant message from the chat-completions response.
# Fixed the misspelled variable name ("resopnse_json").
response_json = response.json()
response_json["choices"][0]["message"]["content"]




リスト6-8
!pip install openai --q -U




リスト6-9
from openai import OpenAI


message = "" # @param {type:"string"}
model_id = "togethercomputer/StripedHyena-Nous-7B"


client = OpenAI(api_key=TOGETHER_API,
  base_url='https://api.together.xyz',
)


response = client.chat.completions.create(
  model=model_id,
  messages=[
    {
      "role": "system",
      "content": "あなたは日本語のAIアシスタントです。",
    },
    {
      "role": "user",
      "content": message,
    }
  ]
)
response




リスト6-10
response.choices[0].message.content




リスト6-11
!pip install together --q -U




リスト6-12
import together


prompt = "" # @param {type:"string"}
model_id = "togethercomputer/StripedHyena-Nous-7B"


together.api_key = TOGETHER_API


response = together.Complete.create(
  model=model_id,
  prompt=prompt)
response




リスト6-13
response['output']['choices'][0]["text"]




リスト6-14
import together


prompt = "" # @param {type:"string"}
model_id = "stabilityai/stable-diffusion-2-1"


response = together.Image.create(
  model=model_id,
  prompt=prompt,
  height=512, width=512,
  results=1)
response




リスト6-15
import base64
from IPython.display import Image


image = response["output"]["choices"][0]
b64_decode = base64.b64decode(image["image_base64"])
with open("image.png", "wb") as f:
  f.write(b64_decode)
Image(data=b64_decode)




リスト7-1
!pip install langchain --q




リスト7-2
!pip install langchain-openai --q




リスト7-3
OPENAI_API_KEY = "《APIキー》"




リスト7-4
from langchain_openai import OpenAI


llm = OpenAI(openai_api_key=OPENAI_API_KEY)
llm




リスト7-5
prompt = "" # @param{type:"string"}


llm.invoke(prompt)




リスト7-6
# Fixed model name: "gpt-3.7-turbo-instruct" does not exist; the
# completion-style OpenAI model is "gpt-3.5-turbo-instruct".
llm = OpenAI(
  openai_api_key=OPENAI_API_KEY,
  model_name="gpt-3.5-turbo-instruct")
llm




リスト7-7
# Fixed model name: "gpt-3.7-turbo-instruct" does not exist; the
# completion-style OpenAI model is "gpt-3.5-turbo-instruct".
llm = OpenAI(
  openai_api_key=OPENAI_API_KEY,
  model_name="gpt-3.5-turbo-instruct",
  max_tokens=300,
  temperature=0.7,
  top_p=0.7)
llm




リスト7-8
# Fixed parameter name: LangChain's OpenAI wrapper takes "streaming", not
# "stream"; an unknown "stream" kwarg is only forwarded to model_kwargs.
llm = OpenAI(openai_api_key=OPENAI_API_KEY, streaming=True)
llm




リスト7-9
import time


prompt = "" # @param {type:"string"}


response = llm.stream(prompt)
for item in response:
  time.sleep(0.1)
  print(item.content, end="")




リスト7-10
from langchain_openai import ChatOpenAI


llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY)
llm




リスト7-11
from langchain.schema import HumanMessage


text = "Hello. Who are you?" # @param {type:"string"}
messages = [HumanMessage(content=text)]


llm.invoke(messages)




リスト7-12
from langchain.schema import HumanMessage, AIMessage


messages = []


while True:
  prompt = input("prompt: ")
  if prompt:
    messages.append(HumanMessage(content=prompt))
    result = llm.invoke(messages)
    print(f"AI: {result.content}")
    messages.append(result)
  else:
    break
print("***** finished. *****")




リスト7-13
from langchain.schema import HumanMessage, AIMessage, SystemMessage


messages = [
  SystemMessage(content="あなたは英訳アシスタントです。ユーザーの⏎
    入力を全て英訳して答えなさい。")
]




リスト7-14
!pip install langchain-google-genai --q -U




リスト7-15
GOOGLE_API_KEY = "《APIキー》"




リスト7-16
from langchain_google_genai import GoogleGenerativeAI


llm = GoogleGenerativeAI(
  model="gemini-pro",
  google_api_key=GOOGLE_API_KEY)
llm




リスト7-17
prompt = "" # @param {type:"string"}


llm.invoke(prompt)




リスト7-18
from langchain_google_genai import ChatGoogleGenerativeAI


llm = ChatGoogleGenerativeAI(
  model="gemini-pro",
  google_api_key=GOOGLE_API_KEY)
llm




リスト7-19
from langchain.schema import HumanMessage


prompt = "" # @param {type:"string"}
# Fixed: the original wrapped an undefined variable "text"; this listing
# defines "prompt".
messages = [HumanMessage(content=prompt)]
llm.invoke(messages)




リスト7-20
!pip install langchain-anthropic --q -U




リスト7-21
ANTHROPIC_API_KEY = "《APIキー》"


 《APIキー》には、先ほどAnthropicコンソールで作成したAPIキーの値を文字列で指定します。
リスト7-22
from langchain_anthropic import ChatAnthropic


llm = ChatAnthropic(
  model="claude-2.1",
  anthropic_api_key=ANTHROPIC_API_KEY
)
llm




リスト7-23
from langchain.schema import HumanMessage


text = "" # @param {type:"string"}
messages = [HumanMessage(content=text)]


llm.invoke(messages)




リスト7-24
!pip install huggingface_hub --q -U




リスト7-25
from getpass import getpass
import os


HUGGINGFACEHUB_API_TOKEN = getpass()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN




リスト7-26
from langchain_community.llms import HuggingFaceHub


llm = HuggingFaceHub(repo_id="rinna/japanese-gpt2-medium")
llm




リスト7-27
prompt = "" # @param {type:"string"}
llm.invoke(prompt)




リスト7-28
llm = HuggingFaceHub(
  repo_id="rinna/japanese-gpt2-medium",
  task="text-generation",
  model_kwargs={
    "max_new_tokens": 100,
    "temperature": 0.1,
  })




リスト7-29
!pip install transformers --q -U




リスト7-30
from huggingface_hub import login
login()




リスト7-31
from langchain_community.chat_models.huggingface import ChatHuggingFace


chat_model = ChatHuggingFace(llm=llm)
chat_model




リスト7-32
from langchain.schema import HumanMessage


prompt = "" # @param {type:"string"}
messages = [
  HumanMessage(content=prompt)
]
# Fixed: pass the constructed messages list — the original invoked with the
# raw prompt string, leaving "messages" unused.
chat_model.invoke(messages)




リスト7-33
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline


hf = HuggingFacePipeline.from_model_id(
  model_id="gpt2",
  task="text-generation",
  pipeline_kwargs={"max_new_tokens": 100},
)




リスト7-34
prompt = "" # @param {type:"string"}
hf.invoke(prompt )




リスト7-35
from transformers import AutoModelForCausalLM, AutoTokenizer


model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)




リスト7-36
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import pipeline


pipe = pipeline(
  "text-generation", 
  model=model, 
  tokenizer=tokenizer, 
  max_new_tokens=100)
hf = HuggingFacePipeline(pipeline=pipe)
hf




リスト7-37
prompt = "Hi, there!" # @param {type:"string"}
hf.invoke(prompt)




リスト7-38
prompt = "" # @param {type:"string"}


input_ids = tokenizer.encode(prompt, return_tensors='pt')
gen_tokens = model.generate(input_ids, max_length=50)
generated = tokenizer.batch_decode(gen_tokens)
generated




リスト7-39
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub


template = PromptTemplate.from_template(
  "tell me a poem about {topic}")


llm = HuggingFaceHub(
  repo_id="openai-community/gpt2",
  task="text-generation")




リスト7-40
prompt ="cat" # @param {type:"string"}


chain = template | llm
chain.invoke({"topic": prompt})




リスト7-41
prompt ="" # @param {type:"string"}


result = template.invoke({"topic":prompt})
resp = llm.invoke(input=result)
resp




リスト7-42
prompt ="sunday" # @param {type:"string"}


def format_input(topic):
  """Wrap the raw topic string into the dict the prompt template expects."""
  # Renamed the parameter: the original shadowed the builtin "str".
  return {"topic": '"' + topic + '" '}


def format_output(text):
  """Collapse double newlines in the model output into single spaces."""
  return text.replace('\n\n', ' ')


# Plain functions are auto-wrapped as RunnableLambda when piped together
# with LangChain runnables (template / llm).
chain = format_input | template | llm | format_output
chain.invoke(prompt)




リスト7-43
def prime_list(n):
  """Return the list of primes from 2 up to n, seeded with 2."""
  found = [2]
  for candidate in range(3, n + 1):
    # Keep the candidate only if no previously found prime divides it.
    if all(candidate % p != 0 for p in found):
      found.append(candidate)
  return found


def list_sum(arr):
  """Return the sum of the numbers in arr."""
  return sum(arr)


r = prime_list | list_sum
r.invoke(100)




リスト7-44
from langchain_core.runnables import RunnableLambda


number = 0 # @param {type:"integer"}


r = RunnableLambda(prime_list) | RunnableLambda(list_sum)
r.invoke(number)




リスト7-45
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory


conversation = ConversationChain(
  llm=llm,
  verbose=False,
  memory=ConversationBufferMemory()
)




リスト7-46
flag = True
while flag:
  prompt = input('prompt: ')
  if prompt == "":
    flag = False
  else:
    response = conversation.invoke(input=prompt)
    print(f'AI: {response["response"]}')
    print("-------------------------------------------")
print('***** finished. ****')




リスト7-47
for message in conversation.memory.chat_memory.messages:
  print(message.content)




リスト7-48
memory = ConversationBufferMemory()


memory.chat_memory.add_user_message("あなたは誰ですか。")
memory.chat_memory.add_ai_message("私は、リカです。29歳のOLです。")
memory.chat_memory.add_user_message("どんな仕事をしていますか。")
memory.chat_memory.add_ai_message("建築事務所で設計士をしています。")


memory.chat_memory.messages.insert(0,
  SystemMessage("あなたはOLです。そのつもりで会話して下さい。"))


conversation = ConversationChain(
  llm=llm,
  verbose=False,
  memory=memory
)
conversation 




リスト8-1
!pip install litellm --q -U




リスト8-2
import os




リスト8-3
from litellm import completion


prompt = "" # @param {type:"string"}


messages = [
  {"role": "user", "content":prompt}
]


response = completion(
  model="gpt-3.5-turbo", 
  messages=messages)
response




リスト8-4
response.choices[0]["message"]




リスト8-5
ANTHROPIC_API = "《Anthropic APIキー》"
os.environ["ANTHROPIC_API_KEY"] = ANTHROPIC_API




リスト8-6
prompt = "" # @param {type:"string"}


response = completion(
  model="claude-2.1",
  messages=[
    {"role": "user", "content": prompt}
  ]
)


message = response.choices[0].message
print(f"{message.role}: {message.content}")




リスト8-7
REPLICATE_API = "《ReplicateのAPIキー》" 
os.environ["REPLICATE_API_KEY"] = REPLICATE_API




リスト8-8
prompt = "" # @param {type:"string"}


model_id = "meta/llama-2-7b-chat:f1d50bb24186c52daae319ca8366e⏎
  53debdaa9e0ae7ff976e918df752732ccc4"


response = completion(
  model=f"replicate/{model_id}",
  messages=[
    {"role": "user", "content": prompt}
  ]
)


message = response.choices[0].message
print(f"{message.role}: {message.content}")




リスト8-9
from litellm import completion


prompt = "" # @param {type:"string"}


messages = [
  {"role": "user", "content":prompt}
]


response = completion(
  model="gpt-4", 
  messages=messages,
  temperature=0.7,
  max_tokens=100,
  top_p=0.5)
message = response.choices[0].message
print(f"{message.role}: {message.content}")




リスト8-10
import time
prompt = "" # @param {type:"string"}


response = completion(
  model="gpt-3.5-turbo", 
  messages=[
    {"role": "user", "content": prompt}
  ], 
  stream=True)
for part in response:
  time.sleep(0.1)
  print(part.choices[0].delta.content or "", end="")




リスト8-11
# Fixed the misspelled constant name ("HUGGINGEFACE" -> "HUGGINGFACE").
HUGGINGFACE_API = "《Hugging FaceのAPIトークン》"
os.environ["HUGGINGFACE_API_KEY"] = HUGGINGFACE_API




リスト8-12
import litellm
repository_id = "meta/llama-2-7b-chat:《ID》"


litellm.register_prompt_template(
  model=repository_id,
  initial_prompt_value="You are a good assistant", 
  roles={
    "system": {
      "pre_message": "[INST]\n<<SYS>>\n",
      "post_message": "\n<</SYS>>\n [/INST]\n" 
    },
    "user": { 
      "pre_message": "[INST] ", 
      "post_message": " [/INST]" 
    }, 
    "assistant": {
      "pre_message": "assistant:",
      "post_message": "\n" 
    }
  },
  final_prompt_value="Now answer as best you can:"
)




リスト8-13
prompt = "" # @param {type:"string"}
messages = [{"role": "user", "content": prompt}] 


response = completion(
  model=f"huggingface/{repository_id}", 
  messages=messages)
print(response['choices'][0]['message']['content'])
prompt = "Hi, there!" # @param {type:"string"}


model_id = "meta/llama-2-7b-chat:《ID》"


response = completion(
  model=f"huggingface/{repository_id}",
  api_base="https://《バックエンド》",
  messages=[
    {"role": "user", "content": prompt}
  ]
)


message = response.choices[0].message
print(f"{message.role}: {message.content}")




リスト8-14
from litellm import batch_completion


prompt_1 = "" # @param {type:"string"}
prompt_2 = "" # @param {type:"string"}


responses = batch_completion(
  model="gpt-4",
  messages = [
    [
      {"role": "user", "content": prompt_1}
    ],
    [
      {"role": "user", "content": prompt_2 }
    ]
  ]
)
responses




リスト8-15
for response in responses:
  print("*", response.choices[0].message.content)




リスト8-16
from litellm import batch_completion_models_all_responses


prompt = "" # @param {type:"string"}


responses = batch_completion_models_all_responses(
  models=["gpt-4", "claude-2.1"],
  messages = [
    {"role": "user", "content": prompt}
  ]
)
responses




リスト8-17
from litellm import image_generation


prompt = "" # @param {type:"string"}


response = image_generation(
  model="dall-e-2", 
  prompt=prompt,
  size="512x512",
  n=3)
response




リスト8-18
from IPython.display import display, HTML


html_code = ""


for item in response.data:
  url = item["url"]
  html_code += f'<img src="{url}" width="256" height="256">'
display(HTML(html_code))




リスト8-19
from litellm import image_generation


prompt = "" # @param {type:"string"}


response = image_generation(
  model="dall-e-2", 
  prompt=prompt,
  size="512x512",
  response_format="b64_json",
  n=3)
response




リスト8-20
import base64
from datetime import datetime
from IPython.display import display, HTML


html_code = ""


for item in response.data:
  dt_str = str(datetime.now())
  base64_data = item["b64_json"]
  html_code += f'<img src="data:image/png;base64,⏎
    {base64_data}" width="256" height="256">'
  binary_data = base64.b64decode(base64_data)
  with open(f"{dt_str}.png", "wb") as f:
    f.write(binary_data)


display(HTML(html_code))