Computer Science/인공지능,딥러닝

[Rag, OpenAI] RAG 기반 오만과 편견 챗봇 구축하기 (LangChain + OpenAI + Streamlit)

Lv.Forest 2025. 4. 30. 20:00


Jane Austen의 Pride and Prejudice PDF 파일을 기반으로, GPT와 LangChain을 활용해 RAG 기반 대화형 챗봇을 만들어보았습니다.


📁 1. PDF 파일 준비

project/
 └── pdf/
     └── Pride_and_Prejudice.pdf
     
먼저 프로젝트 루트 디렉토리 아래에 `pdf` 폴더를 만들고, 여기에 오만과 편견 PDF 파일을 넣어줍니다:



🧠 2. GPT 처리 모듈: `gpt_proc.py`

# gpt_proc.py

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain

# Vector-DB builder
def create_vector(pdf_path='pdf/Pride_and_Prejudice.pdf'):
    """Build a FAISS vector store from a PDF file.

    Args:
        pdf_path: Path to the source PDF. Defaults to the Pride and
            Prejudice file used by this tutorial, so existing callers
            (``create_vector()``) keep working unchanged.

    Returns:
        A FAISS vector store over 500-character chunks (20-character
        overlap), embedded with OpenAI's text-embedding-ada-002 model.
    """
    pdfloader = PyPDFLoader(pdf_path)
    document = pdfloader.load()
    # Split the document so each embedding covers a manageable span of text.
    txt_split = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
    docs = txt_split.split_documents(document)

    # NOTE(review): embedding happens eagerly here — requires OPENAI_API_KEY
    # in the environment and incurs one embedding call per chunk.
    embedding = OpenAIEmbeddings(model='text-embedding-ada-002')
    vdb = FAISS.from_documents(docs, embedding)
    return vdb

# Conversational answer helper
def convesation_response(question, hist):
    """Return the retrieval chain's answer to *question* given chat history *hist*."""
    payload = {'question': question, 'chat_history': hist}
    reply = qa_chain.invoke(payload)
    return reply['answer']

# Initialize the model and the retrieval chain at import time.
# NOTE(review): create_vector() embeds the whole PDF on every import of this
# module — requires OPENAI_API_KEY and incurs API cost each run.
gpt = ChatOpenAI(model='gpt-3.5-turbo', temperature=0)  # temperature=0 → deterministic answers
vdb = create_vector()
retriever = vdb.as_retriever()
qa_chain = ConversationalRetrievalChain.from_llm(llm=gpt, retriever=retriever)



💬 3. Streamlit 챗봇 UI: `chatbot.py`

# chatbot.py
# Streamlit front-end for the RAG chatbot defined in gpt_proc.py.

import streamlit as st
from streamlit_chat import message
from gpt_proc import convesation_response

st.title("📚 RAG 기반 오만과 편견 챗봇")

# Initialize session state so the conversation survives Streamlit reruns.
if "history" not in st.session_state:
    st.session_state['history'] = []

# Container that holds the rendered conversation.
response_container = st.container()

# Input form; clear_on_submit empties the text box after each question.
with st.form("rag_chat", clear_on_submit=True):
    user_input = st.text_input("질문을 입력하세요:", key='input')
    submit = st.form_submit_button("Send")

if submit and user_input:
    response = convesation_response(user_input, st.session_state['history'])
    st.session_state['history'].append((user_input, response))

# Render the full history on every rerun (not only on submit), with unique
# keys so streamlit_chat does not raise duplicate-widget errors across turns.
with response_container:
    for i, (u, r) in enumerate(st.session_state['history']):
        message(u, avatar_style="fun-emoji", is_user=True, key=f"user_{i}")
        message(r, avatar_style="bottts", key=f"bot_{i}")




▶️ 4. 실행 방법

터미널에서 아래 명령어로 실행할 수 있습니다:

streamlit run chatbot.py

 

 

 

Pride_and_Prejudice.pdf
2.52MB

위의 PDF 파일을 프로젝트 폴더의 pdf 디렉터리 아래에 넣어줍니다.

 

- gpt_proc.py

from glob import glob

from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader, JSONLoader, CSVLoader, WebBaseLoader
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.vectorstores import FAISS
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings


def create_vector():
    """Load the Pride and Prejudice PDF, chunk it, and index it in FAISS.

    Returns a FAISS vector store built from 500-character chunks
    (20-character overlap) embedded with text-embedding-ada-002.
    """
    pages = PyPDFLoader('pdf/Pride_and_Prejudice.pdf').load()

    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
    chunks = splitter.split_documents(pages)

    return FAISS.from_documents(
        chunks,
        OpenAIEmbeddings(model='text-embedding-ada-002'),
    )

def convesation_response(question, hist):
    """Ask the retrieval chain *question* with prior chat history *hist*."""
    chain_output = qa_chain.invoke({'question': question, 'chat_history': hist})
    answer = chain_output['answer']
    return answer

# Initialize the chat model and retrieval chain at import time.
# NOTE(review): create_vector() embeds the whole PDF on every import of this
# module — requires OPENAI_API_KEY and incurs API cost each run.
gpt = ChatOpenAI( model='gpt-3.5-turbo', temperature=0)
vdb = create_vector()
retriever=  vdb.as_retriever( )
qa_chain = ConversationalRetrievalChain.from_llm(llm=gpt, 
                                                     retriever=retriever )

 

위의 코드에서 PDF가 여러 개일 경우에는 create_vector 함수를 아래의 create_vector_db 함수와 같이 작성해주면 됩니다. (glob 함수를 사용하므로 `from glob import glob` 임포트가 필요합니다.)

def create_vector_db():
    """Build one FAISS vector store spanning every PDF under ./test_source.

    Returns:
        A FAISS store over 700-character chunks (20-character overlap) of
        all matched PDFs, embedded with the default OpenAIEmbeddings model.

    Requires ``from glob import glob`` and OPENAI_API_KEY in the environment.
    """
    # The splitter's configuration never changes: create it once instead of
    # re-instantiating it on every loop iteration.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=20)
    alldoc = []
    for p in glob('./test_source/*.pdf'):
        documents = PyPDFLoader(p).load()
        alldoc.extend(text_splitter.split_documents(documents))
    # NOTE(review): if no PDFs match, FAISS.from_documents receives an empty
    # list and will fail — confirm the directory is populated before running.
    embedding = OpenAIEmbeddings()
    vector_db = FAISS.from_documents(alldoc, embedding)
    return vector_db

# Initialize the vector DB and chain as module-level globals so that
# convesation_response() can use them.
# NOTE(review): embeds every PDF on import — requires OPENAI_API_KEY.
vdb = create_vector_db()
gpt = ChatOpenAI(model='gpt-3.5-turbo', temperature=0)
retriever = vdb.as_retriever()
qa_chain = ConversationalRetrievalChain.from_llm(llm=gpt, retriever=retriever)

- chatbot.py

# Streamlit front-end for the RAG chatbot defined in gpt_proc.py.
import streamlit as st
from streamlit_chat import message
from gpt_proc import convesation_response

st.title("RAG 기반 대화형 챗봇")

# Initialize session state so the conversation survives Streamlit reruns.
if "history" not in st.session_state:
    st.session_state['history'] = []

# Container that holds the rendered conversation.
response_container = st.container()

# Input form; clear_on_submit empties the text box after each question.
with st.form("rag chat", clear_on_submit=True):
    user_input = st.text_input('질문을 입력하세요:', key='input')
    submit = st.form_submit_button('send')

if submit and user_input:
    # Generate an answer and record the turn.
    response = convesation_response(user_input, st.session_state['history'])
    st.session_state['history'].append((user_input, response))

# Re-render the whole history on every rerun (not only on submit), with
# unique keys so streamlit_chat does not raise duplicate-widget errors.
with response_container:
    for i, (u, r) in enumerate(st.session_state['history']):
        message(u, avatar_style="fun-emoji", is_user=True, key=f"user_{i}")
        message(r, avatar_style="bottts", key=f"bot_{i}")

 

터미널 창에 아래 명령어를 입력하여 실행합니다:

streamlit run chatbot.py