├── demo.mp4
├── images
│   ├── train-ar.png
│   ├── train-eng.png
│   ├── train-ranks.png
│   └── train-whisper.png
├── ollama-screenshot.png
├── requirements.txt
├── flagged
│   ├── Upload Audio
│   │   ├── 2c6cb00f800ab73ced09
│   │   │   └── audio.wav
│   │   ├── 2e46742711dca4132463
│   │   │   └── audio.wav
│   │   ├── a7b0422bbab0a748ce04
│   │   │   └── audio.wav
│   │   ├── c4d684f80d3560afcb4e
│   │   │   └── audio.wav
│   │   ├── d71908811285b5857c65
│   │   │   └── audio.wav
│   │   └── fe2d55bd04d866d12087
│   │       └── audio.wav
│   └── log.csv
├── to_json.py
├── LICENSE
├── evaluate.py
├── parallel_arabic_preprocess.py
├── app.py
├── to_jsonl.py
├── speech_dataset.py
├── preprocess_arazn.py
├── README.md
├── .gitignore
└── train.py

/demo.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/demo.mp4
--------------------------------------------------------------------------------
/images/train-ar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/images/train-ar.png
--------------------------------------------------------------------------------
/images/train-eng.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/images/train-eng.png
--------------------------------------------------------------------------------
/ollama-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/ollama-screenshot.png
--------------------------------------------------------------------------------
/images/train-ranks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/images/train-ranks.png
--------------------------------------------------------------------------------
/images/train-whisper.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/images/train-whisper.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | pandas
 2 | bitsandbytes
 3 | accelerate
 4 | peft
 5 | trl
 6 | bert_score
 7 | transformers
 8 | pymeteor
 9 | gradio
10 | ollama
11 | sacrebleu
12 | tqdm
13 | datasets
14 | torch
15 | numpy
--------------------------------------------------------------------------------
/flagged/Upload Audio/2c6cb00f800ab73ced09/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/2c6cb00f800ab73ced09/audio.wav
--------------------------------------------------------------------------------
/flagged/Upload Audio/2e46742711dca4132463/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/2e46742711dca4132463/audio.wav
--------------------------------------------------------------------------------
/flagged/Upload Audio/a7b0422bbab0a748ce04/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/a7b0422bbab0a748ce04/audio.wav
--------------------------------------------------------------------------------
/flagged/Upload Audio/c4d684f80d3560afcb4e/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/c4d684f80d3560afcb4e/audio.wav
--------------------------------------------------------------------------------
/flagged/Upload Audio/d71908811285b5857c65/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/d71908811285b5857c65/audio.wav
--------------------------------------------------------------------------------
/flagged/Upload Audio/fe2d55bd04d866d12087/audio.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ahmedheakl/arazn-llm/HEAD/flagged/Upload Audio/fe2d55bd04d866d12087/audio.wav
--------------------------------------------------------------------------------
/to_json.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This script converts the data in dev.csv, test.csv, and train.csv into a
 3 | separate json file for each dataset. Since each file contains three columns,
 4 | the json file will contain a list of entries, where each entry is a dictionary
 5 | with three keys: "code_switched", "arabic", and "english". The value of each
 6 | key is the text in the corresponding column.
 7 | """
 8 | 
 9 | import json
10 | 
11 | import pandas as pd
12 | 
13 | 
14 | def main():
15 |     """Convert the data in dev.csv, test.csv, and train.csv into a json file."""
16 |     ds_types = ["dev", "test", "train"]
17 |     for ds_type in ds_types:
18 |         file = f"data/{ds_type}.csv"
19 |         df = pd.read_csv(file, encoding="utf-8")
20 |         df = df.to_dict(orient="records")
21 | 
22 |         # write json and take into account the encoding
23 |         with open(f"data/{ds_type}.json", "w", encoding="utf-8") as f:
24 |             json.dump(df, f, ensure_ascii=False, indent=4)
25 | 
26 | 
27 | if __name__ == "__main__":
28 |     main()
29 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2024 Ahmed Heakl
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
 1 | """This script evaluates the model's translation results."""
 2 | import pandas as pd
 3 | import sacrebleu
 4 | from tqdm import tqdm
 5 | 
 6 | translation_path = "translations/llama3-ara.csv"
 7 | 
 8 | df = pd.read_csv(translation_path)
 9 | tot = 0
10 | bert_p = 0
11 | bert_r = 0
12 | meteor = 0
13 | eed = 0
14 | bert_f1 = 0
15 | for record in tqdm(df.to_dict(orient='records')):
16 |     reference = record['target']
17 |     hypothesis = record['generated']
18 |     bleu = sacrebleu.sentence_bleu(hypothesis, [reference], tokenize="intl",
19 |                                    smooth_method='add-k',
20 |                                    lowercase=True)
21 |     tot += bleu.score
22 |     bert_f1 += float(record['bert_f1'])
23 |     bert_r += float(record['bert_r'])
24 |     bert_p += float(record['bert_p'])
25 |     meteor += float(record['meteor_score'])
26 |     eed += float(record['eed_score'])
27 | 
28 | print(f"Number of records: {len(df)}")
29 | print(f"SacreBLEU score: {tot/len(df):.2f}")
30 | print(f"BertScore F1: {bert_f1/len(df):.2f}")
31 | print(f"BertScore precision: {bert_p/len(df):.2f}")
32 | print(f"BertScore recall: {bert_r/len(df):.2f}")
33 | print(f"Meteor score: {meteor/len(df):.2f}")
34 | print(f"EED score: {eed/len(df):.2f}")
--------------------------------------------------------------------------------
/flagged/log.csv:
--------------------------------------------------------------------------------
 1 | stream,Upload Audio,stream,Transcription,Translation,flag,username,timestamp
 2 | ,,,اشتغلت science teacher فترة كده,"i was a science teacher, i did that .",,,2024-06-08 22:40:48.169277
 3 | ,flagged/Upload Audio/fe2d55bd04d866d12087/audio.wav,,اشتغلت science teacher فترة كده.. فترة كده,that period .,,,2024-06-08 22:40:59.018296
 4 | ,flagged/Upload Audio/2e46742711dca4132463/audio.wav,,اشتغلت science teacher فترة كده.. فترة كده.. فترة كده.. اشتغلت science teacher,i worked as a science teacher .,,,2024-06-08 22:41:05.882129
 5 | ,,,,,,,2024-06-08 22:41:07.440919
 6 | ,flagged/Upload Audio/a7b0422bbab0a748ce04/audio.wav,,مشاهد,scenes .,,,2024-06-08 22:41:11.551949
 7 | ,flagged/Upload Audio/d71908811285b5857c65/audio.wav,,اشتغلت science teacher فترة كده.. فترة كده.. فترة كده.. اشتغلت science teacher,i worked as a science teacher .,,,2024-06-08 22:41:11.552544
 8 | ,flagged/Upload Audio/2c6cb00f800ab73ced09/audio.wav,,اشتغلت science teacher فترة كده.. فترة كده.. فترة كده.. اشتغلت science teacher,i worked as a science teacher .,,,2024-06-08 22:41:17.890357
 9 | ,flagged/Upload Audio/c4d684f80d3560afcb4e/audio.wav,,اشتغلت science teacher فترة كده.. فترة كده.. فترة كده.. اشتغلت science teacher,i worked as a science teacher .,,,2024-06-08 22:42:18.703554
10 | 
--------------------------------------------------------------------------------
/parallel_arabic_preprocess.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import re
 3 | from tqdm import tqdm
 4 | 
 5 | import pandas as pd
 6 | 
 7 | DATA_ROOT = "arabic-parallel"
 8 | NUM_ROWS = 24_000
 9 | 
10 | def clean_text(text):
11 |     # Strip punctuation and special characters (e.g. the directional mark U+202B),
12 |     # keeping only word characters and whitespace.
13 |     cleaned_text = re.sub(r'[^\w\s]', '', text)
14 |     return cleaned_text.strip()
15 | 
16 | def main():
17 |     files = [[f"{DATA_ROOT}/{folder}/{file}", folder] for folder in os.listdir(DATA_ROOT) for file in os.listdir(f"{DATA_ROOT}/{folder}")]
18 | 
19 |     frames = []
20 |     for file, folder in tqdm(files):
21 |         if folder != "Songs":
22 |             file_df = pd.read_excel(file, header=None)
23 |             file_df = file_df.iloc[:, :2]
24 |             file_df.columns = ["arabic", "english"]
25 |         else:
26 |             file_df = pd.read_excel(file)
27 |             file_df = file_df[["Egyptian Arabic Lyrics", "English Translation"]]
28 |             file_df.columns = ["arabic", "english"]
29 | 
30 |         file_df = file_df.dropna()
31 |         file_df['arabic'] = file_df['arabic'].apply(clean_text)
32 |         file_df['english'] = file_df['english'].apply(clean_text)
33 |         frames.append(file_df)
34 | 
35 |     # Concatenate once with a fresh index; growing the dataframe inside the loop
36 |     # and dropping index label 0 afterwards would silently drop rows from every file.
37 |     df = pd.concat(frames, ignore_index=True)
38 |     df = df.iloc[:NUM_ROWS]
39 | 
40 |     output_path = f"arabic_parallel_{NUM_ROWS}.jsonl"
41 |     df.to_json(output_path, orient="records", lines=True, force_ascii=False)
42 |     print(f"Data with {NUM_ROWS} records written to {output_path}")
43 | 
44 | 
45 | if __name__ == "__main__":
46 |     main()
47 | 
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
 1 | import gradio
 2 | import ollama
 3 | from transformers import pipeline
 4 | import numpy as np
 5 | 
 6 | ENG_MODEL = "arazn-llama3-eng"
 7 | WHISPER_SMALL = "ahmedheakl/arazn-whisper-small-v2"
 8 | TEMPERATURE = 0.6
 9 | TOP_P = 0.9
10 | 
11 | transcriber = pipeline("automatic-speech-recognition", model=WHISPER_SMALL, device="cuda:0")
12 | 
13 | def generate_text_eng(prompt):
14 |     response = ollama.chat(model=ENG_MODEL, messages=[
15 |         {"role": "user", "content": prompt}
16 |     ], options=ollama.Options(num_gpu=1, main_gpu=1, temperature=TEMPERATURE, top_p=TOP_P))
17 |     return response['message']['content']
18 | 
19 | def transcribe(stream, audio, text_input=None):
20 |     # Text input bypasses ASR and goes straight to translation.
21 |     if text_input is not None:
22 |         return None, text_input, generate_text_eng(text_input)
23 | 
24 |     # Normalize the audio to [-1, 1], guarding against silent (all-zero) input.
25 |     sr, y = audio
26 |     y = y.astype(np.float32)
27 |     peak = np.max(np.abs(y))
28 |     if peak > 0:
29 |         y /= peak
30 | 
31 |     # Keep a rolling buffer of the stream, capped at ~500k samples.
32 |     if stream is not None and stream.shape[0] < 500_000:
33 |         stream = np.concatenate([stream, y])
34 |     else:
35 |         stream = y
36 | 
37 |     prompt = transcriber({"sampling_rate": sr, "raw": stream})["text"]
38 |     # Translate only the last sentence of the running transcription.
39 |     to_be_translated = prompt.split(".")[-1]
40 |     return stream, prompt, generate_text_eng(to_be_translated)
41 | 
42 | demo = gradio.Interface(
43 |     transcribe,
44 |     ["state", gradio.Audio(label="Upload Audio", sources=["microphone"], streaming=True), gradio.Textbox(label="Or Enter Text")],
45 |     ["state", gradio.Textbox(label="Transcription"), gradio.Textbox(label="Translation (English)")],
46 |     title="Whisper to Ollama",
47 |     description="Upload an audio clip or enter text and get a response from the Ollama AI.",
48 |     live=True
49 | )
50 | 
51 | if __name__ == "__main__":
52 |     demo.launch(share=True)
--------------------------------------------------------------------------------
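Before launching `app.py`, it can help to verify that the translation model is reachable. Below is a minimal smoke test, a sketch rather than part of the repository: it assumes the `arazn-llama3-eng` model has already been created in a locally running Ollama instance, and the example sentence is taken from `flagged/log.csv`.

```python
import ollama

# Smoke test: send one code-switched sentence to the model that app.py relies on.
# "arazn-llama3-eng" must already exist locally (e.g. created via `ollama create`).
response = ollama.chat(
    model="arazn-llama3-eng",
    messages=[{"role": "user", "content": "اشتغلت science teacher فترة كده"}],
)
print(response["message"]["content"])  # expected: an English translation
```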
/to_jsonl.py:
--------------------------------------------------------------------------------
 1 | """
 2 | This script converts the data in dev.csv, test.csv, and train.csv into a
 3 | separate jsonl file for each dataset. Since each file contains three columns,
 4 | each line of the jsonl file is a dictionary with three keys: "code_switched",
 5 | "arabic", and "english". The value of each key is the text in the
 6 | corresponding column.
 7 | """
 8 | 
 9 | import json
10 | import pandas as pd
11 | 
12 | DEV_CSV = "data/dev.csv"
13 | TEST_CSV = "data/test.csv"
14 | TRAIN_CSV = "data/train.csv"
15 | 
16 | 
17 | def main():
18 |     """Read the data from the csv files and convert it into jsonl files."""
19 |     csv_files = {"dev": DEV_CSV, "test": TEST_CSV, "train": TRAIN_CSV}
20 |     for ds_type, csv_path in csv_files.items():
21 |         df = pd.read_csv(csv_path, encoding="utf-8")
22 |         with open(f"data/{ds_type}.jsonl", "w", encoding="utf-8") as f:
23 |             for i in range(len(df)):
24 |                 entry = {
25 |                     "code_switched": df.iloc[i, 0],
26 |                     "arabic": df.iloc[i, 1],
27 |                     "english": df.iloc[i, 2],
28 |                 }
29 |                 f.write(json.dumps(entry, ensure_ascii=False) + "\n")
30 | 
31 | 
32 | if __name__ == "__main__":
33 |     main()
34 | 
--------------------------------------------------------------------------------
/speech_dataset.py:
--------------------------------------------------------------------------------
 1 | """This script converts an ASR dataset into a Hugging Face dataset.
 2 | 
 3 | Each audio file in ArzEn_SpeechCorpus/recordings contains a full recording. These audio files
 4 | are loaded and divided according to the timestamps in ArzEn_SpeechCorpus/ASR_files/text.
 5 | Each line in "text" contains the following information:
 6 | <utterance_ID> <transcription>, where the utterance ID consists of [speaker_ID]-[corpus_ID]-[Recording_ID]_[timestamp_start]-[timestamp_end],
 7 | and each timestamp is 6 digits giving the time in hundredths of a second. For example, 123456 represents 1234.56 seconds.
 8 | 
 9 | The path to a recording is ArzEn_SpeechCorpus/recordings/[corpus_ID]-[Recording_ID].wav
10 | """
11 | import os
12 | from tqdm import tqdm
13 | from datasets import Dataset, Audio
14 | 
15 | ROOT_DIR = "ArzEn_SpeechCorpus"
16 | OUTPUT_AUDIO_DIR = f"{ROOT_DIR}/audio"
17 | 
18 | def main():
19 |     # Load the text file
20 |     with open(f"{ROOT_DIR}/ASR_files/text") as f:
21 |         lines = f.readlines()
22 | 
23 |     os.makedirs(OUTPUT_AUDIO_DIR, exist_ok=True)
24 | 
25 |     audio_files = os.listdir(f"{ROOT_DIR}/recordings")
26 |     print(f"[INFO] Loaded {len(audio_files)} audio files")
27 | 
28 |     df = {"audio": [], "sentence": []}
29 | 
30 |     for line in tqdm(lines):
31 |         utterance_id, transcription = line.split(" ", 1)
32 | 
33 |         # Parse [speaker_ID]-[corpus_ID]-[Recording_ID]_[timestamp_start]-[timestamp_end]
34 |         _, corpus_id, recording_id, timestamp_end = utterance_id.split("-")
35 |         recording_id, timestamp_start = recording_id.split("_")
36 |         timestamp_start = int(timestamp_start) / 100
37 |         timestamp_end = int(timestamp_end) / 100
38 | 
39 |         audio_file = f"{corpus_id}-{recording_id}.WAV"
40 |         if audio_file not in audio_files:
41 |             print(f"[ERROR] Audio file {audio_file} not found")
42 |             continue
43 | 
44 |         # Cut the utterance out of the full recording with sox.
45 |         new_audio_file = f"{OUTPUT_AUDIO_DIR}/{utterance_id}.wav"
46 |         os.system(f"sox {ROOT_DIR}/recordings/{audio_file} {new_audio_file} trim {timestamp_start} ={timestamp_end}")
47 | 
48 |         df["audio"].append(new_audio_file)
49 |         df['sentence'].append(transcription.strip())
50 | 
51 |     dataset = Dataset.from_dict(df)
52 |     dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
53 |     # Persist the result; without this the dataset would be discarded on exit.
54 |     dataset.save_to_disk(f"{ROOT_DIR}/hf_dataset")
55 | 
56 | 
57 | if __name__ == "__main__":
58 |     main()
--------------------------------------------------------------------------------
/preprocess_arazn.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Load code-switched, arabic, and english triplets. Each language is in a separate folder
 3 | transcriptions, translations_EgyptianArabic, translations_English, and each is divided into
 4 | three files dev.src, test.src, and train.src. This script loads the triplets into a pandas
 5 | dataframe and saves them as csv files.
 6 | 
 7 | Each .src file contains two columns, i.e. the ID and the text. The ID is not used in this
 8 | script. The text is loaded into a pandas dataframe and concatenated into a single dataframe
 9 | for each language. The three dataframes are then concatenated into a single dataframe.
10 | """
11 | 
12 | import re
13 | 
14 | import pandas as pd
15 | 
16 | TRANSCRIPTIONS = "data/transcriptions/"
17 | TRANSLATIONS_EGYPTIAN_ARABIC = "data/translations_EgyptianArabic/"
18 | TRANSLATIONS_ENGLISH = "data/translations_English/"
19 | CODE_SWITCHED = "code_switched"
20 | ENGLISH = "english"
21 | ARABIC = "arabic"
22 | 
23 | 
24 | def preprocess_text(text: str) -> str:
25 |     """
26 |     Text might contain [HES], [LAUGHTER] .. etc.
27 |     This function removes these tags from the text.
28 |     """
29 |     # Remove bracketed/parenthesized annotation tags and transcription markers.
30 |     text = re.sub(r"\[.*?\]", "", text)
31 |     text = re.sub(r"(\[|\()\s*(LAUGHTER|LAUGH|COUGH|NOISE|HES|HUM|BREATH)\s*(\]|\))", " ", text, flags=re.IGNORECASE)
32 |     text = re.sub(r'%#?\w+', " ", text)
33 |     text = re.sub(r"(//|=|#|\(\(|\)\)|@|\$)", " ", text)
34 |     # Collapse whitespace last, since the substitutions above insert spaces.
35 |     text = re.sub(r"\s+", " ", text)
36 | 
37 |     return text.strip()
38 | 
39 | 
40 | def load_triplets():
41 |     """Load the code-switched, arabic, and english triplets into a pandas dataframe.
42 |     Only the text is loaded into the dataframe. The ID is not used.
43 |     """
44 |     folders = [TRANSCRIPTIONS, TRANSLATIONS_EGYPTIAN_ARABIC, TRANSLATIONS_ENGLISH]
45 |     ds_types = ["dev", "test", "train"]
46 |     extensions = ["src", "tgtEg", "tgtEn"]
47 |     columns = [CODE_SWITCHED, ARABIC, ENGLISH]
48 | 
49 |     datasets = {
50 |         "dev": [],
51 |         "test": [],
52 |         "train": [],
53 |     }
54 |     for ds_type in ds_types:
55 |         for i, folder in enumerate(folders):
56 |             extension = extensions[i]
57 |             column = columns[i]
58 |             file = f"{folder}{ds_type}.{extension}"
59 |             df = pd.read_csv(file, sep="\t", header=None, names=["ID", column])
60 |             df = df.drop(columns=["ID"])
61 | 
62 |             df[column] = df[column].apply(preprocess_text)
63 | 
64 |             datasets[ds_type].append(df)
65 | 
66 |     dev = pd.concat(datasets["dev"], axis=1)
67 |     test = pd.concat(datasets["test"], axis=1)
68 |     train = pd.concat(datasets["train"], axis=1)
69 | 
70 |     return dev, test, train
71 | 
72 | 
73 | def save_triplets(dev: pd.DataFrame, test: pd.DataFrame, train: pd.DataFrame):
74 |     """Save triplets as csv files"""
75 |     dev.to_csv("data/dev.csv", index=False)
76 |     test.to_csv("data/test.csv", index=False)
77 |     train.to_csv("data/train.csv", index=False)
78 | 
79 | 
80 | def main():
81 |     """Load triplets and save as csv files"""
82 |     dev, test, train = load_triplets()
83 |     save_triplets(dev, test, train)
84 | 
85 | 
86 | if __name__ == "__main__":
87 |     main()
88 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # ArzEn-LLM: Code-Switched Egyptian Arabic-English Translation and Speech Recognition Using LLMs 🇪🇬🇬🇧
 2 | 
 3 | [![Hugging Face](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-yellow)](http://huggingface.co/collections/ahmedheakl/arazn-llm-662ceaf12777656607b9524e)
 4 | [![arXiv](https://img.shields.io/badge/arXiv-2406.18120-b31b1b.svg)](https://arxiv.org/abs/2406.18120)
 5 | [![Speech Dataset](https://img.shields.io/badge/🗣️%20Speech%20Dataset-Hugging%20Face-blue)](https://huggingface.co/datasets/ahmedheakl/arzen-llm-speech-ds)
 6 | [![Translation Dataset](https://img.shields.io/badge/🔤%20Translation%20Dataset-Hugging%20Face-blue)](https://huggingface.co/datasets/ahmedheakl/arzen-llm-dataset)
 7 | 
 8 | ## Introduction
 9 | 
10 | In recent times, code-switching between Egyptian Arabic and English has become increasingly prevalent. This repository presents our work on developing advanced machine translation (MT) and automatic speech recognition (ASR) systems specifically designed to handle this linguistic phenomenon.
11 | 
12 | ### 🎥 Demo
13 | 
14 | Check out our demo to see ARZEN-LLM in action!
15 | 
16 | https://github.com/ahmedheakl/arazn-llm/assets/52796111/f8d0e8af-5444-4664-b653-7401578e2069
17 | 
18 | ### 🎯 Our Goal
19 | 
20 | Our primary objective is to translate code-switched Egyptian Arabic-English to either English or Egyptian Arabic. We employ state-of-the-art methodologies utilizing large language models such as LLama and Gemma.
21 | 
22 | ### 🔊 ASR Integration
23 | 
24 | In the realm of ASR, we leverage the Whisper model for code-switched Egyptian Arabic recognition. Our experimental procedures encompass:
25 | - Data preprocessing techniques
26 | - Advanced training methodologies
27 | 
28 | We've implemented a consecutive speech-to-text translation system that seamlessly integrates ASR with MT, addressing challenges posed by limited resources and the unique characteristics of the Egyptian Arabic dialect. A minimal sketch of this pipeline is shown below.
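29 | 
30 | For illustration, the consecutive pipeline looks roughly like the following sketch (it assumes a local Ollama instance already serving the `arazn-llama3-eng` model used in `app.py`; `clip.wav` is a placeholder path):
31 | 
32 | ```python
33 | import ollama
34 | from transformers import pipeline
35 | 
36 | # Step 1 (ASR): transcribe code-switched speech with the fine-tuned Whisper model.
37 | asr = pipeline("automatic-speech-recognition", model="ahmedheakl/arazn-whisper-small-v2")
38 | transcript = asr("clip.wav")["text"]
39 | 
40 | # Step 2 (MT): translate the transcript with the LLM served through Ollama.
41 | response = ollama.chat(
42 |     model="arazn-llama3-eng",
43 |     messages=[{"role": "user", "content": transcript}],
44 | )
45 | print(response["message"]["content"])
46 | ```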
47 | 
48 | ### 📊 Performance
49 | 
50 | Our evaluation against established metrics demonstrates promising results:
51 | - **English Translation**: Significant improvement of X% over the state-of-the-art
52 | - **Arabic Translation**: Y% improvement in performance
53 | 
54 | ### 🌟 Why It Matters
55 | 
56 | Code-switching is deeply inherent in spoken languages, making it crucial for ASR systems to effectively handle this phenomenon. This capability enables seamless interaction across various domains, including:
57 | - Business negotiations
58 | - Cultural exchanges
59 | - Academic discourse
60 | 
61 | ## Open-Source Resources
62 | 
63 | We're committed to advancing research in this field. Our models and code are available as open-source resources:
64 | 
65 | - 🤗 **Models**: [Hugging Face Collection](http://huggingface.co/collections/ahmedheakl/arazn-llm-662ceaf12777656607b9524e)
66 | - 🗣️ **Speech Dataset**: [ARZEN-LLM Speech Dataset](https://huggingface.co/datasets/ahmedheakl/arzen-llm-speech-ds)
67 | - 🔤 **Translation Dataset**: [ARZEN-LLM Translation Dataset](https://huggingface.co/datasets/ahmedheakl/arzen-llm-dataset)
68 | - 📄 **Research Paper**: [arXiv:2406.18120](https://arxiv.org/abs/2406.18120)
69 | 
70 | Feel free to explore, contribute, and build upon our work!
71 | 
72 | ```bibtex
73 | @article{heakl2024arzen,
74 |   title={ArzEn-LLM: Code-Switched Egyptian Arabic-English Translation and Speech Recognition Using LLMs},
75 |   author={Heakl, Ahmed and Zaghloul, Youssef and Ali, Mennatullah and Hossam, Rania and Gomaa, Walid},
76 |   journal={arXiv preprint arXiv:2406.18120},
77 |   year={2024}
78 | }
79 | ```
80 | 
81 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
  1 | # Byte-compiled / optimized / DLL files
  2 | __pycache__/
  3 | *.py[cod]
  4 | *$py.class
  5 | 
  6 | # C extensions
  7 | *.so
  8 | 
  9 | # Distribution / packaging
 10 | .Python
 11 | build/
 12 | develop-eggs/
 13 | dist/
 14 | downloads/
 15 | eggs/
 16 | .eggs/
 17 | lib/
 18 | lib64/
 19 | parts/
 20 | sdist/
 21 | var/
 22 | wheels/
 23 | share/python-wheels/
 24 | *.egg-info/
 25 | .installed.cfg
 26 | *.egg
 27 | MANIFEST
 28 | 
 29 | # PyInstaller
 30 | # Usually these files are written by a python script from a template
 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
 32 | *.manifest
 33 | *.spec
 34 | 
 35 | # Installer logs
 36 | pip-log.txt
 37 | pip-delete-this-directory.txt
 38 | 
 39 | # Unit test / coverage reports
 40 | htmlcov/
 41 | .tox/
 42 | .nox/
 43 | .coverage
 44 | .coverage.*
 45 | .cache
 46 | nosetests.xml
 47 | coverage.xml
 48 | *.cover
 49 | *.py,cover
 50 | .hypothesis/
 51 | .pytest_cache/
 52 | cover/
 53 | 
 54 | # Translations
 55 | *.mo
 56 | *.pot
 57 | 
 58 | # Django stuff:
 59 | *.log
 60 | local_settings.py
 61 | db.sqlite3
 62 | db.sqlite3-journal
 63 | 
 64 | # Flask stuff:
 65 | instance/
 66 | .webassets-cache
 67 | 
 68 | # Scrapy stuff:
 69 | .scrapy
 70 | 
 71 | # Sphinx documentation
 72 | docs/_build/
 73 | 
 74 | # PyBuilder
 75 | .pybuilder/
 76 | target/
 77 | 
 78 | # Jupyter Notebook
 79 | .ipynb_checkpoints
 80 | 
 81 | # IPython
 82 | profile_default/
 83 | ipython_config.py
 84 | 
 85 | # pyenv
 86 | # For a library or package, you might want to ignore these files since the code is
 87 | # intended to run in multiple environments; otherwise, check them in:
 88 | # .python-version
 89 | 
 90 | # pipenv
 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
 94 | # install all needed dependencies.
 95 | #Pipfile.lock
 96 | 
 97 | # poetry
 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 | 
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 | 
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 | 
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 | 
119 | # SageMath parsed files
120 | *.sage.py
121 | 
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 | 
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 | 
135 | # Rope project settings
136 | .ropeproject
137 | 
138 | # mkdocs documentation
139 | /site
140 | 
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 | 
146 | # Pyre type checker
147 | .pyre/
148 | 
149 | # pytype static type analyzer
150 | .pytype/
151 | 
152 | # Cython debug symbols
153 | cython_debug/
154 | 
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 | 
162 | translations/
163 | ArzEn_SpeechCorpus/
164 | arazn-preprocessing/
165 | arabic_parallel_24000.jsonl
166 | arabic-parallel/
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
  1 | """Train LLaMa3 on Arazn Dataset"""
  2 | import torch
  3 | from datasets import load_dataset
  4 | from transformers import (
  5 |     AutoModelForCausalLM,
  6 |     AutoTokenizer,
  7 |     BitsAndBytesConfig,
  8 |     TrainingArguments,
  9 | )
 10 | from peft import LoraConfig
 11 | from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
 12 | 
 13 | model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
 14 | train_dataset_path = "train.jsonl"
 15 | test_dataset_path = "test.jsonl"
 16 | hf_token = ""  # set your Hugging Face access token here
 17 | 
 18 | # QLoRA parameters
 19 | lora_alpha = 64
 20 | lora_r = lora_alpha * 2
 21 | lora_dropout = 0
 22 | 
 23 | # bitsandbytes parameters
 24 | use_4bit = True
 25 | bnb_4bit_compute_dtype = "float16"
 26 | bnb_4bit_quant_type = "nf4"
 27 | use_nested_quant = False
 28 | 
 29 | # TrainingArguments parameters
 30 | num_train_epochs = 1
 31 | fp16 = False
 32 | bf16 = False
 33 | per_device_train_batch_size = 1
 34 | gradient_accumulation_steps = 4
 35 | gradient_checkpointing = True
 36 | max_grad_norm = 0.3
 37 | learning_rate = 2e-4
 38 | weight_decay = 0.001
 39 | optim = "paged_adamw_32bit"
 40 | lr_scheduler_type = "constant"
 41 | max_steps = -1
 42 | warmup_ratio = 0.03
 43 | group_by_length = True
 44 | save_steps = 400
 45 | logging_steps = 50
 46 | 
 47 | # SFT parameters
 48 | max_seq_length = 512
 49 | packing = False
 50 | device_map = "auto"
 51 | 
 52 | new_model = "llama3-arazn-ar-v1"
 53 | output_dir = new_model
 54 | 
 55 | 
 56 | input_field = "code_switched"
 57 | output_field = "arabic"
 58 | 
 59 | 
 60 | raw_prompt = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
 61 | 
 62 | Translate the following code-switched Arabic-English-mixed text to Arabic only.<|eot_id|><|start_header_id|>user<|end_header_id|>
 63 | 
 64 | {source}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
 65 | 
 66 | """
 67 | 
 68 | 
 69 | with open(train_dataset_path, "r") as f:
 70 |     lines = f.readlines()
 71 | 
 72 | # Drop known-bad records. Pop from the highest index down so that earlier
 73 | # indices are not shifted by previous pops.
 74 | pop_list = [1882, 1967, 2033, 2070, 2071, 2072, 2073, 2074]
 75 | for i in sorted(pop_list, reverse=True):
 76 |     lines.pop(i)
 77 | 
 78 | 
 79 | with open(train_dataset_path, "w") as f:
 80 |     f.writelines(lines)
 81 | 
 82 | 
 83 | train_dataset = load_dataset('json', data_files=train_dataset_path, split="train")
 84 | test_dataset = load_dataset('json', data_files=test_dataset_path, split="train")
 85 | 
 86 | mapper_fn = lambda examples: {
 87 |     'text': [raw_prompt.format(source=source) + target + "<|eot_id|>"
 88 |              for source, target in zip(examples[input_field], examples[output_field])]
 89 | }
 90 | 
 91 | train_dataset_mapped = train_dataset.map(mapper_fn, batched=True)
 92 | test_dataset_mapped = test_dataset.map(mapper_fn, batched=True)
 93 | 
 94 | print(train_dataset_mapped[0]['text'])
 95 | 
 96 | compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
 97 | 
 98 | bnb_config = BitsAndBytesConfig(
 99 |     load_in_4bit=use_4bit,
100 |     bnb_4bit_quant_type=bnb_4bit_quant_type,
101 |     bnb_4bit_compute_dtype=compute_dtype,
102 |     bnb_4bit_use_double_quant=use_nested_quant,
103 | )
104 | 
105 | peft_config = LoraConfig(
106 |     use_dora=True,
107 |     lora_alpha=lora_alpha,
108 |     lora_dropout=lora_dropout,
109 |     r=lora_r,
110 |     bias="none",
111 |     task_type="CAUSAL_LM",
112 | )
113 | 
114 | model = AutoModelForCausalLM.from_pretrained(
115 |     model_name,
116 |     quantization_config=bnb_config,
117 |     device_map=device_map,
118 |     token=hf_token,
119 |     use_cache=False,
120 | )
121 | 
122 | model.config.pretraining_tp = 1
123 | 
124 | tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, token=hf_token)
125 | tokenizer.pad_token = tokenizer.eos_token
126 | tokenizer.padding_side = 'right'
127 | 
128 | # Train on completions only: mask out everything before the assistant header.
129 | response_template = "assistant<|end_header_id|>\n\n"
130 | collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer)
131 | 
132 | training_arguments = TrainingArguments(
133 |     output_dir=output_dir,
134 |     num_train_epochs=num_train_epochs,
135 |     per_device_train_batch_size=per_device_train_batch_size,
136 |     gradient_accumulation_steps=gradient_accumulation_steps,
137 |     optim=optim,
138 |     save_steps=save_steps,
139 |     logging_steps=logging_steps,
140 |     learning_rate=learning_rate,
141 |     weight_decay=weight_decay,
142 |     fp16=fp16,
143 |     bf16=bf16,
144 |     max_grad_norm=max_grad_norm,
145 |     max_steps=max_steps,
146 |     warmup_ratio=warmup_ratio,
147 |     group_by_length=group_by_length,
148 |     lr_scheduler_type=lr_scheduler_type,
149 |     report_to="all",
150 |     push_to_hub=True,
151 | )
152 | 
153 | trainer = SFTTrainer(
154 |     model=model,
155 |     train_dataset=train_dataset_mapped,
156 |     peft_config=peft_config,
157 |     dataset_text_field="text",
158 |     max_seq_length=max_seq_length,
159 |     tokenizer=tokenizer,
160 |     args=training_arguments,
161 |     packing=packing,
162 |     data_collator=collator,
163 | )
164 | 
165 | trainer.train()
166 | trainer.save_model(output_dir)  # persist the final adapter weights
--------------------------------------------------------------------------------
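For completeness, here is a minimal inference sketch for the adapter trained above. It is illustrative, not part of the repository: it assumes the local `llama3-arazn-ar-v1` output directory produced by `train.py`, and it reuses the same prompt template so that generation starts at the assistant turn.

```python
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the base model with the trained LoRA/DoRA adapter applied.
adapter_dir = "llama3-arazn-ar-v1"  # output_dir used in train.py
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_dir, torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

# Same template as train.py; the example sentence is taken from flagged/log.csv.
prompt = (
    "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
    "Translate the following code-switched Arabic-English-mixed text to Arabic only."
    "<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n"
    "اشتغلت science teacher فترة كده<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens (the Arabic translation).
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```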