├── figures
│   ├── fig1.png
│   ├── fig2.png
│   ├── fig3.png
│   ├── fig3a.png
│   ├── fig3b.png
│   ├── fig3c.png
│   └── fig4.png
├── requirements.txt
├── configs
│   ├── prompts.py
│   └── constants.py
├── .gitignore
├── main.py
├── eval.py
├── inference
│   └── eval_API.py
├── index.html
├── analyze.py
├── README.md
└── LICENSE.txt

--------------------------------------------------------------------------------
/figures/fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig1.png

--------------------------------------------------------------------------------
/figures/fig2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig2.png

--------------------------------------------------------------------------------
/figures/fig3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig3.png

--------------------------------------------------------------------------------
/figures/fig3a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig3a.png

--------------------------------------------------------------------------------
/figures/fig3b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig3b.png

--------------------------------------------------------------------------------
/figures/fig3c.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig3c.png

--------------------------------------------------------------------------------
/figures/fig4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Cambrian-yzt/MOAT/HEAD/figures/fig4.png

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
openai
requests
pandas
Pillow
matplotlib
datasets
tqdm

--------------------------------------------------------------------------------
/configs/prompts.py:
--------------------------------------------------------------------------------
import json

# with cot
VQA_SYSTEM_PROMPT = json.dumps({
    'task': 'Answer the question presented to you truthfully.',
    'requirements': [
        'Analyze the image(s) first, then answer the question. If you are given a list of possible answers, you must choose from it.',
        'You must answer in the following json format: {"analysis": "(write your analysis here)", "answer": "(your answer)"}'
    ]
})

# w/o cot
# VQA_SYSTEM_PROMPT = json.dumps({
#     'task': 'Answer the question presented to you truthfully.',
#     'requirements': [
#         'If you are given a list of possible answers, you must choose from it.',
#         'You must answer in the following json format: {"answer": "(your answer)"}'
#     ]
# })

EVAL_SYSTEM_PROMPT = json.dumps({
    'task': 'Evaluate whether the answer to a question is correct.',
    'requirements': [
        'Compare an answer to a question with the ground truth answer. Determine whether it is correct.',
        'You must ignore any analysis of the problem if present. You must focus only on the final answer.',
        'You must answer in the following json format: {"verdict": "(1 for correct, 0 for incorrect)"}'
    ]
})

--------------------------------------------------------------------------------
/configs/constants.py:
--------------------------------------------------------------------------------
import os
import json

# General API settings
API_TIMEOUT = 180  # API timeout, in seconds. 3 minutes should be enough for o1 to think ;)

# VQA API settings
VQA_ENDPOINT = None  # put your API endpoint here (e.g. https://api.openai.com/v1)
VQA_API_KEY = ''  # put your API key here
VQA_MODEL = 'gpt-4o-mini'  # use any model you like :)
VQA_API_VISION_DETAIL_LEVEL = 'high'
VQA_TEMPERATURE = 0.0

# Evaluation settings
EVAL_ENDPOINT = None  # put your API endpoint here (e.g. https://api.openai.com/v1)
EVAL_API_KEY = ''  # put your API key here
EVAL_MODEL = 'gpt-4o-mini'  # we used gpt-4o-mini to grade the answers in our paper. Given that all questions have a single correct answer, we did not observe any grading errors by gpt-4o-mini.
EVAL_TEMPERATURE = 0.0

# Task settings
N_RUNS = 3  # account for randomness in LLM output
LOG_DIR = os.path.join('.', 'logs')
ANALYTICS_DIR = os.path.join('.', 'analytics')

# Tweak this in accordance with your rate limit, which is defined by your API provider
MAX_CONCURRENT_REQUESTS = 32

# A lot of models are very dumb and do not know how to write a very simple JSON object :(
# The evaluation script will try to decode the output of the LMM if this is set to True.
JSON_OUTPUT = True

# Some models are very dumb and have a limited output length :(
MAX_TOKENS = 2048

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache
scrapbook/

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# Environments
.env
.venv/
env/
venv/
ENV/
env.bak/
venv.bak/

# MySQL
*.sql

# dotenv
*.env

# Pyre type checker
.pyre/

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pycharm
.idea/

# VSCode
.vscode/

# MacOS
.DS_Store

# Thumbnails
Thumbs.db

# Other
**/*.log
logs/
analytics/

test*.py

--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from datetime import datetime
import json
import os
from datasets import load_dataset

from eval import eval_vqa_item
from configs.constants import *


if __name__ == '__main__':
    for run_id in range(1, N_RUNS + 1):
        print(f'VQA Model: {VQA_MODEL}')
        print(f'Temperature: {VQA_TEMPERATURE}')
        print(f'Max Concurrent Requests: {MAX_CONCURRENT_REQUESTS}')
        print(f'Eval Model: {EVAL_MODEL}')

        launch_time = datetime.now()
        dataset = load_dataset("waltsun/MOAT", split='test')

        result_dict = {}
        n_correct = 0
        with ThreadPoolExecutor(max_workers=MAX_CONCURRENT_REQUESTS) as executor:
            futures = [executor.submit(eval_vqa_item, question_dict) for question_dict in dataset]

            pbar = tqdm(total=len(dataset), dynamic_ncols=True)
            for future in as_completed(futures):
                index, answer, reason, ground_truth, verdict, time_delta = future.result()
                n_correct += verdict
                result_dict[index] = {
                    'index': index,
                    'answer': answer,
                    'reason': reason,
                    'ground_truth': ground_truth,
                    'verdict': verdict
                }
                pbar.update(1)
            pbar.close()
        if not os.path.exists(LOG_DIR):
            os.makedirs(LOG_DIR)
        with open(os.path.join(LOG_DIR, f'{VQA_MODEL}_{run_id}.json'), 'w') as f:
            result_list = [result_dict[key] for key in sorted(result_dict.keys())]
            json.dump({
                'summary': {
                    'Correct Count': n_correct,
                    'Accuracy': n_correct / len(dataset),
                    'Launch Time': str(launch_time),
                    'VQA Model': VQA_MODEL,
                    'Run ID': run_id,
                    'Temperature': VQA_TEMPERATURE,
                    'Eval Model': EVAL_MODEL,
                },
                'logs': result_list,
            }, f, indent=4)

--------------------------------------------------------------------------------
/eval.py:
--------------------------------------------------------------------------------
import json

from openai import OpenAI
from time import perf_counter
from PIL.Image import Image

from configs.constants import *
from configs.prompts import *
from inference.eval_API import get_response_API


def eval_answer(input: str, ground_truth: str, max_tokens=4096, max_tries=5) -> int:
    # build a query context
    eval_query = f'The answer to evaluate is {input}\nThe ground truth answer is {ground_truth}'
    messages = [
        {
            'role': 'system',
            'content': [
                {
                    'type': 'text',
                    'text': EVAL_SYSTEM_PROMPT,
                }
            ]
        },
        {
            'role': 'user',
            'content': [
                {
                    'type': 'text',
                    'text': eval_query,
                }
            ]
        }
    ]
    client = OpenAI(base_url=EVAL_ENDPOINT, api_key=EVAL_API_KEY)
    completion = None
    for _ in range(max_tries):
        try:
            completion = client.chat.completions.create(
                model=EVAL_MODEL,
                temperature=EVAL_TEMPERATURE,
                max_tokens=max_tokens,
                messages=messages,
                response_format={'type': 'json_object'},
                timeout=API_TIMEOUT
            )
            break
        except Exception as e:
            print(e)
    if completion is None:
        # every attempt failed; fail loudly instead of crashing on an undefined variable below
        raise RuntimeError(f'eval_answer: all {max_tries} API attempts failed')
    response_json = json.loads(completion.model_dump()['choices'][0]['message']['content'])
    return int(response_json['verdict'])


def eval_vqa_item(question_dict: dict, max_tries=5) -> tuple[int, str, str, str, int, float]:
    """
    Evaluate one VQA item
    """
    start_time = perf_counter()
    try:
        question_index: int = question_dict['index']
        question_text: str = question_dict['question']
        choices: list[str] = question_dict['choices']
        images: list[Image] = question_dict['images']
        outside_knowledge_text: str = question_dict['outside_knowledge_text']
        outside_knowledge_images: list[Image] = question_dict['outside_knowledge_images']
        ground_truth: str = question_dict['answer']
    except Exception as e:
        print(f"Failed to extract question data for question {question_dict}: {e}")
        raise

    try:
        # use the API
        answer, reason = get_response_API(question_text, images, outside_knowledge_text, outside_knowledge_images, choices, max_tries=max_tries, max_tokens=MAX_TOKENS)
    except Exception as e:
        print(f"Failed to get a valid answer for question {question_index}: {e}")
        raise

    try:
        verdict = eval_answer(answer, ground_truth)
    except Exception as e:
        print(f"Failed to get a valid verdict for the answer to question {question_index}: {e}")
        verdict = 0
        # raise

    end_time = perf_counter()
    return question_index, answer, reason, ground_truth, verdict, end_time - start_time


if __name__ == '__main__':
    from datasets import load_dataset
    dataset = load_dataset("waltsun/MOAT", split='test')
    index, answer, reason, ground_truth, verdict, time_delta = eval_vqa_item(dataset[0])
    print(f"Model: {VQA_MODEL}")
    print(f"Question Index: {index}. Time taken: {time_delta:.2f} seconds.")
    print(f"Answer: {answer}")
    print(f"Reason: {reason}")
    print(f"Ground truth: {ground_truth}")
    print(f"Verdict: {verdict}")

--------------------------------------------------------------------------------
/inference/eval_API.py:
--------------------------------------------------------------------------------
import time
import random
import json
import base64
from io import BytesIO
from random import shuffle

from openai import OpenAI
from PIL.Image import Image

from configs.constants import *
from configs.prompts import *


def get_response_API(question: str, images: list[Image], outside_knowledge_text: str, outside_knowledge_images: list[Image], choices: list[str], max_tokens=4096, max_tries=5) -> tuple[str, str]:
    """
    Exactly what it says in the names of the function and parameters
    """
    # read the image files and convert them to base64
    base64_images: list[str] = []
    base64_outside_knowledge_images: list[str] = []
    for img in images:
        buffered = BytesIO()
        img.save(buffered, format="PNG")
        base64_images.append(base64.b64encode(buffered.getvalue()).decode('utf-8'))
    for img in outside_knowledge_images:
        buffered = BytesIO()
        img.save(buffered, format="PNG")
        base64_outside_knowledge_images.append(base64.b64encode(buffered.getvalue()).decode('utf-8'))

    # build a query context
    messages = [
        {
            'role': 'system' if 'o1' not in VQA_MODEL else 'developer',
            'content': [
                {
                    'type': 'text',
                    'text': VQA_SYSTEM_PROMPT,
                }
            ] if 'doubao' not in VQA_MODEL and 'moonshot' not in VQA_MODEL else VQA_SYSTEM_PROMPT  # Doubao only accepts a simple string as system prompt, which is strange :(
        },
    ]
    if len(choices) > 0:
        shuffle(choices)
        question += f'\nThe choices are: {choices}'
    user_message_contents = [
        {
            'type': 'text',
            'text': question,  # may include the choices
        }
    ]
    for idx, img in enumerate(base64_images):
        user_message_contents.append({
            'type': 'text',
            'text': f'Image {idx + 1}',
        })
        user_message_contents.append({
            'type': 'image_url',
            'image_url': {
                'url': f'data:image/png;base64,{img}',
                'detail': VQA_API_VISION_DETAIL_LEVEL,
            }
        })
    if len(outside_knowledge_text) > 0:
        user_message_contents.append({
            'type': 'text',
            'text': 'Hint:\n' + outside_knowledge_text,
        })
    for idx, img in enumerate(base64_outside_knowledge_images):
        user_message_contents.append({
            'type': 'text',
            'text': f'Hint image {idx + 1}',
        })
        user_message_contents.append({
            'type': 'image_url',
            'image_url': {
                'url': f'data:image/png;base64,{img}',
                'detail': VQA_API_VISION_DETAIL_LEVEL,
            }
        })
    messages.append({
        'role': 'user',
        'content': user_message_contents,
    })

    # client = AzureOpenAI(azure_endpoint=AZURE_ENDPOINT, api_key=AZURE_KEY, api_version=AZURE_API_VERSION)
    client = OpenAI(api_key=VQA_API_KEY, base_url=VQA_ENDPOINT)

    answer, reason = 'RESPONSE FAILED', 'NO REASON'  # defaults in case every attempt fails
    for i in range(max_tries):
        try:
            if 'o1' in VQA_MODEL:
                # o1 requires temperature=1 and uses max_completion_tokens instead of max_tokens
                completion = client.chat.completions.create(
                    model=VQA_MODEL,
                    temperature=1,
                    max_completion_tokens=16384,
                    messages=messages,
                    timeout=API_TIMEOUT
                )
            else:
                completion = client.chat.completions.create(
                    model=VQA_MODEL,
                    temperature=VQA_TEMPERATURE,
                    max_tokens=max_tokens,
                    messages=messages,
                    response_format={'type': 'json_object'},
                    timeout=API_TIMEOUT
                )
            if JSON_OUTPUT:
                raw_response = completion.model_dump()['choices'][0]['message']['content']
                try:
                    if '```json' in raw_response:
                        # strip a leading ```json fence and a trailing ``` fence
                        raw_response = raw_response[7:-3]
                    response_json: dict = json.loads(raw_response)
                    # weird gemini stuff: these models wrap the object in a one-element list
                    if VQA_MODEL == 'gemini-2.0-pro-exp-02-05' or VQA_MODEL == 'gemini-2.0-flash-lite-preview-02-05':
                        try:
                            response_json = response_json[0]
                        except Exception:
                            pass
                    answer = response_json['answer']
                    reason = response_json.get('analysis', 'NO REASON')
                except Exception:
                    # json decoding failed, treat as non-json output
                    answer = raw_response
                    reason = 'NO REASON'
            else:
                answer = completion.model_dump()['choices'][0]['message']['content']
                reason = 'NO REASON'
            time.sleep(0.5)
            break
        except Exception as e:
            print(images, e)
            answer = 'RESPONSE FAILED'
            reason = 'NO REASON'
            time.sleep(5 * (i + random.random()))  # back off before retrying
    return answer, reason

--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[Project page: its text mirrors the README.md below (Overview, Benchmark Composition, Leaderboard, Future Work, and Citation).]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

## News

- `2025-03-26` Added results for Gemini 2.5 Pro, which overtook OpenAI o1 as the best-performing model on MOAT. However, there is still a long way to go before LMMs reach human-like performance on MOAT!
- `2025-03-19` Added results for GPT-4.5, see the table below for details.
- `2025-03-13` MOAT is now supported by [VLMEvalKit](https://github.com/open-compass/VLMEvalKit)! Try it out!
- `2025-03-12` We released MOAT on [GitHub](https://github.com/Cambrian-yzt/MOAT) and [Hugging Face](https://huggingface.co/datasets/waltsun/MOAT)!

## Overview

**MOAT** (**M**ultimodal model **O**f **A**ll **T**rades) is a challenging benchmark for large multimodal models (LMMs). It consists of vision language (VL) tasks that require the LMM to integrate several VL capabilities and engage in human-like generalist visual problem solving. Moreover, many tasks in **MOAT** focus on LMMs' capability to ground complex text and visual instructions, which is crucial for applying LMMs in the wild. Building on the VL capability taxonomies proposed in previous benchmark papers, we define 10 fundamental VL capabilities in **MOAT**.

Notably, we purposefully insulated **MOAT** from the influence of domain knowledge, text generation style, and other external factors by making the questions close-ended (i.e., each has a single short answer) and solvable with the information and hints provided in the question itself. This allows **MOAT** to focus on fundamental generalist VL capabilities. We also did not include VL capabilities like *general object recognition* and *attribute recognition* in our taxonomy, since these are required by all **MOAT** tasks, and performance on these fronts is reflected in the overall accuracy on **MOAT**.

## Benchmark Composition

**MOAT** tasks require LMMs to integrate up to 6 fundamental VL capabilities. We report the proportion of questions requiring each VL capability, the distribution of the number of VL capabilities required, and the 15 most common capability combinations required in **MOAT**.

## Leaderboard

**ALL** existing LMMs, proprietary and open source, perform very poorly on **MOAT**, with the best-performing model (OpenAI o1) achieving an accuracy (38.8%) less than half that of humans (82.7%). For individual VL capabilities, **CNT**, **RLA**, **3DTF** and **3DQNT** saw consistently poor performance across LMMs. In addition, **GNDT** and **GNDV** performance did not scale well with model size. Please refer to our paper for more detailed analysis of the results, as well as discussion of the implications of LMM architecture choices such as tiling and built-in CoT reasoning (or '*thinking*') capability.

## Usage

We provide **three** ways to evaluate your LMM on **MOAT**.

### With VLMEvalKit

**VLMEvalKit** is a toolkit for evaluating large multimodal models (LMMs) on a wide range of visual language (VL) benchmarks through a unified interface, and it now supports **MOAT**. Please refer to the [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) repository for more information.

### Develop Your Own Code With Hugging Face Datasets

You can access our dataset with the following code:

```python
from datasets import load_dataset
dataset = load_dataset("waltsun/MOAT", split='test')
```

As some questions are formatted as interleaved text and image(s), we recommend referring to the `./inference/eval_API.py` file for the correct way to query the LMM.
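
For reference, here is a minimal sketch of that interleaved layout, condensed from `get_response_API` in `./inference/eval_API.py` (it assumes an OpenAI-compatible chat-completions client):

```python
import base64
from io import BytesIO

from datasets import load_dataset

dataset = load_dataset("waltsun/MOAT", split='test')
row = dataset[0]

def to_data_url(img) -> str:
    # serialize a PIL image into a base64 PNG data URL, as eval_API.py does
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    return f"data:image/png;base64,{base64.b64encode(buffered.getvalue()).decode('utf-8')}"

# interleave the question text, its images, and the optional hint text/images
content = [{'type': 'text', 'text': row['question']}]
for idx, img in enumerate(row['images']):
    content.append({'type': 'text', 'text': f'Image {idx + 1}'})
    content.append({'type': 'image_url', 'image_url': {'url': to_data_url(img)}})
if row['outside_knowledge_text']:
    content.append({'type': 'text', 'text': 'Hint:\n' + row['outside_knowledge_text']})
for idx, img in enumerate(row['outside_knowledge_images']):
    content.append({'type': 'text', 'text': f'Hint image {idx + 1}'})
    content.append({'type': 'image_url', 'image_url': {'url': to_data_url(img)}})

messages = [{'role': 'user', 'content': content}]  # ready for client.chat.completions.create(...)
```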

### Use Our GitHub Repository

* Dependencies are listed in `./requirements.txt`. We used `Python 3.12.8` in our experiments. Run `pip install -r requirements.txt` to install all dependencies.

* The dataset is available on [Hugging Face](https://huggingface.co/datasets/waltsun/MOAT). Our code will automatically download the dataset from Hugging Face.

* To evaluate an LMM, run `python main.py`, and the results will be logged under `./logs/`. Change the model name, API endpoint, and API key in `./configs/constants.py`.

## Details

**File Structure in GitHub Repo**

* `./configs/constants.py`: You can tweak the experiment settings here.
* `./configs/prompts.py`: You can find the VQA prompt (both the CoT version and the non-CoT version) and the evaluation prompt.
* `./inference/eval_API.py`: The QA process, including how the LMM query context is structured and details about API calls, is defined in this file.
* `./eval.py`: The evaluation process for each question, including the QA phase and the answer evaluation phase.
* `./main.py`: The script in `main.py` loops over all questions and uses multithreading to speed up the evaluation process. Logging is taken care of in `main.py` as well.
* `./analyze.py`: Used to generate the leaderboard based on the logs. The leaderboard can be found under the directory `./analytics/`. A minimal sketch of this aggregation is shown below the list.

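`analyze.py` itself is not reproduced on this page, so the following is only a rough sketch of the kind of aggregation it performs, assuming nothing beyond the `summary` block that `main.py` writes to each log file:

```python
import json
import os
from glob import glob

from configs.constants import LOG_DIR

# collect per-run accuracy for each model from the logs written by main.py
scores: dict[str, list[float]] = {}
for path in glob(os.path.join(LOG_DIR, '*.json')):
    with open(path) as f:
        summary = json.load(f)['summary']
    scores.setdefault(summary['VQA Model'], []).append(summary['Accuracy'])

# print a simple leaderboard, best average accuracy first
for model, accs in sorted(scores.items(), key=lambda kv: -sum(kv[1]) / len(kv[1])):
    print(f"{model}: {sum(accs) / len(accs):.1%} over {len(accs)} run(s)")
```
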
**Column Description in Hugging Face Dataset** (see the example after the list)

- `index`: The index of the question in the dataset.
- `question`: The question text.
- `choices`: A list of the answer choices. Can be empty.
- `images`: The list of PIL images.
- `outside_knowledge_text`: The essential information for answering the question. Optional.
- `outside_knowledge_images`: The list of PIL images that are essential for answering the question. Can be empty.
- `answer`: The correct answer.
- `capability`: The VL capabilities required to answer the question. A list of strings.
- `human_cot`: The human annotation for the CoT reasoning process.

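For example, one can print a single row to sanity-check the columns listed above:

```python
from datasets import load_dataset

dataset = load_dataset("waltsun/MOAT", split='test')
row = dataset[0]

print(row['index'], row['capability'])                 # question id and required VL capabilities
print(row['question'])
print(row['choices'])                                  # may be an empty list
print(len(row['images']), 'question image(s)')         # PIL images
print(repr(row['outside_knowledge_text']))             # may be an empty string
print(len(row['outside_knowledge_images']), 'hint image(s)')
print(row['answer'])
```
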
## Future Work

Going forward, we intend to further increase the diversity of the tasks in **MOAT**, involving more capability combinations and encompassing more domains and scenarios. Stay tuned!

## Citation

Cite our work using the BibTeX entry below!

```latex
@article{ye2025moat,
  title={MOAT: Evaluating LMMs for Capability Integration and Instruction Grounding},
  author={Ye, Zhoutong and Sun, Mingze and Gao, Huan-ang and Yu, Chun and Shi, Yuanchun},
  journal={arXiv preprint arXiv:2503.09348},
  year={2025}
}
```

--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------