import pandas as pd
import streamlit as st

st.set_page_config(layout="wide")
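
# Model identifiers are "family:model:tag" strings, matched against the
# family, model, and tag columns of the results CSVs.

# Contrastive models trained on short (standard) captions.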
SHORT_CAPTIONS = [
    'ALIGN:align-base:coyo700m', 'OpenCLIP:ViT-B-32:openai', 'OpenCLIP:ViT-B-16:openai',
    'OpenCLIP:ViT-L-14:openai', 'OpenCLIP:ViT-L-14-336:openai',
    'OpenCLIP:ViT-B-32:laion2b_s34b_b79k', 'OpenCLIP:ViT-B-16:laion2b_s34b_b88k',
    'OpenCLIP:ViT-L-14:laion2b_s32b_b82k', 'OpenCLIP:ViT-g-14:laion2b_s34b_b88k',
    'OpenCLIP:ViT-H-14:laion2b_s32b_b79k', 'OpenCLIP:roberta-ViT-B-32:laion2b_s12b_b32k',
    'OpenCLIP:ViT-B-16-SigLIP:webli', 'OpenCLIP:ViT-B-16-SigLIP-384:webli',
    'OpenCLIP:ViT-L-16-SigLIP-256:webli', 'OpenCLIP:ViT-L-16-SigLIP-384:webli',
    'OpenCLIP:ViT-SO400M-14-SigLIP:webli', 'OpenCLIP:coca_ViT-B-32:laion2b_s13b_b90k',
    'OpenCLIP:coca_ViT-L-14:laion2b_s13b_b90k'
]
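
# Models trained on long or re-captioned descriptions.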
LONG_CAPTIONS = [
    'DreamLIP:dreamlip-vitb16:cc3m-long', 'DreamLIP:dreamlip-vitb16:cc12m-long',
    'DreamLIP:dreamlip-vitb16:yfcc15m-long', 'DreamLIP:dreamlip-vitb16:cc30m-long',
    'FLAIR:flair-vitb16:cc3m-recap', 'FLAIR:flair-vitb16:cc12m-recap',
    'FLAIR:flair-vitb16:yfcc15m-recap', 'FLAIR:flair-vitb16:cc30m-recap',
    'CLIPS:CLIPS-Large-14-224:recap-datacomp1b', 'CLIPS:CLIPS-Large-14-336:recap-datacomp1b',
    'CLIPS:CLIPS-Huge-14-224:recap-datacomp1b', 'LoTLIP:LoTLIP-ViT-B-32:lotlip100m',
    'LoTLIP:LoTLIP-ViT-B-16:lotlip100m', 'Recap-CLIP:ViT-L-16-HTxt-Recap-CLIP:recap-datacomp1b',
    'LongCLIP:longclip-vitb32:sharegpt4v-1m', 'LongCLIP:longclip-vitb16:sharegpt4v-1m',
    'LongCLIP:longclip-vitl14:sharegpt4v-1m', 'LongCLIP:longclip-vitl14_336px:sharegpt4v-1m',
    'Jina-CLIP:jina-clip-v1:jinaai', 'Jina-CLIP:jina-clip-v2:jinaai'
]
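
# Models targeting compositional understanding, plus the OpenAI ViT-B-32 baseline.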
COMPOSITIONALITY = [
    "OpenCLIP:ViT-B-32:openai", 'StructuredCLIP:NegCLIP-ViT-B-32:coco-ft',
    'StructuredCLIP:CE-CLIP-ViT-B-32:coco-ft', 'StructuredCLIP:DAC-LLM-ViT-B-32:cc3m-ft',
    'StructuredCLIP:DAC-SAM-ViT-B-32:cc3m-ft', 'FSC-CLIP:fsc-clip-ViT-B-32:laioncoco-ft',
    'FSC-CLIP:fsc-clip-ViT-B-16:laioncoco-ft', 'FSC-CLIP:fsc-clip-ViT-L-14:laioncoco-ft'
]
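
# Decoder-based scoring methods (VQAScore / VisualGPTScore); their results
# are read from a separate CSV in main().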
DECODERS = [
    'vqascore:instructblip-flant5-xl:none', 'vqascore:clip-flant5-xl:none',
    'vqascore:llava-v1.5-7b:none', 'vqascore:sharegpt4v-7b:none',
    'visualgptscore:instructblip-flant5-xl:none', 'visualgptscore:clip-flant5-xl:none',
    'visualgptscore:llava-v1.5-7b:none', 'visualgptscore:sharegpt4v-7b:none'
]
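
# Groups rendered as one table each on the main page; DECODERS is handled
# separately in main().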
MODEL_GROUPS = {
    "short_captions": SHORT_CAPTIONS,
    "long_captions": LONG_CAPTIONS,
    "compositionality": COMPOSITIONALITY
}


def format_df(df):
    """Format every column except the identifier columns to one decimal place."""
    cols = []
    for col in df.columns:
        if col in ["family", "model", "tag"]:
            continue
        cols.append(col)
    formatted_df = df.style.format({col: "{:.1f}" for col in cols})
    return formatted_df


def print_table_overall(df, model_names):
    """Display the rows of df whose "family:model:tag" name is in model_names, in that order."""
    named_rows = df[["family", "model", "tag"]].apply(lambda row: ":".join(row), axis=1)
    new_rows = []
    for name in model_names:
        new_rows.append(df[named_rows == name])
    new_rows = format_df(pd.concat(new_rows, axis=0))
    st.table(new_rows)


def main():
    st.title("Interface")

    # One table per encoder model group.
    df = pd.read_csv("data/250124/overall.csv")
    for group, model_names in MODEL_GROUPS.items():
        st.markdown(f"## {group} models")
        print_table_overall(df, model_names)

    # Decoder-based models are stored in a separate results file.
    df = pd.read_csv("data/250124/decoder_overall.csv")
    st.markdown("## Decoder-based models")
    print_table_overall(df, DECODERS)


if __name__ == "__main__":
    main()