# %%
import os
import sys
import json
import time
import asyncio
import logging
from pathlib import Path
from tqdm.asyncio import tqdm
from argparse import ArgumentParser
from playwright.async_api import async_playwright

cmd_args = True
parser = ArgumentParser()
parser.add_argument('--test_dir', default='./test_webpages', help='the directory of test webpages.')
parser.add_argument('--inference_dir', default='./inference', help='the directory of model output webpages.')
parser.add_argument('--save_dir', default='./save_results', help='the directory for saving result info jsonl file.')
parser.add_argument('--model_name', default="Qwen2.5-VL-32B-Instruct", help='the VLM used for inference')
parser.add_argument('--num_workers', type=int, default=50, help='number of concurrent workers')
parser.add_argument('--log_dir', default='./', help='the directory for saving the log file')
if not cmd_args:
    args = parser.parse_args([])  # Use the default values set above instead of the command line.
else:
    args = parser.parse_args()
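# Example invocation (the script filename below is an assumption; adjust to the actual file name):
#   python get_evaluation.py --inference_dir ./inference --test_dir ./test_webpages \
#       --save_dir ./save_results --model_name Qwen2.5-VL-32B-Instruct --num_workers 50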

MODEL_NAME = os.path.basename(args.model_name)
LOG_PATH = os.path.join(args.log_dir, f'get_evaluation_{MODEL_NAME}.log')
INFERENCE_DIR = args.inference_dir
ORI_DIR = args.test_dir
SAVE_PATH = os.path.join(args.save_dir, f'{MODEL_NAME}.jsonl')
os.makedirs(args.save_dir, exist_ok=True)  # make sure the save directory exists before appending results

# Preload the JS helper scripts once at startup
with open("./scripts/js/vue.global.js", "r", encoding="utf-8") as f:
    vue_code = f.read()
with open("./scripts/js/one-color-all.js", "r", encoding="utf-8") as f:
    one_color_code = f.read()
with open("./scripts/js/codeSim.js", "r", encoding="utf-8") as f:
    codesim_code = f.read()

# Number of workers
NUM_WORKERS = args.num_workers


def setup_logger(is_console_handler=True):
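    """Set up a logger that writes to LOG_PATH and, optionally, to stdout."""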
    logger = logging.getLogger('web_scraping')
    logger.setLevel(logging.INFO)

    file_handler = logging.FileHandler(LOG_PATH)
    file_handler.setLevel(logging.INFO)

    if is_console_handler:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    file_handler.setFormatter(formatter)
    if is_console_handler:
        console_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    if is_console_handler:
        logger.addHandler(console_handler)

    return logger


cache = []
file_lock = asyncio.Lock()


async def save_result(name, layoutSim, styleSim, force=False):
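    """Buffer one result per page and flush the buffer to SAVE_PATH in batches.

    With force=True, flush whatever is cached regardless of batch size
    (used once at the end of main()).
    """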
    if force:
        async with file_lock:
            with open(SAVE_PATH, 'a') as f:
                for c in cache:
                    f.write(json.dumps(c) + '\n')
            cache.clear()
        return 0
    save_item = {
        'name': name,
        'groupLayoutScore': layoutSim['groupLayoutScore'],
        'overallScore': layoutSim['overallScore'],
        'relativeLayoutScore': layoutSim['relativeLayoutScore'],
        'relativeStyleScore': styleSim['relativeStyleScore']
    }
    cache.append(save_item)
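    # Flush the buffer to disk once it reaches NUM_WORKERS entries.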
    if len(cache) >= NUM_WORKERS:
        async with file_lock:
            with open(SAVE_PATH, 'a') as f:
                for c in cache:
                    f.write(json.dumps(c) + '\n')
            cache.clear()


async def worker(worker_id, queue, browser, logger, pbar):
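    """Consume HTML filenames from the queue and score each generated page.

    For every item, the generated page from INFERENCE_DIR is rendered first to
    extract its elements; the reference page from ORI_DIR is then rendered and
    the injected scripts compute the layout and style similarity scores.
    """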
    while not queue.empty():
        html_item = await queue.get()
        name = os.path.splitext(html_item)[0]
        source_html_path = os.path.join(INFERENCE_DIR, f"{name}.html")
        target_html_path = os.path.join(ORI_DIR, name, "index.html")
        if not os.path.exists(source_html_path):
            logger.error(f"❌ File does not exist: {source_html_path}")
            queue.task_done()
            pbar.update(1)
            continue
        if not os.path.exists(target_html_path):
            logger.error(f"❌ File does not exist: {target_html_path}")
            queue.task_done()
            pbar.update(1)
            continue
        logger.info(f"⭐ Processing {source_html_path}")
        file_url = Path(source_html_path).as_uri()

        start_time = time.perf_counter()
        sources = None
        page = await browser.new_page(viewport={'width': 1920, 'height': 1080})
        try:
            try:
                await page.goto(
                    file_url,
                    timeout=10000,
                    wait_until='domcontentloaded'
                )
            except Exception as e:
                logger.error(f"⚠ Page load failed (continuing): {file_url}, Error: {e}")
            await page.add_script_tag(content=vue_code)
            await page.add_script_tag(content=one_color_code)
            await page.add_script_tag(content=codesim_code)
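            # getElements() is defined in the injected codeSim.js; it is assumed to
            # return a serializable list describing the rendered elements of the page.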
            sources = await page.evaluate("() => getElements()")
            elapsed = time.perf_counter() - start_time
            logger.info(
                f"✅ Source computed complete {source_html_path}, element count: {len(sources)} time: {elapsed:.2f}")
        except Exception as e:
            logger.error(f"❌ Source computed failed: {source_html_path}, Error: {e}")
        finally:
            await page.close()

        if sources is None:
            # The source page could not be processed; skip scoring this item.
            queue.task_done()
            pbar.update(1)
            continue

        file_url = Path(target_html_path).as_uri()
        sec_start_time = time.perf_counter()
        page = await browser.new_page(viewport={'width': 1920, 'height': 1080})
        # Uncomment to log browser console messages for debugging:
        # page.on("console", lambda msg: print(f"Console: {msg.type} - {msg.text}"))
        try:
            try:
                await page.goto(
                    file_url,
                    timeout=3000,
                    wait_until='domcontentloaded'
                )
            except Exception as e:
                logger.error(f"⚠ Page load failed (continuing): {file_url}, Error: {e}")
            await page.add_script_tag(content=vue_code)
            await page.add_script_tag(content=one_color_code)
            await page.add_script_tag(content=codesim_code)
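            # targetEls, getLayoutSim and getStyleSim are assumed to be provided by
            # codeSim.js; the source elements are compared against the target page here.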
            await page.evaluate("() => targetEls.value = getElements(false)")
            layoutSim = await page.evaluate("(sources) => getLayoutSim(sources)", sources)
            styleSim = await page.evaluate("(sources) => getStyleSim(sources)", sources)
            await save_result(name, layoutSim, styleSim)
            elapsed_total = time.perf_counter() - start_time
            elapsed = time.perf_counter() - sec_start_time
            logger.info(
                f"✅ Target computed complete {target_html_path}, layoutSim: {layoutSim['overallScore']}, styleSim: {styleSim['relativeStyleScore']}, element_count: {len(sources)} time: {elapsed:.2f} total_time: {elapsed_total:.2f}")
        except Exception as e:
            logger.error(f"❌ Target computed failed: {target_html_path}, Error: {e}")
        finally:
            await page.close()
        queue.task_done()
        pbar.update(1)


async def main():
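    """Build the task queue, skip pages that already have results, and run the workers."""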
    # Construct Task Queue
    logger = setup_logger(False)
    source_list = os.listdir(INFERENCE_DIR)
    
    len_total = len(source_list)
    print('Total: ', len_total)
    exists_meta = []
    if os.path.exists(SAVE_PATH):
        with open(SAVE_PATH) as f:
            exists_data = f.readlines()
            exists_data = [json.loads(item) for item in exists_data]
            for meta_info in exists_data:
                name = meta_info.get("name", 'none')
                exists_meta.append(name + '.html')
                
    exists_meta = set(exists_meta)
    source_list = [h for h in source_list if h not in exists_meta]
    print(
        f"Found: {len(exists_meta)} Matched: {len_total - len(source_list)} Remain: {len(source_list)}")
    
    # random.shuffle(source_list)
    queue = asyncio.Queue()
    for html_item in source_list:
        await queue.put(html_item)

    async with async_playwright() as p:
        browser_list = []
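        # Launch one headless Chromium instance per worker so pages do not share a browser.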
        for i in range(NUM_WORKERS):
            browser = await p.chromium.launch(headless=True, args=['--no-sandbox', '--disable-setuid-sandbox'])
            browser_list.append(browser)
        print('Browser Started')
        with tqdm(total=len(source_list), desc="Progress") as pbar:
            tasks = []
            for i in range(NUM_WORKERS):
                browser = browser_list[i % len(browser_list)]
                tasks.append(asyncio.create_task(
                    worker(f"worker-{i}", queue, browser, logger, pbar)))
            await queue.join()
            for t in tasks:
                t.cancel()
        for browser in browser_list:
            await browser.close()
        await save_result(None, None, None, True)  # flush any remaining cached results

if __name__ == "__main__":
    asyncio.run(main())

# await main()