# -*- coding: utf-8 -*-
"""
Scrape-consistency test script.

Test goals:
1. Verify that record counts are consistent after re-scraping.
2. Measure the time a full scrape takes.
3. Verify that execution is deterministic (no randomness).
"""

import sys
sys.stdout.reconfigure(encoding='utf-8')

import os
import time
import csv
import json
from datetime import datetime

# Add the parent directory to sys.path so that scraper can be imported.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import requests
import urllib3
from bs4 import BeautifulSoup
import re

# Every requests.get below is called with verify=False, so silence the
# resulting InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

TEST_DIR = os.path.dirname(os.path.abspath(__file__))

# ===== Core functions copied from scraper.py =====

def parse_standard_table(table):
    """Parse one 'list_form' table into a member dict.

    Cells in each row are read pairwise (label cell, value cell); rows
    with an odd trailing cell simply drop it.  The site-specific label
    '開業事務所' is normalized to '事務所'.
    """
    member = {}
    for row in table.find_all('tr'):
        cells = row.find_all('td')
        # Even-index cells are labels, odd-index cells are values.
        for label_cell, value_cell in zip(cells[::2], cells[1::2]):
            key = label_cell.get_text(strip=True)
            if not key:
                continue
            if key == '開業事務所':
                key = '事務所'
            member[key] = value_cell.get_text(strip=True)
    return member


def fetch_standard_pages(base_url, max_pages=200):
    """Fetch paginated 'list_form' member tables from *base_url*.

    Pages are requested as ?page=N (N starting at 1) until one of:
    an empty page, a page yielding no new members, or a short page
    (fewer than 10 tables, i.e. the last page).  Members are
    de-duplicated by 會員編號, falling back to 姓名.
    """
    members = []
    known_ids = set()

    for page_no in range(1, max_pages + 1):
        resp = requests.get(f'{base_url}?page={page_no}', verify=False, timeout=30)
        resp.encoding = 'utf-8'
        page_soup = BeautifulSoup(resp.text, 'html.parser')
        page_tables = page_soup.find_all('table', class_='list_form')

        if not page_tables:
            break

        added = 0
        for tbl in page_tables:
            record = parse_standard_table(tbl)
            if not record or '姓名' not in record:
                continue
            uid = record.get('會員編號', record['姓名'])
            if uid in known_ids:
                continue
            known_ids.add(uid)
            members.append(record)
            added += 1

        # Stop on a page with nothing new, or on a short (final) page.
        if added == 0 or len(page_tables) < 10:
            break

        time.sleep(0.2)  # be polite to the server between pages

    return members


def scrape_taipei(update_time):
    """Scrape the Taipei association (reaa.org.tw).

    Returns appraisers, assistants, and the current supervisor roster,
    each record stamped with *update_time*.
    """
    results = {}

    appraisers = fetch_standard_pages('http://www.reaa.org.tw/member_namelist.php')
    for record in appraisers:
        record['更新時間'] = update_time
    results['taipei_appraisers'] = appraisers

    assistants = fetch_standard_pages('http://www.reaa.org.tw/member_namelist_02.php')
    for record in assistants:
        record['更新時間'] = update_time
    results['taipei_assistants'] = assistants

    # Supervisors live on a single, non-paginated page.
    resp = requests.get('http://www.reaa.org.tw/supervisor_namelist.php',
                        verify=False, timeout=30)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')

    supervisors = []
    for tbl in soup.find_all('table', class_='list_form'):
        record = parse_standard_table(tbl)
        if record and '姓名' in record:
            record['屆別'] = '現任'
            record['更新時間'] = update_time
            supervisors.append(record)
    results['taipei_supervisors'] = supervisors

    return results


def scrape_newtaipei(update_time):
    """Scrape the New Taipei association member list.

    The page is free-form text, so this walks it line by line as a small
    state machine: a standalone number (< 200) starts a new record, then
    subsequent lines are matched in order as name, office, phone, and
    address.  NOTE(review): matching is order-dependent (each elif is
    gated on earlier fields being present) — restructure with care.
    """
    url = 'http://www.tcarea.url.tw/product_1244107.html'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    text = soup.get_text()
    lines = text.split('\n')
    members = []
    current_member = {}

    for line in lines:
        line = line.strip()
        if not line:
            continue

        # A standalone small integer marks the start of the next member.
        if re.match(r'^\d+$', line) and int(line) < 200:
            if current_member.get('姓名') and current_member['姓名'] != '會員':
                members.append(current_member)
            current_member = {'序號': line}
        elif current_member.get('序號') and not current_member.get('姓名'):
            # Names are expected to be 2-4 characters long.
            if len(line) >= 2 and len(line) <= 4:
                current_member['姓名'] = line
        elif current_member.get('姓名') and not current_member.get('事務所'):
            if '不動產估價師' in line:
                current_member['事務所'] = line
        elif current_member.get('事務所') and not current_member.get('聯絡電話'):
            # Phone numbers start with a leading 0.
            if re.match(r'^0\d', line):
                current_member['聯絡電話'] = line
        elif current_member.get('聯絡電話') and not current_member.get('地址'):
            if '新北市' in line or '台北市' in line:
                current_member['地址'] = line

    # Flush the trailing record after the loop ends.
    if current_member.get('姓名') and current_member['姓名'] != '會員':
        members.append(current_member)

    # Keep only records that captured both a name and an office.
    members = [m for m in members if m.get('姓名') and m.get('事務所')]
    for m in members:
        m['更新時間'] = update_time

    return {'newtaipei_appraisers': members}


def scrape_taoyuan(update_time):
    """Scrape the Taoyuan association: a single HTML table whose first
    row provides the column headers."""
    response = requests.get('https://www.tarea.org.tw/list/Member',
                            verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    rows = soup.find('table').find_all('tr')

    def row_texts(row):
        # Header rows use th, data rows use td — accept both.
        return [cell.get_text(strip=True) for cell in row.find_all(['td', 'th'])]

    members = []
    if rows:
        headers = row_texts(rows[0])
        for row in rows[1:]:
            member = dict(zip(headers, row_texts(row)))
            member['更新時間'] = update_time
            members.append(member)

    return {'taoyuan_appraisers': members}


def scrape_taichung(update_time):
    """Scrape the Taichung association (creaa.org.tw).

    The member list is de-duplicated again here (by 會員編號, falling
    back to 姓名) as a guard on top of fetch_standard_pages.
    """
    results = {}

    fetched = fetch_standard_pages('http://www.creaa.org.tw/member_namelist.php')
    seen = set()
    appraisers = []
    for record in fetched:
        uid = record.get('會員編號', record['姓名'])
        if uid in seen:
            continue
        seen.add(uid)
        record['更新時間'] = update_time
        appraisers.append(record)
    results['taichung_appraisers'] = appraisers

    # Supervisors live on a single, non-paginated page.
    resp = requests.get('http://www.creaa.org.tw/supervisor_namelist.php',
                        verify=False, timeout=30)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')

    supervisors = []
    for tbl in soup.find_all('table', class_='list_form'):
        record = parse_standard_table(tbl)
        if record and '姓名' in record:
            record['屆別'] = '現任'
            record['更新時間'] = update_time
            supervisors.append(record)
    results['taichung_supervisors'] = supervisors

    return results


def scrape_tainan(update_time):
    """Scrape the Tainan association member list, stamping each record
    with *update_time*."""
    records = fetch_standard_pages('http://www.tnreaa.org.tw/member_namelist.php')
    for record in records:
        record['更新時間'] = update_time
    return {'tainan_appraisers': records}


def scrape_kaohsiung(update_time):
    """Scrape the Kaohsiung association (karea.org.tw).

    Member cards ('msgrids' divs) are collected across up to 19 pages,
    then de-duplicated by name.
    """
    collected = []

    for page in range(1, 20):
        resp = requests.get(
            f'http://www.karea.org.tw/member_roster.php?page={page}',
            verify=False, timeout=30)
        resp.encoding = 'utf-8'
        soup = BeautifulSoup(resp.text, 'html.parser')

        cards = soup.find_all('div', class_='msgrids')
        if not cards:
            break

        for card in cards:
            record = {}
            heading = card.find('div', class_='titles')
            if heading:
                record['事務所'] = heading.get_text(strip=True)

            # Each ptxt div holds a (label, value) pair in t1/t2 children.
            for pair in card.find_all('div', class_='ptxt'):
                label_div = pair.find('div', class_='t1')
                value_div = pair.find('div', class_='t2')
                if not (label_div and value_div):
                    continue
                label = label_div.get_text(strip=True)
                # Normalize site-specific labels to the shared field names.
                label = {'估價師': '姓名', '事務所地址': '地址',
                         '事務所網址': '網址'}.get(label, label)
                record[label] = value_div.get_text(strip=True)

            if record.get('姓名'):
                collected.append(record)

        time.sleep(0.2)  # be polite to the server between pages

    seen_names = set()
    unique = []
    for record in collected:
        if record['姓名'] in seen_names:
            continue
        seen_names.add(record['姓名'])
        record['更新時間'] = update_time
        unique.append(record)

    return {'kaohsiung_appraisers': unique}


def scrape_associations(update_time):
    """Scrape the national federation's list of local associations."""
    resp = requests.get('http://www.rocreaa.org.tw/member_namelist.php',
                        verify=False, timeout=30)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')

    associations = []
    for tbl in soup.find_all('table', class_='list_form'):
        record = parse_standard_table(tbl)
        # Only keep tables that actually describe an association.
        if record.get('公會名稱'):
            record['更新時間'] = update_time
            associations.append(record)

    return {'associations': associations}


# ===== Test main program =====

def main():
    """Run every association scraper once, time each one, compare the
    resulting record counts against the expected baseline, and save a
    JSON summary (test_result.json) next to this script."""
    print('=' * 60)
    print('台灣不動產估價師公會名單抓取 - 一致性測試')
    print('=' * 60)

    # Record the start time; its formatted value is also the 更新時間
    # stamp written into every scraped record.
    start_time = time.time()
    start_datetime = datetime.now()
    update_time = start_datetime.strftime('%Y-%m-%d %H:%M:%S')

    print(f'\n開始時間: {update_time}')
    print('-' * 60)

    all_results = {}
    scrape_times = {}

    scrapers = [
        ('台北市', scrape_taipei),
        ('新北市', scrape_newtaipei),
        ('桃園市', scrape_taoyuan),
        ('台中市', scrape_taichung),
        ('台南市', scrape_tainan),
        ('高雄市', scrape_kaohsiung),
        ('全聯會', scrape_associations),
    ]

    for name, scraper in scrapers:
        print(f'\n抓取 {name}...')
        scrape_start = time.time()
        try:
            result = scraper(update_time)
            scrape_times[name] = time.time() - scrape_start

            for key, data in result.items():
                all_results[key] = data
                print(f'  {key}: {len(data)} 筆')
            print(f'  耗時: {scrape_times[name]:.2f} 秒')
        except Exception as e:
            print(f'  錯誤: {e}')
            # BUGFIX: record the real elapsed time on failure (was
            # hard-coded 0), so a slow network timeout is no longer
            # reported as taking no time at all.
            scrape_times[name] = time.time() - scrape_start

    # Record the end time.
    end_time = time.time()
    end_datetime = datetime.now()
    total_time = end_time - start_time

    print('\n' + '=' * 60)
    print('測試結果')
    print('=' * 60)

    # Aggregate everything into a JSON-serializable summary.
    summary = {
        'test_time': {
            'start': start_datetime.isoformat(),
            'end': end_datetime.isoformat(),
            'duration_seconds': round(total_time, 2),
        },
        'counts': {},
        'scrape_times': {},
    }

    print(f'\n開始時間: {start_datetime.strftime("%Y-%m-%d %H:%M:%S")}')
    print(f'結束時間: {end_datetime.strftime("%Y-%m-%d %H:%M:%S")}')
    print(f'總耗時: {total_time:.2f} 秒 ({total_time/60:.2f} 分鐘)')

    print('\n各公會抓取耗時:')
    for name, duration in scrape_times.items():
        print(f'  {name}: {duration:.2f} 秒')
        summary['scrape_times'][name] = round(duration, 2)

    print('\n資料筆數統計:')
    total_appraisers = 0
    for key, data in sorted(all_results.items()):
        count = len(data)
        summary['counts'][key] = count
        print(f'  {key}: {count} 筆')
        # Only *_appraisers datasets count toward the appraiser total.
        if 'appraisers' in key:
            total_appraisers += count

    summary['counts']['total_appraisers'] = total_appraisers
    print(f'\n估價師總計: {total_appraisers} 位')

    # Baseline counts from a previous known-good full scrape.
    expected = {
        'taipei_appraisers': 182,
        'taipei_assistants': 192,
        'taipei_supervisors': 37,
        'newtaipei_appraisers': 66,
        'taoyuan_appraisers': 25,
        'taichung_appraisers': 80,
        'taichung_supervisors': 21,
        'tainan_appraisers': 48,
        'kaohsiung_appraisers': 75,
        'associations': 7,
    }

    print('\n' + '=' * 60)
    print('與預期值比較')
    print('=' * 60)

    all_match = True
    comparison = []
    for key, expected_count in expected.items():
        actual_count = summary['counts'].get(key, 0)
        match = actual_count == expected_count
        status = 'OK' if match else 'DIFF'
        diff = actual_count - expected_count
        comparison.append({
            'key': key,
            'expected': expected_count,
            'actual': actual_count,
            'diff': diff,
            'match': match,
        })
        print(f'  {key}: 預期 {expected_count}, 實際 {actual_count} [{status}]' +
              (f' (差異: {diff:+d})' if diff != 0 else ''))
        if not match:
            all_match = False

    summary['comparison'] = comparison
    summary['all_match'] = all_match

    print('\n' + '=' * 60)
    if all_match:
        print('結論: 所有資料筆數與預期值完全一致')
    else:
        print('結論: 部分資料筆數與預期值不符')
    print('=' * 60)

    # Persist the summary so runs can be compared against each other.
    output_file = os.path.join(TEST_DIR, 'test_result.json')
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(summary, f, ensure_ascii=False, indent=2)
    print(f'\n測試結果已儲存至: {output_file}')

    # Answer the third question (why repeated runs are deterministic).
    print('\n' + '=' * 60)
    print('關於變異性的說明')
    print('=' * 60)
    print('''
這個程式是「確定性」的，不會有 LLM 那樣的隨機性。

原因：
1. 網路爬蟲是純粹的程式邏輯，沒有使用任何隨機數或機率模型
2. 相同的 HTML 輸入會產生相同的解析結果
3. 去重邏輯（seen_ids, seen_names）是基於集合操作，順序確定

可能導致筆數變化的因素（非程式隨機性）：
1. 網站資料本身有更新（新增/刪除會員）
2. 網站分頁機制不穩定（如台中市、高雄市的 bug）
3. 網路連線問題導致某些頁面抓取失敗
4. 網站結構改版

結論：同一時間點多次執行，結果應該完全一致。
''')


# Script entry point.
if __name__ == '__main__':
    main()
