# -*- coding: utf-8 -*-
"""
台灣不動產估價師公會名單抓取工具

支援公會：
- 台北市：http://www.reaa.org.tw/
- 新北市：http://www.tcarea.url.tw/
- 桃園市：https://www.tarea.org.tw/
- 台中市：http://www.creaa.org.tw/
- 台南市：http://www.tnreaa.org.tw/
- 高雄市：http://www.karea.org.tw/
- 全聯會：http://www.rocreaa.org.tw/

使用方式：
    python -X utf8 scraper.py              # 抓取所有公會
    python -X utf8 scraper.py taipei       # 只抓取台北市
    python -X utf8 scraper.py --list       # 列出支援的公會

建議每月執行一次更新。
"""

import sys
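# Force UTF-8 stdout so the Chinese roster data prints correctly even on
# Windows consoles (sys.stdout.reconfigure requires Python 3.7+).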
sys.stdout.reconfigure(encoding='utf-8')

import requests
import urllib3
from bs4 import BeautifulSoup
import csv
import time
import os
import re
from datetime import datetime

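# Several of the association sites appear to serve invalid or self-signed TLS
# certificates, so every request below uses verify=False; suppress the
# resulting warnings.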
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

OUTPUT_DIR = os.path.dirname(os.path.abspath(__file__))  # CSVs are written next to this script


def parse_standard_table(table):
    """Parse a standard list_form table (the Taipei/Taichung/Tainan layout).

    Cells within each row alternate label/value pairs:
    (label, value, label, value, ...).
    """
    member = {}
    for row in table.find_all('tr'):
        cells = row.find_all('td')
        for i in range(0, len(cells) - 1, 2):
            key = cells[i].get_text(strip=True)
            value = cells[i + 1].get_text(strip=True)
            if key:
                # Normalize '開業事務所' so every source shares one column name.
                if key == '開業事務所':
                    key = '事務所'
                member[key] = value
    return member

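# Every scraper below repeats the same fetch/decode/parse sequence. A shared
# helper along these lines (a sketch; the scrapers still fetch inline) would
# keep the request settings in one place:
def fetch_soup(url, timeout=30):
    """GET a page with the shared settings and return the parsed soup."""
    response = requests.get(url, verify=False, timeout=timeout)
    response.encoding = 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')
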

def fetch_standard_pages(base_url, max_pages=200):
    """Fetch a roster that uses the standard ?page=N pagination scheme."""
    all_members = []
    seen_ids = set()
    page = 1

    while page <= max_pages:
        url = f'{base_url}?page={page}'
        response = requests.get(url, verify=False, timeout=30)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')
        tables = soup.find_all('table', class_='list_form')

        if not tables:
            break

        new_count = 0
        for table in tables:
            member = parse_standard_table(table)
            if member and '姓名' in member:
                # Dedupe on "member ID + name" so two people who happen to
                # share an ID are not collapsed into one record.
                member_id = member.get('會員編號', '') + '_' + member.get('姓名', '')
                if member_id not in seen_ids:
                    seen_ids.add(member_id)
                    all_members.append(member)
                    new_count += 1

        if new_count == 0:
            # An all-duplicate page means the site is repeating content; stop.
            break
        if len(tables) < 10:
            # Fewer than a full page of 10 entries marks the last page.
            break

        page += 1
        time.sleep(0.2)  # be polite to the server

    return all_members

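# Likewise, the CSV-writing pattern repeats in every scraper. A helper along
# these lines (again a sketch, not wired into the scrapers below) would remove
# the duplication:
def write_csv(filename, fieldnames, rows):
    """Write rows under OUTPUT_DIR as UTF-8-with-BOM CSV, ignoring extra keys."""
    path = os.path.join(OUTPUT_DIR, filename)
    with open(path, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(rows)
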

def scrape_taipei(update_time):
    """Scrape the Taipei City association."""
    print('[Taipei Real Estate Appraisers Association]')

    results = {}

    # Member roster
    members = fetch_standard_pages('http://www.reaa.org.tw/member_namelist.php')
    for m in members:
        m['更新時間'] = update_time

    output_file = os.path.join(OUTPUT_DIR, 'taipei_appraisers.csv')
    fieldnames = ['姓名', '會員編號', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(members)
    print(f'  Appraiser members: {len(members)}')
    results['members'] = len(members)

    # Appraisal assistant roster
    assistants = fetch_standard_pages('http://www.reaa.org.tw/member_namelist_02.php')
    for a in assistants:
        a['更新時間'] = update_time

    output_file = os.path.join(OUTPUT_DIR, 'taipei_assistants.csv')
    fieldnames = ['姓名', '聯絡電話', '事務所', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(assistants)
    print(f'  Appraisal assistants: {len(assistants)}')
    results['assistants'] = len(assistants)

    # Board roster (directors and supervisors)
    url = 'http://www.reaa.org.tw/supervisor_namelist.php'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    tables = soup.find_all('table', class_='list_form')

    supervisors = []
    for table in tables:
        member = parse_standard_table(table)
        if member and '姓名' in member:
            member['屆別'] = '現任'
            member['更新時間'] = update_time
            supervisors.append(member)

    output_file = os.path.join(OUTPUT_DIR, 'taipei_supervisors.csv')
    fieldnames = ['姓名', '職稱', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '屆別', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(supervisors)
    print(f'  Board members: {len(supervisors)}')
    results['supervisors'] = len(supervisors)

    return results


def scrape_newtaipei(update_time):
    """Scrape the New Taipei City association."""
    print('[New Taipei Real Estate Appraisers Association]')

    url = 'http://www.tcarea.url.tw/product_1244107.html'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    # The page is free-form text rather than a table, so walk the lines with a
    # small state machine: serial number, then name, office, phone, address.
    text = soup.get_text()
    members = []
    current_member = {}

    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue

        if re.match(r'^\d+$', line) and int(line) < 200:
            # A bare number under 200 starts a new record; flush the previous
            # one unless it only captured the header word '會員'.
            if current_member.get('姓名') and current_member['姓名'] != '會員':
                members.append(current_member)
            current_member = {'序號': line}
        elif current_member.get('序號') and not current_member.get('姓名'):
            # Chinese personal names run two to four characters.
            if 2 <= len(line) <= 4:
                current_member['姓名'] = line
        elif current_member.get('姓名') and not current_member.get('事務所'):
            if '不動產估價師' in line:
                current_member['事務所'] = line
        elif current_member.get('事務所') and not current_member.get('聯絡電話'):
            # Taiwanese phone numbers start with 0.
            if re.match(r'^0\d', line):
                current_member['聯絡電話'] = line
        elif current_member.get('聯絡電話') and not current_member.get('地址'):
            if '新北市' in line or '台北市' in line:
                current_member['地址'] = line

    # Flush the final record.
    if current_member.get('姓名') and current_member['姓名'] != '會員':
        members.append(current_member)

    # Drop fragments that never picked up both a name and an office.
    members = [m for m in members if m.get('姓名') and m.get('事務所')]

    for m in members:
        m['更新時間'] = update_time

    output_file = os.path.join(OUTPUT_DIR, 'newtaipei_appraisers.csv')
    fieldnames = ['序號', '姓名', '事務所', '聯絡電話', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(members)

    print(f'  Appraiser members: {len(members)}')
    return {'members': len(members)}


def scrape_taoyuan(update_time):
    """Scrape the Taoyuan City association."""
    print('[Taoyuan Real Estate Appraisers Association]')

    url = 'https://www.tarea.org.tw/list/Member'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    table = soup.find('table')
    if table is None:
        # Guard against layout changes: bail out rather than crash on None.
        print('  No member table found; the page layout may have changed')
        return {'members': 0}
    rows = table.find_all('tr')

    members = []
    headers = []
    for i, row in enumerate(rows):
        cells = row.find_all(['td', 'th'])
        values = [c.get_text(strip=True) for c in cells]

        if i == 0:
            headers = values  # the first row carries the column headers
        else:
            member = dict(zip(headers, values))
            member['更新時間'] = update_time
            members.append(member)

    output_file = os.path.join(OUTPUT_DIR, 'taoyuan_appraisers.csv')
    fieldnames = ['會號', '姓名', '證書字號', '事務所名稱', '開業執照地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(members)

    print(f'  Appraiser members: {len(members)}')
    return {'members': len(members)}


def scrape_taichung(update_time):
    """Scrape the Taichung City association."""
    print('[Taichung Real Estate Appraisers Association]')

    results = {}

    members = fetch_standard_pages('http://www.creaa.org.tw/member_namelist.php')

    # fetch_standard_pages already dedupes, but keep a second pass here while
    # stamping each record with the update time.
    seen_ids = set()
    unique_members = []
    for m in members:
        # Dedupe on "member ID + name" so two people who happen to share an ID
        # are not collapsed into one record.
        member_id = m.get('會員編號', '') + '_' + m.get('姓名', '')
        if member_id not in seen_ids:
            seen_ids.add(member_id)
            m['更新時間'] = update_time
            unique_members.append(m)

    output_file = os.path.join(OUTPUT_DIR, 'taichung_appraisers.csv')
    fieldnames = ['姓名', '會員編號', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(unique_members)

    print(f'  Appraiser members: {len(unique_members)}')
    results['members'] = len(unique_members)

    # Board roster (directors and supervisors)
    url = 'http://www.creaa.org.tw/supervisor_namelist.php'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    tables = soup.find_all('table', class_='list_form')

    supervisors = []
    for table in tables:
        member = parse_standard_table(table)
        if member and '姓名' in member:
            member['屆別'] = '現任'
            member['更新時間'] = update_time
            supervisors.append(member)

    output_file = os.path.join(OUTPUT_DIR, 'taichung_supervisors.csv')
    fieldnames = ['姓名', '職稱', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '屆別', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(supervisors)

    print(f'  Board members: {len(supervisors)}')
    results['supervisors'] = len(supervisors)

    return results


def scrape_tainan(update_time):
    """Scrape the Tainan City association."""
    print('[Tainan Real Estate Appraisers Association]')

    members = fetch_standard_pages('http://www.tnreaa.org.tw/member_namelist.php')

    for m in members:
        m['更新時間'] = update_time

    output_file = os.path.join(OUTPUT_DIR, 'tainan_appraisers.csv')
    fieldnames = ['姓名', '會員編號', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(members)

    print(f'  Appraiser members: {len(members)}')
    return {'members': len(members)}


def scrape_kaohsiung(update_time):
    """Scrape the Kaohsiung City association (card-style markup)."""
    print('[Kaohsiung Real Estate Appraisers Association]')

    all_members = []

    for page in range(1, 20):  # safety cap: scan at most 19 pages
        url = f'http://www.karea.org.tw/member_roster.php?page={page}'
        response = requests.get(url, verify=False, timeout=30)
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')

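        # Each member is a <div class="msgrids"> card: a "titles" div carries
        # the office name, and each "ptxt" row pairs a t1 label with a t2 value.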
        cards = soup.find_all('div', class_='msgrids')
        if not cards:
            break

        for card in cards:
            member = {}
            title = card.find('div', class_='titles')
            if title:
                member['事務所'] = title.get_text(strip=True)

            ptxts = card.find_all('div', class_='ptxt')
            for ptxt in ptxts:
                t1 = ptxt.find('div', class_='t1')
                t2 = ptxt.find('div', class_='t2')
                if t1 and t2:
                    key = t1.get_text(strip=True)
                    value = t2.get_text(strip=True)
                    # Normalize card labels to the shared column names.
                    key_map = {'估價師': '姓名', '事務所地址': '地址', '事務所網址': '網址'}
                    key = key_map.get(key, key)
                    member[key] = value

            if member.get('姓名'):
                all_members.append(member)

        time.sleep(0.2)  # be polite to the server

    # Dedupe by name, since the same card can appear on more than one page.
    seen_names = set()
    unique_members = []
    for m in all_members:
        name = m['姓名']
        if name not in seen_names:
            seen_names.add(name)
            m['更新時間'] = update_time
            unique_members.append(m)

    output_file = os.path.join(OUTPUT_DIR, 'kaohsiung_appraisers.csv')
    fieldnames = ['姓名', '聯絡電話', '傳真', '電子信箱', '事務所', '地址', '網址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(unique_members)

    print(f'  Appraiser members: {len(unique_members)}')
    return {'members': len(unique_members)}


def scrape_associations(update_time):
    """Scrape the national federation's directory of local associations."""
    print('[National Federation association directory]')

    url = 'http://www.rocreaa.org.tw/member_namelist.php'
    response = requests.get(url, verify=False, timeout=30)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')

    tables = soup.find_all('table', class_='list_form')

    associations = []
    for table in tables:
        assoc = parse_standard_table(table)
        if assoc.get('公會名稱'):
            assoc['更新時間'] = update_time
            associations.append(assoc)
            print(f'  - {assoc["公會名稱"]}')

    output_file = os.path.join(OUTPUT_DIR, 'associations.csv')
    fieldnames = ['公會名稱', '聯絡電話', '傳真', '電子信箱', '地址', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(associations)

    return {'associations': len(associations)}


def scrape_ctreaa(update_time):
    """Scrape the Central Taiwan association's board roster (with detail pages)."""
    print('[Central Taiwan Real Estate Appraisers Association]')

    base_url = 'https://ctreaa.org.tw'

    # ===== Step 1: board roster =====
    # team.html is a Vue.js page, so the roster below was transcribed from it
    # by hand rather than parsed at runtime; update it manually when the board
    # changes.
    supervisors_data = [
        {'position': '理事長', 'name': '邱仕皇', 'eng_name': 'CHIU SHIH-HUANG', 'detail_page': 'manager_3.html'},
        {'position': '副理事長兼公關主委', 'name': '林筱涵', 'eng_name': 'LIN HSIAO-HAN', 'detail_page': 'manager_4.html'},
        {'position': '副理事長兼專案主委', 'name': '劉獻隆', 'eng_name': 'LIU HSIEN-LUNG', 'detail_page': 'manager_5.html'},
        {'position': '名譽理事長', 'name': '黃昭閔', 'eng_name': 'HUANG CHAO-MING', 'detail_page': 'manager_6.html'},
        {'position': '常務監事', 'name': '江東融', 'eng_name': 'CHIANG TUNG-JUNG', 'detail_page': 'manager_7.html'},
        {'position': '秘書長', 'name': '宋明一', 'eng_name': 'SUNG MING-YI', 'detail_page': 'manager_8.html'},
        {'position': '理事兼襄閱主委', 'name': '李順吉', 'eng_name': 'LEE SHUN-CHI', 'detail_page': 'manager_9.html'},
        {'position': '理事兼估價技術主委', 'name': '陳永洲', 'eng_name': 'CHEN YUNG-CHOU', 'detail_page': 'manager_10.html'},
        {'position': '理事兼活動主委', 'name': '廖家顯', 'eng_name': 'LIAO CHIA-HSIEN', 'detail_page': 'manager_11.html'},
        {'position': '理事', 'name': '陳順傑', 'eng_name': 'CHEN SHUN-CHIEH', 'detail_page': 'manager_12.html'},
        {'position': '理事', 'name': '姚承欣', 'eng_name': 'YAO CHENG-HSIN', 'detail_page': 'manager_13.html'},
        {'position': '理事', 'name': '邱盟文', 'eng_name': 'CHIU MENG-WEN', 'detail_page': 'manager_14.html'},
        {'position': '監事兼紀律主委', 'name': '朱俶瑩', 'eng_name': 'CHU CHU-YING', 'detail_page': 'manager_15.html'},
        {'position': '監事兼市場資訊主委', 'name': '張順奇', 'eng_name': 'CHANG SHUN-CHYI', 'detail_page': 'manager_16.html'},
        {'position': '副秘書長', 'name': '林秀青', 'eng_name': 'LIN HSIU-CHING', 'detail_page': 'manager_17.html'},
        {'position': '副秘書長', 'name': '鄭春輝', 'eng_name': 'JHENG CHUN-HUEI', 'detail_page': 'manager_18.html'},
        {'position': '副秘書長兼教育主委', 'name': '劉冠于', 'eng_name': 'LIU GUAN-YU', 'detail_page': 'manager_19.html'},
    ]
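    # The detail pages run manager_3.html through manager_19.html in board
    # order; if a page moves on the site, only the table above needs edits.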

    # ===== Step 2: fetch each board member's detail page =====
    supervisors = []

    for data in supervisors_data:
        supervisor = {
            '職稱': data['position'],
            '姓名': data['name'],
            '英文名': data['eng_name'],
            '事務所': '',
            '學歷': '',
            '證照': '',
            '屆別': '現任',
            '更新時間': update_time,
        }

        # Try the detail page for office, education, and licence data.
        if data.get('detail_page'):
            try:
                detail_url = f"{base_url}/{data['detail_page']}"
                detail_resp = requests.get(detail_url, verify=False, timeout=10)
                detail_resp.encoding = 'utf-8'

                # The detail data sits in a Vue.js data object embedded in the
                # HTML, so regex against the raw text rather than the DOM.
                html = detail_resp.text

                # Fields look like: 事務所名稱:'駿豐不動產估價師事務所',
                office_match = re.search(r"事務所名稱:\s*['\"]([^'\"]+)['\"]", html)
                if office_match:
                    supervisor['事務所'] = office_match.group(1)

                education_match = re.search(r"學歷:\s*['\"]([^'\"]+)['\"]", html)
                if education_match:
                    # Replace embedded <br> tags with a full-width separator.
                    education = education_match.group(1).replace('<br/>', '；').replace('<br>', '；')
                    supervisor['學歷'] = education

                # The licence field is an array literal, e.g.
                # 證照:["不動產估價師、不動產經紀人、地政士、仲裁人"]
                license_match = re.search(r"證照:\s*\[([^\]]+)\]", html)
                if license_match:
                    # Pull the quoted strings out of the array literal.
                    licenses_raw = license_match.group(1)
                    licenses = re.findall(r"['\"]([^'\"]+)['\"]", licenses_raw)
                    if licenses:
                        supervisor['證照'] = licenses[0]  # usually one comma-separated string

                time.sleep(0.2)  # be polite to the server

            except Exception as e:
                print(f'    Failed to fetch detail page for {data["name"]}: {e}')

        supervisors.append(supervisor)
        # '事務所' is pre-initialized to '', so fall back with `or`, not .get().
        print(f'    {data["name"]} ({data["position"]}) - {supervisor["事務所"] or "unknown"}')

    # ===== Step 3: write the CSV =====
    output_file = os.path.join(OUTPUT_DIR, 'ctreaa_supervisors.csv')
    fieldnames = ['職稱', '姓名', '英文名', '事務所', '學歷', '證照', '屆別', '更新時間']
    with open(output_file, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        writer.writeheader()
        writer.writerows(supervisors)

    print(f'  Board members: {len(supervisors)} (with detail-page data)')
    print('  (Note: this site publishes no member roster; members must come from government open data.)')
    return {'supervisors': len(supervisors)}


SCRAPERS = {
    'taipei': ('Taipei City', scrape_taipei),
    'newtaipei': ('New Taipei City', scrape_newtaipei),
    'taoyuan': ('Taoyuan City', scrape_taoyuan),
    'taichung': ('Taichung City', scrape_taichung),
    'tainan': ('Tainan City', scrape_tainan),
    'kaohsiung': ('Kaohsiung City', scrape_kaohsiung),
    'ctreaa': ('Central Taiwan', scrape_ctreaa),
    'associations': ('National Federation', scrape_associations),
}

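# To add an association, write a scrape_<key>(update_time) function that
# returns a dict of counts (e.g. {'members': n}) and register it here; main()
# picks it up for both single-association runs and the full sweep.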

def main():
    update_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    if len(sys.argv) > 1:
        arg = sys.argv[1].lower()

        if arg == '--list':
            print('Supported associations:')
            for key, (name, _) in SCRAPERS.items():
                print(f'  {key}: {name}')
            return

        if arg in SCRAPERS:
            name, scraper = SCRAPERS[arg]
            print(f'Scraping the {name} roster')
            print(f'Update time: {update_time}')
            print('=' * 50)
            scraper(update_time)
            return

        print(f'Unknown argument: {arg}')
        print('Use --list to see the supported associations')
        return

    print('Taiwan real estate appraiser association roster scraper')
    print(f'Update time: {update_time}')
    print('=' * 50)

    total_members = 0

    for key, (name, scraper) in SCRAPERS.items():
        try:
            result = scraper(update_time)
            if 'members' in result:
                total_members += result['members']
        except Exception as e:
            print(f'  Error: {e}')
        print()

    print('=' * 50)
    print(f'Total scraped: {total_members} appraisers')
    print(f'Update time: {update_time}')


if __name__ == '__main__':
    main()
