import json
import subprocess
from datetime import datetime, time
import sys
from pyspark.sql import SparkSession
from Pyspark_job.utils import common_util
from Pyspark_job.utils import DolphinschedulerHelper
from yswg_utils.common_df import get_asin_unlanuch_df
from Pyspark_job.utils.spark_util import SparkUtil
import script.pg14_to_pg6 as sc
from Pyspark_job.script import post_to_dolphin
import subprocess

if __name__ == '__main__':
    # Script entry point.
    #
    # NOTE(review): the previous body was `str.upper("seatunnel")`, whose
    # return value was discarded — a pure no-op with no observable effect.
    # It has been removed; drop in the real entry point (presumably a
    # DolphinScheduler process launch like the example below) when known.
    #
    # Reference invocation kept from the original (commented out by the
    # author); shows the intended table-name layout for an ABA run:
    # date_info = '2023_34'
    # table_names = f"us_search_term_rank_er_{date_info}," \
    #               f"us_search_term_rank_hr_{date_info},us_search_term_rank_tr_{date_info},us_other_search_term_{date_info}," \
    #               f"us_brand_analytics_{date_info}"
    # post_to_dolphin.DolphinschedulerHelper.start_process_instance('us', '2023-34', table_names, 'aba')
    pass