import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
from collections import Counter
import time
import threading

class StaticGolfer:
    def __init__(self, root):
        self.root = root
        self.root.title("StaticGolfer - Multi-Language Code Compressor")
        self.root.geometry("1000x800")

        # Enable/disable custom token entry
        self.custom_tokens_var = tk.BooleanVar(value=False)
        self.custom_tokens_separator_var = tk.StringVar(value='\\n')
        self.custom_tokens_visible = False
        self.tokenization_method_var = tk.StringVar(value="re-tokenization")
        
        # New: Python legacy packer option
        self.python_legacy_packer_var = tk.BooleanVar(value=False)
        
        self.setup_available_chars()
        self.setup_gui()
        self.setup_realtime_updates()

    def setup_available_chars(self):
        self.common_safe_chars = [
            'h', 'j', 'm', 'q', 'r', 'u', 'v', 'w', 'x', 'z',
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
            'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
            '0', '2', '4', '5', '6', '7', '8', '9',
            '§', '¶', '©', '®', '°', '·', '•',
        ]
        self.language_chars = {
            'TeX': self.common_safe_chars + ['±', '×', '÷', '≈', '≠', '≤', '≥', '∞', '∂', '∇', '√', '∫', '∑', '∏', 'π', 'µ',
                                            '£', '€', '¥', '¢', '←', '→', '↑', '↓', '↔',
                                            'À', 'Á', 'Â', 'Ã', 'Ä', 'Å', 'Æ', 'Ç', 'È', 'É', 'Ê', 'Ë', 'Ì', 'Í', 'Î', 'Ï',
                                            'Ð', 'Ñ', 'Ò', 'Ó', 'Ô', 'Õ', 'Ö', 'Ø', 'Ù', 'Ú', 'Û', 'Ü', 'Ý', 'Þ', 'ß',
                                            'à', 'á', 'â', 'ã', 'ä', 'å', 'æ', 'ç', 'è', 'é', 'ê', 'ë', 'ì', 'í', 'î', 'ï',
                                            'ð', 'ñ', 'ò', 'ó', 'ô', 'õ', 'ö', 'ø', 'ù', 'ú', 'û', 'ü', 'ý', 'þ', 'ÿ'],
            'Python': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'JavaScript': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'Pascal': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'jq': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'Lua': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'Bash': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'),
            'GolfScript': self.common_safe_chars + list('!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
        }
        # No per-language character restrictions: all characters, including spaces and newlines, are allowed in tokens
        self.language_unsafe_chars = {
            'TeX': set(),
            'Python': set(),
            'JavaScript': set(),
            'Pascal': set(),
            'jq': set(),
            'Lua': set(),
            'Bash': set(),
            'GolfScript': set()
        }
        self.language_compatibility = {
            'TeX': ['LaTeX', 'XeLaTeX', 'LuaLaTeX', 'pdfLaTeX'],
            'Python': ['V', 'Scala'],
            'JavaScript': ['TypeScript', 'CoffeeScript', 'LiveScript'],
            'Pascal': ['Delphi', 'Free Pascal', 'Turbo Pascal'],
            'jq': ['jq'],
            'Lua': ['Lua'],
            'Bash': ['Bash'],
            'GolfScript': ['GolfScript']
        }

    def setup_gui(self):
        main_frame = ttk.Frame(self.root, padding="10")
        main_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))

        ttk.Label(main_frame, text="Input Text to Compress:", font=('Arial', 11, 'bold')).grid(row=0, column=0, sticky=tk.W, pady=(0, 5))
        self.input_text = scrolledtext.ScrolledText(main_frame, width=120, height=10, font=('Consolas', 9))
        self.input_text.grid(row=1, column=0, columnspan=2, sticky=(tk.W, tk.E), pady=(0, 10))

        settings_frame = ttk.LabelFrame(main_frame, text="Compression Settings", padding="5")
        settings_frame.grid(row=2, column=0, columnspan=2, sticky=(tk.W, tk.E), pady=(0, 10))

        settings_row1 = ttk.Frame(settings_frame)
        settings_row1.grid(row=0, column=0, columnspan=7, sticky=(tk.W, tk.E), pady=(0, 5))

        ttk.Label(settings_row1, text="Target Language:").grid(row=0, column=0, sticky=tk.W)
        self.language_var = tk.StringVar(value="TeX")
        language_combo = ttk.Combobox(settings_row1, textvariable=self.language_var,
                                      values=["TeX", "Python", "JavaScript", "Pascal", "jq", "Lua", "Bash", "GolfScript"],
                                      state="readonly", width=12)
        language_combo.grid(row=0, column=1, sticky=tk.W, padx=(5, 0))

        self.fixed_token_var = tk.BooleanVar(value=False)
        self.fixed_checkbutton = ttk.Checkbutton(settings_row1, text="Force fixed token count", variable=self.fixed_token_var)
        self.fixed_checkbutton.grid(row=0, column=2, sticky=tk.W, padx=(20, 0))

        # Newlines are always allowed in tokens, so there is no checkbox; the variable is kept for code paths that read it
        self.allow_newlines_var = tk.BooleanVar(value=True)

        self.js_short_replace_var = tk.BooleanVar(value=True)
        ttk.Checkbutton(settings_row1, text="Use short .R in JS", variable=self.js_short_replace_var).grid(row=0, column=3, sticky=tk.W, padx=(20, 0))

        # New: Python legacy packer checkbox
        self.python_legacy_check = ttk.Checkbutton(settings_row1, text="Python: Use legacy packer", variable=self.python_legacy_packer_var)
        self.python_legacy_check.grid(row=0, column=4, sticky=tk.W, padx=(20, 0))

        settings_row2 = ttk.Frame(settings_frame)
        settings_row2.grid(row=1, column=0, columnspan=7, sticky=(tk.W, tk.E), pady=(5, 0))

        ttk.Label(settings_row2, text="Max Replacements:").grid(row=0, column=0, sticky=tk.W)
        self.max_replacements_var = tk.IntVar(value=20)
        self.max_replacements_spin = ttk.Spinbox(settings_row2, from_=1, to=1000, width=8, textvariable=self.max_replacements_var)
        self.max_replacements_spin.grid(row=0, column=1, sticky=tk.W, padx=(5, 0))

        ttk.Label(settings_row2, text="Min Token Length:").grid(row=0, column=2, sticky=tk.W, padx=(20, 0))
        self.min_token_var = tk.IntVar(value=2)
        self.min_token_spin = ttk.Spinbox(settings_row2, from_=1, to=1000, width=8, textvariable=self.min_token_var)
        self.min_token_spin.grid(row=0, column=3, sticky=tk.W, padx=(5, 0))

        ttk.Label(settings_row2, text="Max Token Length:").grid(row=0, column=4, sticky=tk.W, padx=(20, 0))
        self.max_token_var = tk.IntVar(value=8)
        self.max_token_spin = ttk.Spinbox(settings_row2, from_=2, to=1000, width=8, textvariable=self.max_token_var)
        self.max_token_spin.grid(row=0, column=5, sticky=tk.W, padx=(5, 0))

        ttk.Label(settings_row2, text="Tokenization:").grid(row=0, column=6, sticky=tk.W, padx=(20, 0))
        self.tokenization_combo = ttk.Combobox(settings_row2, textvariable=self.tokenization_method_var,
                                              values=["re-tokenization", "one-time"], state="readonly", width=12)
        self.tokenization_combo.grid(row=0, column=7, sticky=tk.W, padx=(5, 0))

        ttk.Label(settings_frame, text="Available chars:").grid(row=2, column=0, sticky=tk.W, pady=(10, 0))
        self.chars_preview_label = ttk.Label(settings_frame, text="", font=('Consolas', 8))
        self.chars_preview_label.grid(row=2, column=1, columnspan=6, sticky=tk.W, padx=(5, 0), pady=(10, 0))
        self.update_chars_preview()

        # Add "About PyPacker" button
        about_button = ttk.Button(settings_frame, text="About PyPacker...", command=self.show_about_pypacker, width=15)
        about_button.grid(row=2, column=7, sticky=tk.E, padx=(0, 5), pady=(10, 0))

        # Controls
        button_frame = ttk.Frame(main_frame)
        button_frame.grid(row=3, column=0, columnspan=2, pady=(0, 10))
        self.optimize_btn = ttk.Button(button_frame, text="Optimize Code", command=self.start_optimization)
        self.optimize_btn.pack(side=tk.LEFT, padx=(0, 10))
        ttk.Button(button_frame, text="Clear All", command=self.clear_all).pack(side=tk.LEFT, padx=(0, 10))
        ttk.Button(button_frame, text="Load Example", command=self.load_example).pack(side=tk.LEFT, padx=(0, 10))
        ttk.Button(button_frame, text="Analyze Tokens Only", command=self.analyze_tokens).pack(side=tk.LEFT, padx=(0, 10))
        ttk.Button(button_frame, text="Show Compatible Languages", command=self.show_compatible_languages).pack(side=tk.LEFT)

        self.progress = ttk.Progressbar(main_frame, mode='indeterminate')
        self.progress.grid(row=4, column=0, columnspan=2, sticky=(tk.W, tk.E), pady=(0, 10))
        notebook = ttk.Notebook(main_frame)
        notebook.grid(row=5, column=0, columnspan=2, sticky=(tk.W, tk.E, tk.N, tk.S), pady=(0, 10))
        code_frame = ttk.Frame(notebook, padding="5")
        self.code_text = scrolledtext.ScrolledText(code_frame, width=120, height=12, font=('Consolas', 9))
        self.code_text.pack(fill=tk.BOTH, expand=True)
        notebook.add(code_frame, text="Optimized Code")
        analysis_frame = ttk.Frame(notebook, padding="5")
        self.analysis_text = scrolledtext.ScrolledText(analysis_frame, width=120, height=12, font=('Consolas', 9))
        self.analysis_text.pack(fill=tk.BOTH, expand=True)
        notebook.add(analysis_frame, text="Token Analysis")
        stats_frame = ttk.Frame(main_frame)
        stats_frame.grid(row=6, column=0, columnspan=2, sticky=(tk.W, tk.E), pady=(5, 0))
        self.stats_label = ttk.Label(stats_frame, text="Ready to optimize...", font=('Arial', 10))
        self.stats_label.pack(side=tk.LEFT)
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)
        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(5, weight=1)

    def show_about_pypacker(self):
        about_text = """PyPacker: Python code packer for code golf

There was no python packer previously. PyPacker is made with love to
support code golfing and text compression for python, and PyPacker
was made to be a RegPack for python.

PyPacker is a python text compressor that tries to make as much
advantage by replacing repeated tokens/strings in input with
single-letter characters, then replacing them to original string
and saving lots of bytes. It uses a dictionary compression method.

To execute packed code rather than outputting a string, replace
print with exec.

It has a 78 bytes general overhead (sorry) and 4 bytes overhead
per token.

Finally, this compressor allows to win in art/text compression
code golfing challenges, with automated work and generation in
seconds without needing CPU-heavy computations."""
        messagebox.showinfo("About PyPacker", about_text)

    def update_chars_preview(self):
        language = self.language_var.get()
        available_chars = self.language_chars[language]
        preview = "".join(available_chars[:30]) + ("..." if len(available_chars) > 30 else "")
        self.chars_preview_label.config(text=preview)

    def setup_realtime_updates(self):
        self.language_var.trace('w', self.on_language_change)
        self.max_replacements_var.trace('w', self.realtime_optimize)
        self.min_token_var.trace('w', self.realtime_optimize)
        self.max_token_var.trace('w', self.realtime_optimize)
        self.fixed_token_var.trace('w', self.on_token_params_change)
        self.js_short_replace_var.trace('w', self.realtime_optimize)
        self.tokenization_method_var.trace('w', self.realtime_optimize)
        self.python_legacy_packer_var.trace('w', self.realtime_optimize)
        self.input_text.bind('<KeyRelease>', self.realtime_optimize)

    def on_token_params_change(self, *args):
        # The spinbox limit is the same whether or not a fixed token count is forced
        self.max_replacements_spin.config(to=1000)
        self.realtime_optimize()

    def on_language_change(self, *args):
        self.update_chars_preview()
        self.realtime_optimize()

    def _get_input_text_preserve_first_leading_spaces(self):
        # Return the input text verbatim; leading spaces on the first line are preserved
        text = self.input_text.get("1.0", tk.END)
        # Drop only the single trailing newline that the Text widget always appends
        if text.endswith('\n'):
            text = text[:-1]
        return text

    def realtime_optimize(self, *args):
        if self.optimize_btn['state'] == 'disabled':
            return
        input_text = self._get_input_text_preserve_first_leading_spaces()
        if not input_text.strip("\n"):  # treat the input as empty only when it contains nothing but newlines
            return
        if hasattr(self, '_realtime_timer'):
            self.root.after_cancel(self._realtime_timer)
        self._realtime_timer = self.root.after(500, self._perform_realtime_optimize)

    def _perform_realtime_optimize(self):
        input_text = self._get_input_text_preserve_first_leading_spaces()
        if not input_text.strip("\n"):
            return

        try:
            language = self.language_var.get()
            use_js_short = self.js_short_replace_var.get()

            if self.custom_tokens_var.get():
                # Custom tokens logic (unchanged)
                return
            else:
                max_replacements = self.max_replacements_var.get()
                min_token_len = self.min_token_var.get()
                max_token_len = self.max_token_var.get()
                fixed_token_count = self.fixed_token_var.get()
                tokenization_method = self.tokenization_method_var.get()
                use_legacy_packer = self.python_legacy_packer_var.get()

                processed_text = input_text
                used_chars = set()
                optimizations = []
                current_text = processed_text

                if tokenization_method == "one-time":
                    tokens = self.tokenize_text_comprehensive(current_text, min_token_len, max_token_len, language)
                    for round_num in range(max_replacements):
                        best_token, details = self.find_best_token_from_list(tokens, current_text, used_chars, language)
                        if not best_token:
                            if fixed_token_count:
                                best_token, details = self.find_any_token_from_list(tokens, current_text, used_chars, language)
                                if not best_token:
                                    break
                            else:
                                break
                        replacement_char = details['replacement_char']
                        used_chars.add(replacement_char)
                        current_text = self.apply_token_replacement(current_text, best_token, replacement_char, language)
                        optimizations.append({
                            'token': best_token,
                            'char': replacement_char,
                            'savings': details['savings'],
                            'frequency': details['frequency']
                        })
                        if best_token in tokens:
                            del tokens[best_token]
                else:
                    for round_num in range(max_replacements):
                        tokens = self.tokenize_text_comprehensive(current_text, min_token_len, max_token_len, language)
                        if not tokens and not fixed_token_count:
                            break
                        best_token, details = self.find_best_token(tokens, current_text, used_chars, language)
                        if not best_token:
                            if fixed_token_count:
                                best_token, details = self.find_any_token(tokens, current_text, used_chars, language)
                                if not best_token:
                                    break
                            else:
                                break
                        replacement_char = details['replacement_char']
                        used_chars.add(replacement_char)
                        current_text = self.apply_token_replacement(current_text, best_token, replacement_char, language)
                        optimizations.append({
                            'token': best_token,
                            'char': replacement_char,
                            'savings': details['savings'],
                            'frequency': details['frequency']
                        })

                # Output order for replacements:
                if language == "jq":
                    optimizations_generate = list(optimizations)
                elif language != 'TeX':
                    optimizations_generate = list(reversed(optimizations))
                else:
                    optimizations_generate = optimizations

                # Generate final code with new Python packer
                if language == "Python" and not use_legacy_packer:
                    final_code = self.generate_python_smart_code(processed_text, optimizations_generate, current_text)
                else:
                    final_code = self.generate_final_code(processed_text, optimizations_generate, current_text, language, use_js_short)

            original_bytes = len(input_text.encode('utf-8'))
            compressed_bytes = len(final_code.encode('utf-8'))
            ratio = original_bytes / compressed_bytes if compressed_bytes > 0 else 0

            self.code_text.delete("1.0", tk.END)
            self.code_text.insert("1.0", final_code)

            analysis_log = ["=== REAL-TIME OPTIMIZATION RESULTS ==="]
            analysis_log.append(f"Language: {language}")
            analysis_log.append(f"Custom tokens: {self.custom_tokens_var.get()}")
            analysis_log.append(f"Tokenization method: {self.tokenization_method_var.get()}")
            if language == "Python":
                analysis_log.append(f"Python packer: {'Legacy' if use_legacy_packer else 'Smart (PyPacker)'}")
            analysis_log.append(f"Optimizations applied: {len(optimizations)}")
            analysis_log.append(f"Characters used: {''.join(sorted(opt['char'] for opt in optimizations))}")
            analysis_log.append("")
            analysis_log.append("Applied optimizations:")
            for i, opt in enumerate(optimizations[:10]):
                token_bytes = len(opt['token'].encode('utf-8'))
                display_token = self.escape_display(opt['token'])
                analysis_log.append(f"{i+1}. '{display_token}' -> '{opt['char']}' (freq: {opt['frequency']}, bytes: {token_bytes}, savings: {opt['savings']} bytes)")

            self.analysis_text.delete("1.0", tk.END)
            self.analysis_text.insert("1.0", '\n'.join(analysis_log))

            stats_text = (f"Original: {original_bytes} bytes | "
                         f"Compressed: {compressed_bytes} bytes | "
                         f"Ratio: {ratio:.2f}x | "
                         f"Optimizations: {len(optimizations)} | "
                         f"Chars used: {''.join(sorted(opt['char'] for opt in optimizations))}")
            if language == "Python" and not use_legacy_packer:
                stats_text += " | PyPacker"
            self.stats_label.config(text=stats_text)
        except Exception:
            # Ignore transient errors while the user is still typing; the explicit
            # "Optimize Code" path reports errors via show_error.
            pass

    def tokenize_text_comprehensive(self, text, min_length, max_length, language):
        """Comprehensive tokenization: collect every repeated substring in the requested length range and count its occurrences"""
        tokens = {}
        text_length = len(text)

        for token_size in range(min_length, max_length + 1):
            for start in range(text_length - token_size + 1):
                token = text[start:start + token_size]
                if token in tokens:
                    continue  # already counted at an earlier position
                # Non-overlapping count, matching the semantics of str.replace
                count = text.count(token)
                if count > 1:
                    token_bytes = len(token.encode('utf-8'))
                    replacement_savings = (token_bytes - 1) * count
                    if replacement_savings > 0:
                        tokens[token] = count
        return tokens
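
    # Illustrative example (hypothetical input, not tied to any GUI state): for the
    # text "abcabcabc" with min_length=2 and max_length=3 the method above returns
    # {'ab': 3, 'bc': 3, 'ca': 2, 'abc': 3, 'bca': 2, 'cab': 2}; every repeated
    # substring in the length range is a candidate token.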

    def tokenize_text(self, text, min_length, max_length, language):
        return self.tokenize_text_comprehensive(text, min_length, max_length, language)

    def get_available_replacement_char(self, used_chars, text_chars, language):
        available_chars = self.language_chars[language]
        for char in available_chars:
            if (
                char not in used_chars and
                char not in text_chars and
                ord(char) >= 32
            ):
                return char
        return None

    def calculate_savings(self, token, frequency, replacement_char, language, use_js_short=False):
        token_bytes = len(token.encode('utf-8'))
        total_occurrences = frequency
        if language == 'TeX':
            escaped_token = self.escape_for_tex(token)
            definition_cost = len(f"~{replacement_char}\\def{replacement_char}{{{escaped_token}}}".encode('utf-8'))
        elif language == 'Python':
            # Calculate cost for both legacy and smart packer
            escaped_token = self.escape_for_python(token)
            if self.python_legacy_packer_var.get():
                # Legacy packer cost
                definition_cost = len(f'.replace("{replacement_char}","{escaped_token}")'.encode('utf-8'))
            else:
                # Smart packer cost: the expansion is stored once in the e string, plus
                # roughly 4 structural bytes per token (char and joining space in t,
                # separator in e), as noted in the About text. Still approximate; the
                # exact figure depends on the separator and the range() digit count.
                definition_cost = len(escaped_token.encode('utf-8')) + 4
        elif language == 'JavaScript':
            if use_js_short:
                escaped_token = self.escape_for_javascript(token)
                definition_cost = len(f'.R(/{replacement_char}/g,"{escaped_token}")'.encode('utf-8'))
            else:
                escaped_token = self.escape_for_javascript(token)
                definition_cost = len(f'.replace(/{replacement_char}/g,"{escaped_token}")'.encode('utf-8'))
        elif language == 'Pascal':
            escaped_token = self.escape_for_pascal(token)
            definition_cost = len(f"R(s,'{replacement_char}','{escaped_token}')".encode('utf-8'))
        elif language == 'jq':
            escaped_token = self.escape_for_jq(token)
            definition_cost = len(f'|gsub("{replacement_char}";"{escaped_token}")'.encode('utf-8'))
        elif language == 'Lua':
            escaped_token = self.escape_for_lua(token)
            definition_cost = len(f':gsub("{replacement_char}","{escaped_token}")'.encode('utf-8'))
        elif language == 'Bash':
            escaped_token = self.escape_for_bash(token)
            definition_cost = len(f"${{r//{replacement_char}/{escaped_token}}}".encode('utf-8'))
        elif language == 'GolfScript':
            escaped_token = self.escape_for_golfscript(token)
            definition_cost = len(f"'{replacement_char}'/'{escaped_token}'*".encode('utf-8'))
        replacement_savings = (token_bytes - 1) * total_occurrences
        net_savings = replacement_savings - definition_cost
        return net_savings, replacement_savings, definition_cost
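
    # Worked example (hypothetical numbers) of the cost model above, using the legacy
    # Python packer: the token "abc" (3 bytes) occurring 5 times with replacement char
    # 'h' gives replacement_savings = (3 - 1) * 5 = 10 bytes, while the definition
    # '.replace("h","abc")' costs 19 bytes, so net_savings = 10 - 19 = -9 and the
    # token would be rejected by find_best_token.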

    def find_best_token(self, tokens, current_text, used_chars, language):
        best_token = None
        best_savings = -float('inf')
        best_details = {}
        text_chars = set(current_text)
        for token, frequency in tokens.items():
            replacement_char = self.get_available_replacement_char(used_chars, text_chars, language)
            if not replacement_char:
                continue
            use_js_short = self.js_short_replace_var.get()
            savings, replacement_savings, cost = self.calculate_savings(token, frequency, replacement_char, language, use_js_short)
            if savings > best_savings and savings > 0:
                best_savings = savings
                best_token = token
                best_details = {
                    'savings': savings,
                    'replacement_savings': replacement_savings,
                    'cost': cost,
                    'frequency': frequency,
                    'length': len(token),
                    'replacement_char': replacement_char
                }
        return best_token, best_details

    def find_best_token_from_list(self, tokens, current_text, used_chars, language):
        return self.find_best_token(tokens, current_text, used_chars, language)

    def find_any_token(self, tokens, current_text, used_chars, language):
        best_token = None
        best_savings = -float('inf')
        best_details = {}
        text_chars = set(current_text)
        for token, frequency in tokens.items():
            replacement_char = self.get_available_replacement_char(used_chars, text_chars, language)
            if not replacement_char:
                continue
            use_js_short = self.js_short_replace_var.get()
            savings, replacement_savings, cost = self.calculate_savings(token, frequency, replacement_char, language, use_js_short)
            if savings > best_savings:
                best_savings = savings
                best_token = token
                best_details = {
                    'savings': savings,
                    'replacement_savings': replacement_savings,
                    'cost': cost,
                    'frequency': frequency,
                    'length': len(token),
                    'replacement_char': replacement_char
                }
        return best_token, best_details

    def find_any_token_from_list(self, tokens, current_text, used_chars, language):
        return self.find_any_token(tokens, current_text, used_chars, language)

    def apply_token_replacement(self, text, token, char, language):
        return text.replace(token, char)

    def escape_for_tex(self, text):
        escaped = text.replace('\\', '\\textbackslash ')
        escaped = escaped.replace('{', '\\{').replace('}', '\\}')
        escaped = escaped.replace('%', '\\%').replace('#', '\\#')
        escaped = escaped.replace('&', '\\&').replace('_', '\\_')
        escaped = escaped.replace('$', '\\$').replace('^', '\\^{}')
        return escaped

    def escape_for_python(self, text):
        escaped = text.replace('\\', '\\\\').replace('"', '\\"')
        escaped = escaped.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        return escaped

    def escape_for_javascript(self, text):
        escaped = text.replace('\\', '\\\\').replace('"', '\\"')
        escaped = escaped.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        return escaped

    def escape_for_pascal(self, text):
        escaped = text.replace("'", "''")
        escaped = escaped.replace('\n', "'+#13+'")
        return escaped

    def escape_for_jq(self, text):
        escaped = text.replace('\\', '\\\\').replace('"', '\\"')
        escaped = escaped.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        return escaped

    def escape_for_lua(self, text):
        escaped = text.replace('\\', '\\\\').replace('"', '\\"')
        escaped = escaped.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        return escaped

    def escape_for_bash(self, text):
        escaped = text.replace('\\', '\\\\').replace('"', '\\"')
        escaped = escaped.replace('$', '\\$').replace('`', '\\`')
        escaped = escaped.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t')
        return escaped

    def escape_for_golfscript(self, text):
        # Escape single quotes and backslashes for GolfScript
        escaped = text.replace('\\', '\\\\').replace("'", "\\'")
        return escaped

    def escape_display(self, text):
        return text.replace('\n', '\\n').replace('\r', '\\r').replace('\t', '\\t').replace(' ', '␣')

    def find_separator_char(self, text, tokens, expansions):
        """Find a separator character not used in text or token expansions"""
        # First try space
        if ' ' not in text and all(' ' not in token for token in tokens) and all(' ' not in exp for exp in expansions):
            return ' '
        
        # Try other characters
        for char in [chr(i) for i in range(33, 127)]:
            if char not in text and all(char not in token for token in tokens) and all(char not in exp for exp in expansions):
                return char
        
        # Fallback to a character that's unlikely to appear
        return '\x01'
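
    # Illustrative example of separator selection: for a packed body "hhh" with the
    # single token 'h' and expansion "abc", a space occurs in none of them, so ' ' is
    # chosen and the generated decoder can use a bare .split().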

    def generate_python_smart_code(self, original_text, optimizations, final_body):
        """Generate smart Python code using separator approach"""
        if not optimizations:
            # No optimizations, just return the body
            escaped_body = self.escape_for_python(final_body)
            return f'print("{escaped_body}")'
        
        # Replacement chars and their escaped expansions, in the order they should be
        # applied at decode time (the caller passes them already reversed)
        tokens = [opt['char'] for opt in optimizations]  # single-character replacement tokens
        expansions = [self.escape_for_python(opt['token']) for opt in optimizations]
        
        # Find separator
        separator = self.find_separator_char(final_body, tokens, expansions)
        
        # Build the t and e strings, escaping every embedded piece so it survives
        # being placed inside a double-quoted Python string literal
        t_string = ' '.join(self.escape_for_python(c) for c in tokens)
        e_string = self.escape_for_python(separator).join(expansions)

        # Quote handling for the separator: a bare split() covers whitespace separators
        if separator in (' ', '\t', '\n'):
            split_code_e = ".split()"
        else:
            split_code_e = f'.split("{self.escape_for_python(separator)}")'

        split_code_t = ".split()"

        # Build the decoder (double quotes throughout, matching escape_for_python)
        escaped_body = self.escape_for_python(final_body)
        code = f't="{t_string}"{split_code_t};e="{e_string}"{split_code_e};d="{escaped_body}"' + "\n"
        code += f"for i in range({len(optimizations)}):d=d.replace(t[i],e[i])" + "\n"
        code += "print(d)"
        
        return code
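
    # Illustrative example of the generated decoder (assuming a space separator and a
    # single optimization replacing "abc" with 'h'): for the input "abcabcabc\nabcabcabc"
    # the packed body is "hhh\nhhh" and the emitted program looks roughly like
    #   t="h".split();e="abc".split();d="hhh\nhhh"
    #   for i in range(1):d=d.replace(t[i],e[i])
    #   print(d)
    # which rebuilds and prints the original text.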

    def generate_final_code(self, original_text, optimizations, final_body, language, use_js_short=False):
        """Generate final code for all languages, with smart Python handling"""
        temp_body = final_body
        
        # Handle Python smart packer separately
        if language == 'Python' and not self.python_legacy_packer_var.get():
            return self.generate_python_smart_code(original_text, optimizations, temp_body)
        
        # Legacy code for other languages or Python with legacy packer
        if language == 'TeX':
            lines = []
            lines.append("\\def~#1{\\catcode`#1=13}")
            if optimizations:
                catcode_assignments = "".join(f"~{opt['char']}" for opt in optimizations)
                definitions_line = ""
                for opt in optimizations:
                    token = self.escape_for_tex(opt['token'])
                    char = opt['char']
                    definition = f"\\def{char}{{{token}}}"
                    definitions_line += definition
                lines.append(catcode_assignments)
                lines.append(definitions_line)
            lines.append("")
            lines.append(temp_body)
            return '\n'.join(lines)
        elif language == 'Python':  # Legacy Python packer
            escaped_body = self.escape_for_python(temp_body)
            replacements = ""
            for opt in optimizations:
                token = self.escape_for_python(opt['token'])
                char = opt['char']
                replacements += f'.replace("{char}","{token}")'
            return f'print("{escaped_body}"{replacements})'
        elif language == 'JavaScript':
            escaped_body = self.escape_for_javascript(temp_body)
            replacements = ""
            for opt in optimizations:
                token = self.escape_for_javascript(opt['token'])
                char = opt['char']
                if use_js_short:
                    replacements += f'.R(/{char}/g,"{token}")'
                else:
                    replacements += f'.replace(/{char}/g,"{token}")'
            if use_js_short:
                return f'String[h="prototype"].R=String[h].replace;console.log("{escaped_body}"{replacements})'
            else:
                return f'console.log("{escaped_body}"{replacements})'
        elif language == 'Pascal':
            escaped_body = self.escape_for_pascal(temp_body)
            replacements = f"'{escaped_body}'"
            for opt in optimizations:
                token = self.escape_for_pascal(opt['token'])
                char = opt['char']
                replacements = f"R({replacements},'{char}','{token}')"
            return (
                "function R(s,f,t:UnicodeString):UnicodeString;var p:word;begin while pos(f,s)>0 do begin p:=pos(f,s);delete(s,p,length(f));insert(t,s,p)end;R:=s end;begin write("
                + replacements
                + ")end."
            )
        elif language == 'jq':
            escaped_body = self.escape_for_jq(temp_body)
            replacements = f'"{escaped_body}"'
            # jq pipelines run left to right, so emit the gsub filters in reverse
            # application order: the most recently applied replacement is expanded first
            for opt in reversed(optimizations):
                token = self.escape_for_jq(opt['token'])
                char = opt['char']
                replacements = f'{replacements}|gsub("{char}";"{token}")'
            return replacements
        elif language == 'Lua':
            code_lines = [f'r=[[{temp_body}]]']
            for opt in optimizations:
                token = self.escape_for_lua(opt['token'])
                char = opt['char']
                code_lines.append(f'r=r:gsub("{char}","{token}")')
            code_lines.append('print(r)')
            return '\n'.join(code_lines)
        elif language == 'Bash':
            escaped_body = self.escape_for_bash(temp_body)
            code_lines = [f's="{escaped_body}"']
            code_lines.append('r="$s"')
            for opt in optimizations:
                token = self.escape_for_bash(opt['token'])
                char = opt['char']
                code_lines.append(f'r="${{r//{char}/{token}}}"')
            code_lines.append('echo "$r"')
            return '\n'.join(code_lines)
        elif language == 'GolfScript':
            # GolfScript format: 'DOCUMENT' 'a'/'expansion'* 'b'/'expansion'* ...
            escaped_body = self.escape_for_golfscript(temp_body)
            code_parts = [f"'{escaped_body}'"]
            for opt in optimizations:
                token = self.escape_for_golfscript(opt['token'])
                char = opt['char']
                code_parts.append(f"'{char}'/'{token}'*")
            return ''.join(code_parts)
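
    # Illustrative example of the non-Python generators (hypothetical single
    # optimization replacing "abc" with 'h' in the body "hhh"): the Bash branch emits
    #   s="hhh"
    #   r="$s"
    #   r="${r//h/abc}"
    #   echo "$r"
    # and the GolfScript branch emits 'hhh''h'/'abc'*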

    def start_optimization(self):
        input_text = self._get_input_text_preserve_first_leading_spaces()
        if not input_text.strip("\n"):
            messagebox.showwarning("Input Error", "Please enter some text to optimize.")
            return
        self.optimize_btn.config(state='disabled')
        self.progress.start()
        thread = threading.Thread(target=self.optimize_code, args=(input_text,))
        thread.daemon = True
        thread.start()

    def optimize_code(self, input_text):
        try:
            start_time = time.time()
            language = self.language_var.get()
            use_js_short = self.js_short_replace_var.get()
            use_legacy_packer = self.python_legacy_packer_var.get()

            if self.custom_tokens_var.get():
                # Custom tokens logic (unchanged)
                return
            else:
                max_replacements = self.max_replacements_var.get()
                min_token_len = self.min_token_var.get()
                max_token_len = self.max_token_var.get()
                fixed_token_count = self.fixed_token_var.get()
                tokenization_method = self.tokenization_method_var.get()

                processed_text = input_text
                used_chars = set()
                optimizations = []
                current_text = processed_text
                analysis_log = []
                analysis_log.append("=== MULTI-LANGUAGE CODE OPTIMIZATION ===")
                analysis_log.append(f"Target Language: {language}")
                analysis_log.append(f"Tokenization method: {tokenization_method}")
                if language == "Python":
                    analysis_log.append(f"Python packer: {'Legacy' if use_legacy_packer else 'Smart (PyPacker)'}")
                analysis_log.append(f"Scanning for tokens of length {min_token_len} to {max_token_len}")
                analysis_log.append(f"Fixed token count: {fixed_token_count}")
                analysis_log.append(f"Use JS short replace: {use_js_short}")
                analysis_log.append("NOTE: All characters including spaces and newlines are allowed in tokens!")
                analysis_log.append("")

                if tokenization_method == "one-time":
                    analysis_log.append("Using ONE-TIME tokenization method")
                    tokens = self.tokenize_text_comprehensive(current_text, min_token_len, max_token_len, language)
                    analysis_log.append(f"Found {len(tokens)} potential tokens initially")
                    for round_num in range(max_replacements):
                        best_token, details = self.find_best_token_from_list(tokens, current_text, used_chars, language)
                        if not best_token:
                            if fixed_token_count:
                                best_token, details = self.find_any_token_from_list(tokens, current_text, used_chars, language)
                                if not best_token:
                                    analysis_log.append(f"\nRound {round_num + 1}: No tokens found. Stopping.")
                                    break
                                analysis_log.append(f"\nRound {round_num + 1}: Using suboptimal token (fixed mode)")
                            else:
                                analysis_log.append(f"\nRound {round_num + 1}: No beneficial tokens found. Stopping.")
                                break
                        replacement_char = details['replacement_char']
                        used_chars.add(replacement_char)
                        size_before = len(current_text.encode('utf-8'))
                        current_text = self.apply_token_replacement(current_text, best_token, replacement_char, language)
                        size_after = len(current_text.encode('utf-8'))
                        actual_reduction = size_before - size_after
                        opt_obj = {
                            'token': best_token,
                            'char': replacement_char,
                            'savings': details['savings'],
                            'frequency': details['frequency'],
                            'replacement_savings': details['replacement_savings'],
                            'cost': details['cost'],
                            'actual_reduction': actual_reduction
                        }
                        optimizations.append(opt_obj)
                        if best_token in tokens:
                            del tokens[best_token]
                        analysis_log.append(f"\n--- Round {round_num + 1} ---")
                        display_token = self.escape_display(best_token)
                        analysis_log.append(f"Token: '{display_token}' (bytes: {len(best_token.encode('utf-8'))})")
                        analysis_log.append(f"Frequency: {details['frequency']}")
                        analysis_log.append(f"Assigned char: '{replacement_char}'")
                        analysis_log.append(f"Replacement savings: {details['replacement_savings']} bytes")
                        analysis_log.append(f"Definition cost: {details['cost']} bytes")
                        analysis_log.append(f"Net savings: +{details['savings']} bytes")
                        analysis_log.append(f"Actual size reduction: {actual_reduction} bytes")
                        analysis_log.append(f"Remaining tokens: {len(tokens)}")
                        analysis_log.append(f"Used characters: {''.join(sorted(used_chars))}")
                else:
                    analysis_log.append("Using RE-TOKENIZATION method")
                    for round_num in range(max_replacements):
                        tokens = self.tokenize_text_comprehensive(current_text, min_token_len, max_token_len, language)
                        if not tokens and not fixed_token_count:
                            analysis_log.append(f"\nRound {round_num + 1}: No tokens left to process")
                            break
                        best_token, details = self.find_best_token(tokens, current_text, used_chars, language)
                        if not best_token:
                            if fixed_token_count:
                                best_token, details = self.find_any_token(tokens, current_text, used_chars, language)
                                if not best_token:
                                    analysis_log.append(f"\nRound {round_num + 1}: No tokens found. Stopping.")
                                    break
                                analysis_log.append(f"\nRound {round_num + 1}: Using suboptimal token (fixed mode)")
                            else:
                                analysis_log.append(f"\nRound {round_num + 1}: No beneficial tokens found. Stopping.")
                                break
                        replacement_char = details['replacement_char']
                        used_chars.add(replacement_char)
                        size_before = len(current_text.encode('utf-8'))
                        current_text = self.apply_token_replacement(current_text, best_token, replacement_char, language)
                        size_after = len(current_text.encode('utf-8'))
                        actual_reduction = size_before - size_after
                        opt_obj = {
                            'token': best_token,
                            'char': replacement_char,
                            'savings': details['savings'],
                            'frequency': details['frequency'],
                            'replacement_savings': details['replacement_savings'],
                            'cost': details['cost'],
                            'actual_reduction': actual_reduction
                        }
                        optimizations.append(opt_obj)
                        analysis_log.append(f"\n--- Round {round_num + 1} ---")
                        display_token = self.escape_display(best_token)
                        analysis_log.append(f"Token: '{display_token}' (bytes: {len(best_token.encode('utf-8'))})")
                        analysis_log.append(f"Frequency: {details['frequency']}")
                        analysis_log.append(f"Assigned char: '{replacement_char}'")
                        analysis_log.append(f"Replacement savings: {details['replacement_savings']} bytes")
                        analysis_log.append(f"Definition cost: {details['cost']} bytes")
                        analysis_log.append(f"Net savings: +{details['savings']} bytes")
                        analysis_log.append(f"Actual size reduction: {actual_reduction} bytes")
                        analysis_log.append(f"Remaining tokens: {len(tokens)}")
                        analysis_log.append(f"Used characters: {''.join(sorted(used_chars))}")

                if language == "jq":
                    optimizations_generate = list(optimizations)
                elif language != 'TeX':
                    optimizations_generate = list(reversed(optimizations))
                else:
                    optimizations_generate = optimizations

                # Generate final code
                if language == "Python" and not use_legacy_packer:
                    final_code = self.generate_python_smart_code(processed_text, optimizations_generate, current_text)
                else:
                    final_code = self.generate_final_code(processed_text, optimizations_generate, current_text, language, use_js_short)

            final_stats = self.calculate_final_stats(input_text, final_code, optimizations, start_time, language)
            self.root.after(0, self.display_results, final_code, analysis_log, final_stats)
        except Exception as e:
            self.root.after(0, self.show_error, str(e))

    def calculate_final_stats(self, original_text, final_code, optimizations, start_time, language):
        original_bytes = len(original_text.encode('utf-8'))
        compressed_bytes = len(final_code.encode('utf-8'))
        ratio = original_bytes / compressed_bytes if compressed_bytes > 0 else 0
        elapsed_time = time.time() - start_time
        total_savings = sum(opt['savings'] for opt in optimizations)
        total_reduction = original_bytes - compressed_bytes
        return {
            'original_bytes': original_bytes,
            'compressed_bytes': compressed_bytes,
            'ratio': ratio,
            'time': elapsed_time,
            'optimizations_applied': len(optimizations),
            'total_savings': total_savings,
            'total_reduction': total_reduction,
            'used_chars': ''.join(sorted(opt['char'] for opt in optimizations)),
            'language': language
        }

    def display_results(self, final_code, analysis_log, stats):
        self.progress.stop()
        self.optimize_btn.config(state='normal')
        self.code_text.delete("1.0", tk.END)
        self.code_text.insert("1.0", final_code)
        if analysis_log:
            self.analysis_text.delete("1.0", tk.END)
            self.analysis_text.insert("1.0", '\n'.join(analysis_log))
        
        # Add PyPacker credit to stats if Python with smart packer
        stats_text = (f"Original: {stats['original_bytes']} bytes | "
                      f"Compressed: {stats['compressed_bytes']} bytes | "
                      f"Ratio: {stats['ratio']:.2f}x | "
                      f"Time: {stats['time']:.2f}s | "
                      f"Optimizations: {stats['optimizations_applied']} | "
                      f"Chars used: {stats['used_chars']}")
        
        if stats['language'] == "Python" and not self.python_legacy_packer_var.get():
            stats_text += " | PyPacker by JOrE20"
        
        self.stats_label.config(text=stats_text)
        
        message_text = (f"Compression finished!\n"
                       f"Reduced from {stats['original_bytes']} to {stats['compressed_bytes']} bytes\n"
                       f"Language: {stats['language']}\n"
                       f"Tokenization: {self.tokenization_method_var.get()}\n"
                       f"Used characters: {stats['used_chars']}")
        
        if stats['language'] == "Python" and not self.python_legacy_packer_var.get():
            message_text += "\nPython packer: Smart (PyPacker)"
        
        messagebox.showinfo("Optimization Complete", message_text)

    def show_error(self, error_msg):
        self.progress.stop()
        self.optimize_btn.config(state='normal')
        messagebox.showerror("Optimization Error", f"An error occurred:\n{error_msg}")

    def analyze_tokens(self):
        input_text = self._get_input_text_preserve_first_leading_spaces()
        if not input_text.strip("\n"):
            messagebox.showwarning("Input Error", "Please enter some text to analyze.")
            return
        language = self.language_var.get()
        min_token_len = self.min_token_var.get()
        max_token_len = self.max_token_var.get()
        tokens = self.tokenize_text_comprehensive(input_text, min_token_len, max_token_len, language)
        analysis_log = ["=== TOP TOKENS BY FREQUENCY ==="]
        analysis_log.append(f"Language: {language}")
        analysis_log.append(f"Tokenization method: Comprehensive (all sizes {min_token_len}-{max_token_len})")
        analysis_log.append("ALL CHARACTERS INCLUDING SPACES AND NEWLINES ARE ALLOWED IN TOKENS!")
        analysis_log.append("")
        sorted_tokens = sorted(tokens.items(), key=lambda x: (-x[1], -len(x[0])))
        for i, (token, freq) in enumerate(sorted_tokens[:30]):
            display_token = self.escape_display(token)
            token_bytes = len(token.encode('utf-8'))
            potential_savings = (token_bytes - 1) * freq
            analysis_log.append(f"{i+1}. '{display_token}' (bytes: {token_bytes}, len: {len(token)}): {freq} occurrences, potential: {potential_savings} bytes")
        self.analysis_text.delete("1.0", tk.END)
        self.analysis_text.insert("1.0", '\n'.join(analysis_log))

    def show_compatible_languages(self):
        current_lang = self.language_var.get()
        compatible_langs = self.language_compatibility.get(current_lang, [])
        if compatible_langs:
            messagebox.showinfo(
                f"Languages Compatible with {current_lang}",
                f"The following languages should also work with {current_lang} output:\n\n" +
                "\n".join(f"• {lang}" for lang in compatible_langs)
            )
        else:
            messagebox.showinfo(
                f"Languages Compatible with {current_lang}",
                f"No other known languages are directly compatible with {current_lang} output."
            )

    def clear_all(self):
        self.input_text.delete("1.0", tk.END)
        self.code_text.delete("1.0", tk.END)
        self.analysis_text.delete("1.0", tk.END)
        self.stats_label.config(text="Ready to optimize...")

    def load_example(self):
        example = (
            "This is some example text that we want to compress.\n"
            "It contains repeated patterns like \"example text\" and \"repeated patterns\".\n"
            "The compressor should find these and replace them with single characters.\n"
            "\n"
            "Multiple lines with newlines can be very beneficial to compress.\n"
            "Each newline sequence can be replaced efficiently."
        )
        self.input_text.delete("1.0", tk.END)
        self.input_text.insert("1.0", example)

def main():
    root = tk.Tk()
    app = StaticGolfer(root)
    root.mainloop()

if __name__ == "__main__":
    main()