import numpy as np
import random
import io
import duckdb
import gradio as gr
import math
from datetime import datetime
import PIL
import matplotlib.pyplot as plt
from PIL import Image
import pennylane as qml

# Define a device
dev = qml.device('default.qubit', wires=10)
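# The device above is defined but not exercised anywhere below. This is a
# minimal, purely illustrative QNode (not part of the app's logic) showing how
# `dev` could be used; the circuit and observable are arbitrary assumptions.
@qml.qnode(dev)
def _example_expectation(angle):
    qml.RX(angle, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))
# e.g. _example_expectation(0.3) returns a single expectation value in [-1, 1].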
# Hugging Face dataset helpers
from datasets import load_dataset, Dataset

def store_in_hf_dataset(data):
    # Convert the result rows to Hugging Face Dataset format
    # (the plot bytes in item[1] are not included in the pushed dataset)
    dataset = Dataset.from_dict({
        'id': [item[0] for item in data],
        'hamiltonian': [item[2] for item in data],
        'qasm_code': [item[3] for item in data],
        'trotter_code': [item[4] for item in data],
        'num_qubits': [item[5] for item in data],
        'trotter_order': [item[6] for item in data],
        'timestamp': [str(item[7]) for item in data],
    })
    # Push to the Hugging Face Hub (replace with your own dataset repo;
    # requires an authenticated token, e.g. via `huggingface-cli login`)
    dataset.push_to_hub("your-username/BoltzmannEntropy-QuantumLLMInstruct")

def load_from_hf_dataset():
    # Load the dataset back from the Hugging Face Hub
    dataset = load_dataset("your-username/BoltzmannEntropy-QuantumLLMInstruct")
    return dataset
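# Illustrative read-back (assumes the push above succeeded and created the
# default 'train' split):
#   ds = load_from_hf_dataset()
#   print(ds['train'][0]['hamiltonian'])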
# Buffer the plot and return it as a PIL image
def buffer_plot_and_get(fig):
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    return PIL.Image.open(buf)

# Store the image as PNG bytes for DuckDB
def pil_image_to_bytes(image):
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='PNG')
    return img_byte_arr.getvalue()
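# Example round trip through the two helpers above:
#   fig, ax = plt.subplots(); ax.plot([0, 1])
#   png_bytes = pil_image_to_bytes(buffer_plot_and_get(fig))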
# Generate a random Hamiltonian as a string of weighted Pauli terms
def generate_random_hamiltonian(num_qubits):
    terms = []
    for _ in range(random.randint(1, 5)):
        coeff = round(random.uniform(-1, 1), 2)
        pauli_ops = [random.choice(['I', 'X', 'Y', 'Z']) for _ in range(num_qubits)]
        term = f"{coeff} * {' '.join(pauli_ops)}"
        terms.append(term)
    return " + ".join(terms)
# Convert a Hamiltonian string to OpenQASM 2.0 code
def hamiltonian_to_qasm(hamiltonian, num_qubits):
    qasm_code = f"OPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg q[{num_qubits}];\n"
    rotations = {i: 0.0 for i in range(num_qubits)}
    terms = hamiltonian.split(" + ")
    for term in terms:
        coeff, paulis = term.split(" * ")
        paulis = paulis.split()
        coeff = float(coeff)
        for i, pauli in enumerate(paulis):
            if pauli == "X":
                qasm_code += f"x q[{i}];\n"
            elif pauli == "Y":
                qasm_code += f"ry(pi/2) q[{i}];\n"
            elif pauli == "Z":
                rotations[i] += coeff
    for i, angle in rotations.items():
        if angle != 0:
            # OpenQASM 2.0 rotation gates take radians, so the accumulated
            # coefficient is emitted directly rather than converted to degrees
            qasm_code += f"rz({round(angle, 4)}) q[{i}];\n"
    return qasm_code
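# Example: hamiltonian_to_qasm("0.5 * X Z", 2) produces roughly
#   OPENQASM 2.0;
#   include "qelib1.inc";
#   qreg q[2];
#   x q[0];
#   rz(0.5) q[1];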
# Produce a symbolic (string) Trotter decomposition of the Hamiltonian
def trotter_decomposition(hamiltonian, order):
    terms = hamiltonian.split(" + ")
    trotter_steps = []
    for term in terms:
        coeff, *pauli_ops = term.split(" * ")
        coeff = float(coeff)
        for _ in range(order):
            trotter_steps.append(f"exp({coeff / order}) * ({' * '.join(pauli_ops)})")
        for _ in range(order):
            trotter_steps.append(f"exp({-coeff / order}) * ({' * '.join(pauli_ops)})")
    return " + ".join(trotter_steps)
# Store generated rows in a local DuckDB database
def store_in_duckdb(data, db_file='quantum_hamiltonians.duckdb'):
    conn = duckdb.connect(database=db_file)
    conn.execute("""CREATE TABLE IF NOT EXISTS hamiltonians (
                        id INTEGER,
                        plot BLOB,
                        hamiltonian VARCHAR,
                        qasm_code VARCHAR,
                        trotter_code VARCHAR,
                        num_qubits INTEGER,
                        trotter_order INTEGER,
                        timestamp TIMESTAMP
                    )""")
    conn.executemany("""INSERT INTO hamiltonians (id, plot, hamiltonian, qasm_code, trotter_code, num_qubits, trotter_order, timestamp)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)""", data)
    conn.close()

# Load stored results from DuckDB into a pandas DataFrame
def load_from_duckdb(db_file='quantum_hamiltonians.duckdb'):
    conn = duckdb.connect(database=db_file)
    df = conn.execute("SELECT * FROM hamiltonians").df()
    conn.close()
    return df
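# Illustrative ad-hoc inspection of the stored rows with plain SQL
# (same default database file as above):
#   con = duckdb.connect('quantum_hamiltonians.duckdb')
#   con.execute("SELECT id, num_qubits, trotter_order FROM hamiltonians LIMIT 5").df()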
# Generate a batch of Hamiltonians with their QASM and Trotter strings, plus a plot per entry
def generate_hamiltonians(num_hamiltonians, selected_qubits, selected_order, write_to_hf, write_to_duckdb):
    results_table = []
    timestamp = datetime.now()
    for i in range(num_hamiltonians):
        num_qubits = random.choice(selected_qubits)
        order = selected_order
        hamiltonian = generate_random_hamiltonian(num_qubits)
        qasm_code = hamiltonian_to_qasm(hamiltonian, num_qubits)
        trotter_code = trotter_decomposition(hamiltonian, order)
        # Create a dummy plot (replace with actual circuit-drawing logic)
        fig, ax = plt.subplots()
        ax.plot([0, 1], [0, 1])
        circuit_plot_image = buffer_plot_and_get(fig)
        circuit_plot_bytes = pil_image_to_bytes(circuit_plot_image)
        plt.close(fig)  # release the figure so repeated runs don't leak memory
        # Append the row: (id, plot, hamiltonian, qasm, trotter, qubits, order, timestamp)
        results_table.append((i + 1, circuit_plot_bytes, hamiltonian, qasm_code, trotter_code, num_qubits, order, timestamp))
    # Write data to the Hugging Face dataset if selected
    if write_to_hf:
        store_in_hf_dataset(results_table)
    # Write data to DuckDB if selected
    if write_to_duckdb:
        store_in_duckdb(results_table)
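# Illustrative direct call, bypassing the UI: five Hamiltonians over 2- and
# 3-qubit systems with a first-order split, stored only in DuckDB:
#   generate_hamiltonians(5, [2, 3], 1, write_to_hf=False, write_to_duckdb=True)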
# Load results from either the Hugging Face dataset or DuckDB, rendered as an
# HTML table so the Gradio HTML component below can display it
def load_results(load_option):
    if load_option == "Load from Hugging Face dataset":
        dataset = load_from_hf_dataset()  # DatasetDict; pushed data lands in a 'train' split
        return dataset["train"].to_pandas().to_html()
    return load_from_duckdb().to_html()
# Gradio app
with gr.Blocks() as app:
    gr.Markdown("# Quantum Hamiltonian Generator")

    with gr.Tab("Generate Hamiltonians"):
        num_hamiltonians = gr.Dropdown(label="Select number of Hamiltonians to generate", choices=[1, 10, 20, 100], value=20)
        qubit_choices = [1, 2, 3, 4, 5, 6]
        selected_qubits = gr.CheckboxGroup(label="Select number of qubits", choices=qubit_choices, value=[1])
        order_choices = [1, 2, 3, 4, 5]
        selected_order = gr.Dropdown(label="Select Trotter order", choices=order_choices, value=1)

        # Radio buttons for selecting either the Hugging Face dataset or DuckDB
        write_option = gr.Radio(label="Where do you want to store the data?",
                                choices=["Write to Hugging Face dataset", "Write to DuckDB"],
                                value="Write to Hugging Face dataset")

        generate_button = gr.Button("Generate Hamiltonians")
        status = gr.Markdown("Click 'Generate Hamiltonians' to start the process.")

        def update_status(num, qubits, order, write_option):
            if write_option == "Write to Hugging Face dataset":
                # Write to the Hugging Face dataset
                generate_hamiltonians(num, qubits, order, write_to_hf=True, write_to_duckdb=False)
            else:
                # Write to DuckDB
                generate_hamiltonians(num, qubits, order, write_to_hf=False, write_to_duckdb=True)
            return "Data stored as per selection."

        generate_button.click(update_status, inputs=[num_hamiltonians, selected_qubits, selected_order, write_option], outputs=status)
| with gr.Tab("View Results"): | |
| load_option = gr.Radio(label="Where do you want to load the data from?", | |
| choices=["Load from Hugging Face dataset", "Load from DuckDB"], | |
| value="Load from DuckDB") | |
| load_button = gr.Button("Load Results") | |
| output_display = gr.HTML() | |
| def load_results(load_option): | |
| if load_option == "Load from Hugging Face dataset": | |
| return load_from_hf_dataset() | |
| else: | |
| return load_from_duckdb() | |
| load_button.click(load_results, inputs=[load_option], outputs=output_display) | |
| app.launch(share=True) | |