From c2e5a67b1e764e4438882f05658dfc26928895bd Mon Sep 17 00:00:00 2001 From: Wojciech Mruczkiewicz Date: Thu, 15 Oct 2020 18:13:26 +0200 Subject: [PATCH] Add Fermi-Hubbard experiment code and examples. (#77) * Add Fermi-Hubbard experiment code and examples. * Disable the data frame generation assertion. * Fix assertion in data frame generation. * Add openfermioncirq to the requirements.txt --- docs/fermi_hubbard/.gitignore | 2 + docs/fermi_hubbard/experiment_example.ipynb | 570 +++++++++++ docs/fermi_hubbard/publication_results.ipynb | 347 +++++++ recirq/fermi_hubbard/README.md | 55 ++ recirq/fermi_hubbard/__init__.py | 78 ++ recirq/fermi_hubbard/circuits.py | 418 +++++++++ recirq/fermi_hubbard/converting_sampler.py | 95 ++ recirq/fermi_hubbard/data_plotting.py | 884 ++++++++++++++++++ recirq/fermi_hubbard/decomposition.py | 548 +++++++++++ recirq/fermi_hubbard/decomposition_test.py | 133 +++ recirq/fermi_hubbard/execution.py | 298 ++++++ recirq/fermi_hubbard/execution_test.py | 272 ++++++ recirq/fermi_hubbard/fermionic_circuits.py | 159 ++++ .../fermi_hubbard/fermionic_circuits_test.py | 88 ++ recirq/fermi_hubbard/layouts.py | 439 +++++++++ recirq/fermi_hubbard/parameters.py | 598 ++++++++++++ recirq/fermi_hubbard/parameters_test.py | 84 ++ recirq/fermi_hubbard/post_processing.py | 773 +++++++++++++++ recirq/fermi_hubbard/post_processing_test.py | 586 ++++++++++++ recirq/fermi_hubbard/publication.py | 211 +++++ requirements.txt | 4 + 21 files changed, 6642 insertions(+) create mode 100644 docs/fermi_hubbard/.gitignore create mode 100644 docs/fermi_hubbard/experiment_example.ipynb create mode 100644 docs/fermi_hubbard/publication_results.ipynb create mode 100644 recirq/fermi_hubbard/README.md create mode 100644 recirq/fermi_hubbard/__init__.py create mode 100644 recirq/fermi_hubbard/circuits.py create mode 100644 recirq/fermi_hubbard/converting_sampler.py create mode 100644 recirq/fermi_hubbard/data_plotting.py create mode 100644 
recirq/fermi_hubbard/decomposition.py create mode 100644 recirq/fermi_hubbard/decomposition_test.py create mode 100644 recirq/fermi_hubbard/execution.py create mode 100644 recirq/fermi_hubbard/execution_test.py create mode 100644 recirq/fermi_hubbard/fermionic_circuits.py create mode 100644 recirq/fermi_hubbard/fermionic_circuits_test.py create mode 100644 recirq/fermi_hubbard/layouts.py create mode 100644 recirq/fermi_hubbard/parameters.py create mode 100644 recirq/fermi_hubbard/parameters_test.py create mode 100644 recirq/fermi_hubbard/post_processing.py create mode 100644 recirq/fermi_hubbard/post_processing_test.py create mode 100644 recirq/fermi_hubbard/publication.py diff --git a/docs/fermi_hubbard/.gitignore b/docs/fermi_hubbard/.gitignore new file mode 100644 index 00000000..66d1ad07 --- /dev/null +++ b/docs/fermi_hubbard/.gitignore @@ -0,0 +1,2 @@ +fermi_hubbard_data/** +trapping/** diff --git a/docs/fermi_hubbard/experiment_example.ipynb b/docs/fermi_hubbard/experiment_example.ipynb new file mode 100644 index 00000000..b0c2e810 --- /dev/null +++ b/docs/fermi_hubbard/experiment_example.ipynb @@ -0,0 +1,570 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Fermi-Hubbard Experiment Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to execute a single instance of Fermi-Hubbard experiment on Google processor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cirq\n", + "from cirq.contrib.svg import SVGCircuit\n", + "import matplotlib.pyplot as plt\n", + "from tqdm.notebook import tqdm\n", + "\n", + "from recirq.fermi_hubbard import (\n", + " ConvertingSampler,\n", + " ExperimentResult,\n", + " FermiHubbardParameters,\n", + " InstanceBundle,\n", + " create_circuits,\n", + " load_experiment,\n", + " plot_quantity,\n", + " quantity_data_frame,\n", + " run_experiment,\n", + " save_experiment\n", + ")\n", + "from recirq.fermi_hubbard.publication import (\n", + " google_sqrt_iswap_converter,\n", + " ideal_sqrt_iswap_converter,\n", + " parasitic_cphase_compensation,\n", + " rainbow23_layouts,\n", + " trapping_instance\n", + ")\n", + "\n", + "# Hide numpy warnings\n", + "import warnings\n", + "warnings.filterwarnings('ignore')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Experiment parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first step is to decide on exact experiment parameters like problem Hamiltonian, initial state description as well as mapping of fermions to qubits on the device. All of this information is necessary to create circuits and run the experiment." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Qubits layout" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We're going to use the set of layouts prepared for 23-qubit subgrid of Google Rainbow processor. Multiple layouts are used to average experiment results over different qubits assignments and cancel some of the statistical errors which occur from calibration to calibration." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "layouts = rainbow23_layouts(sites_count=8)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This gives 16 different configurations prepared for 8 sites (16 qubits) each, for example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(layouts[0].text_diagram())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Problem parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's use the Hamiltonian with uniform $J=1$ and $U=2$ on each site, initial state prepared as a ground state of a non-interacting Hamiltonian with trapping potential of a Gaussian shape, Trotter step size equal to 0.3, and two particles per chain. The problem parameters with this initial state can be prepared by pre-defined function ```trapping_instance``` (other configurations can be prepared by creating instances of `FermiHubbardPrameters` data class explicitly)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parameters = [trapping_instance(layout, u=2, dt=0.3, up_particles=2, down_particles=2) for layout in layouts]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The result are instances of `FermiHubbardPrameters` data class for each layout, which uniquely describe configuration to run:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parameters_example = parameters[0]\n", + "parameters_example.hamiltonian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parameters_example.initial_state" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parameters_example.layout" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parameters_example.dt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Circuits" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This step is not really necesary in order to perform the experiment on a device but it is ilustrative how the Fermi-Hubbard execution works. It shows how to construct circuits that simulate Chemistry problems and run on the Google quantum processor efficiently." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Circuit creation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The parameters above are just a description of a problem. 
To create a circuit, a special compilation function is used:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "initial, trotter, measurement = create_circuits(parameters_example, trotter_steps=1)\n", + "circuit = initial + trotter + measurement" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "circuit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Circuit decomposition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The circuit above is constructed using gates which are not native to Google hardware, like `cirq.FSim` or `cirq.CZ` with arbitrary exponent. For the purpose of this project a special converter `ConvertToNonUniformSqrtIswapGates` is provided, which supports the primitives required for Fermi-Hubbard problem.\n", + "\n", + "The converter has an ability to decompose gates to $\\sqrt{\\small \\mbox{iSWAP}}$ with unitary parameters deviating from the perfect ones, and varying between qubit pairs. The `google_sqrt_iswap_converter` creates an instance of the converter which approximates to the average values on Rainbow processor (which are approximately equal to `cirq.FSim(π/4, π/24)`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "google_sqrt_iswap_converter().convert(circuit)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Cirq Simulation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This section demonstrate how to simulate the above parameters using Cirq simulator. 
We're going to simulate evolution starting from 0 up to 10 Trotter steps (evolution time between $0$ and $3 \\hbar / J$):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "steps = range(11)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Ideal" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To run ideal simulation using the $\\sqrt{\\small \\mbox{iSWAP}}$ gate set, each circuit needs to be converted to $\\sqrt{\\small \\mbox{iSWAP}}$ gate set before execution. The Fermi-Hubbard project provides `ConvertingSampler` that converts circuits before execution. In this case, using the decomposition to `cirq.FSim(π/4, 0)`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ideal_sampler = ConvertingSampler(cirq.Simulator(), ideal_sqrt_iswap_converter().convert)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The function `experiment_run` takes the parameters, sampler and list of Trotter steps which describe circuits to compile. It runs them and wraps the outome in `ExperimentResult` data class." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "with tqdm(range(len(parameters) * len(steps))) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " experiments = [run_experiment(params, steps, ideal_sampler, post_run_func=post_run)\n", + " for params in parameters]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A series of experiments for the same problem instance but with different qubit mappings can be post-processed with the help of `InstanceBundle` class. 
This class takes care of averaging results over qubits layouts, re-scaling the data by comparing agains reference run (perferct simulation in this case), and extracting various quantities." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bundle = InstanceBundle(experiments)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bundle.cache_exact_numerics()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Number of quantities are available, accessible either through ```InstanceBundle``` methods or through dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for quantity_name in bundle.quantities:\n", + " print(quantity_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "They can be converted to Panda's data frame:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "charge_spin_density, _, _ = quantity_data_frame(bundle, 'charge_spin_density')\n", + "charge_spin_density" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "They can also be plotted with ```plot_quantity``` helper function which adjusts appearance according to the data plotted:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_density');" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_spreading');" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Parasitic controlled-phsae" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The Cirq simulator simulates $\\sqrt{\\small \\mbox{iSWAP}}$ 
gates perfectly. To evaluate the importance of parasitic controlled-phase a circuit could be decomposed to `cirq.FSim(π/4, π/24)` instead of `cirq.FSim(π/4, 0)` and simulated against the latter." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "parasitic_sampler = ConvertingSampler(cirq.Simulator(), google_sqrt_iswap_converter().convert)\n", + "\n", + "with tqdm(range(len(parameters) * len(steps))) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " experiments = [run_experiment(params, steps, parasitic_sampler, post_run_func=post_run) \n", + " for params in parameters]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bundle = InstanceBundle(experiments)\n", + "bundle.cache_exact_numerics()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_density');" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_spreading', show_std_error=True);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Google's Quantum Computing Service Execution" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In order to run experiment on Google's QCS, a special sampler that runs circuits at Google's QCS is necessary. The script below assumes that an environment variable ```GOOGLE_CLOUD_PROJECT``` is present and set to a valid Google Cloud Platform project identifier." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "processor_id = 'rainbow'\n", + "engine_sampler = cirq.google.get_engine_sampler(processor_id=processor_id,\n", + " gate_set_name='sqrt_iswap')\n", + "google_sampler = ConvertingSampler(engine_sampler, google_sqrt_iswap_converter().convert)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It is convenient to watch for a progress of the QCS execution. It is also good to persist the experiment results on disk as soon sa they are ready. Although rare, the remote operation might fail for various reasons and more advanced execution workflow might include error handling, experiment pause and continuation, etc.\n", + "\n", + "The script below does not include Floquet calibration." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_dir = 'trapping'\n", + "\n", + "with tqdm(range(len(layouts) * len(steps))) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " for index, params in enumerate(parameters):\n", + " experiment = run_experiment(params, steps, google_sampler, post_run_func=post_run)\n", + " save_experiment(experiment, f'{results_dir}/trapping_{index + 1}.json')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "experiments = [load_experiment(f'{results_dir}/trapping_{index + 1}.json') \n", + " for index in range(len(parameters))]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bundle = InstanceBundle(experiments=experiments,\n", + " numerics_transform=parasitic_cphase_compensation(0.138))\n", + "bundle.cache_exact_numerics()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_density', 
show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(bundle, 'charge_spin_spreading', show_std_error=True);" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/fermi_hubbard/publication_results.ipynb b/docs/fermi_hubbard/publication_results.ipynb new file mode 100644 index 00000000..748862c6 --- /dev/null +++ b/docs/fermi_hubbard/publication_results.ipynb @@ -0,0 +1,347 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Fermi-Hubbard Spin-Charge Separation Results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook presents the experimental data which was collected on Google Rainbow processor for the purpose of Fermi-Hubbard spin-charge separation experiment." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**NOTE**\n", + "\n", + "\n", + "In order to run this notebook the data sets ```gaussians_1u1d.zip```, ```trapping_2u2d.zip``` and ```trapping_3u3d.zip``` needs to be downloaded from [https://doi.org/10.5061/dryad.crjdfn32v](\n", + " https://doi.org/10.5061/dryad.crjdfn32v) and extracted to ```fermi_hubbard_data``` directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import glob\n", + "from tqdm.notebook import tqdm\n", + "\n", + "from recirq.fermi_hubbard import (\n", + " InstanceBundle,\n", + " apply_rescalings_to_bundles,\n", + " find_bundles_rescalings,\n", + " load_experiment,\n", + " plot_quantity\n", + ")\n", + "from recirq.fermi_hubbard.publication import (\n", + " parasitic_cphase_compensation\n", + ")\n", + "\n", + "# Hide numpy warnings\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "data_dir = 'fermi_hubbard_data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Noninteracting Gaussians" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load results and create a bundle with extracted quantities.\n", + "gaussians_1u1d_files = glob.glob(f'{data_dir}/gaussians_1u1d/0.0/*.json')\n", + "gaussians_bundle = InstanceBundle(\n", + " experiments=[load_experiment(file) for file in gaussians_1u1d_files],\n", + " steps=range(65),\n", + " rescale_steps=range(65))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simulate the exact numerical results that are used as a reference.\n", + "with tqdm(range(len(gaussians_bundle.steps))) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " gaussians_bundle.cache_exact_numerics(post_run_func=post_run)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(gaussians_bundle, 'post_selection', show_std_dev=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(gaussians_bundle, 'scaling', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "# The data for this quantity can be viewed after double-clicking this cell output.\n", + "plot_quantity(gaussians_bundle, 'up_down_density', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(gaussians_bundle, 'up_down_position_average', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(gaussians_bundle, 'up_down_position_average_dt', show_std_error=True);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Trapping Potential N=4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load results and create a bundles with extracted quantities for each\n", + "# interaction strength.\n", + "trapping_2u2d_files = [\n", + " glob.glob(f'{data_dir}/trapping_2u2d/{u}/*.json')\n", + " for u in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]]\n", + "trapping_2u2d_bundles = [InstanceBundle(\n", + " experiments=[load_experiment(file) for file in files],\n", + " numerics_transform=parasitic_cphase_compensation(0.138),\n", + " steps=range(11),\n", + " rescale_steps=range(11)) for files in trapping_2u2d_files]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simulate the exact numerical results that are used as a reference.\n", + "total_steps = sum(len(bundle.steps) for bundle in trapping_2u2d_bundles)\n", + "with tqdm(range(total_steps)) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " for bundle in trapping_2u2d_bundles:\n", + " bundle.cache_exact_numerics(post_run_func=post_run)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use shared rescaling values among compatible problem instances.\n", + 
"apply_rescalings_to_bundles(find_bundles_rescalings(trapping_2u2d_bundles))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_2u2d_bundles, 'post_selection', show_std_dev=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_2u2d_bundles, 'scaling', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_2u2d_bundles, 'charge_spin_density', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_2u2d_bundles, 'charge_spin_spreading', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_2u2d_bundles, 'charge_spin_spreading_dt', show_std_error=True);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Trapping Potential N=6" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load results and create a bundles with extracted quantities for each \n", + "# interaction strength.\n", + "trapping_3u3d_files = [\n", + " glob.glob(f'{data_dir}/trapping_3u3d/{u}/*.json')\n", + " for u in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]]\n", + "trapping_3u3d_bundles = [InstanceBundle(\n", + " experiments=[load_experiment(file) for file in files],\n", + " numerics_transform=parasitic_cphase_compensation(0.138),\n", + " steps=range(11),\n", + " rescale_steps=range(11)) for files in trapping_3u3d_files]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simulate the exact numerical results that are used as a reference.\n", + 
"total_steps = sum(len(bundle.steps) for bundle in trapping_3u3d_bundles)\n", + "with tqdm(range(total_steps)) as progress:\n", + " def post_run(_1, _2):\n", + " progress.update()\n", + " for bundle in trapping_3u3d_bundles:\n", + " bundle.cache_exact_numerics(post_run_func=post_run)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use shared rescaling values among compatible problem instances.\n", + "apply_rescalings_to_bundles(find_bundles_rescalings(trapping_3u3d_bundles))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_3u3d_bundles, 'post_selection', show_std_dev=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_3u3d_bundles, 'scaling', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_3u3d_bundles, 'charge_spin_density', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_3u3d_bundles, 'charge_spin_spreading', show_std_error=True);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_quantity(trapping_3u3d_bundles, 'charge_spin_spreading_dt', show_std_error=True);" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/recirq/fermi_hubbard/README.md 
b/recirq/fermi_hubbard/README.md new file mode 100644 index 00000000..2c3faf05 --- /dev/null +++ b/recirq/fermi_hubbard/README.md @@ -0,0 +1,55 @@ +# Simulation of spin and charge dynamics in the Fermi-Hubbard model + +This module contains code necessary to run and analyse results of the +publication *Observation of separated dynamics of charge and spin in the +Fermi-Hubbard model* by Google AI Quantum and Collaborators. + +## Installation + +The ReCirq package can be cloned directly or added to the local Python +environment with the command: +``` +pip install git+https://github.com/quantumlib/ReCirq +``` +To import and use: +``` +import recirq.fermi_hubbard +``` + +## Usage + +This package allows to study the Fermi-Hubbard dynamics with wide range of +parameters (see ```parameters.py``` for a range of possible choices) on a +quantum simulator or on the Google quantum hardware. + +To analyze and reproduce the publication results, there are two jupyter +notebooks provided: + + * The [experiment_example.ipynb]( + ../../docs/fermi_hubbard/experiment_example.ipynb) notebook guides on how the + Fermi-Hubbard code is structured, how to simulate the Fermi-Hubbard problem + and how to execute the experiment with Google's Quantum Computing Service. + * The [publication_results.ipynb]( + ../../docs/fermi_hubbard/publication_results.ipynb) notebook guides through + the analysis of results from the data set which was released together with the + publication. The data set consists of four zip files and is accessible at + [https://doi.org/10.5061/dryad.crjdfn32v]( + https://doi.org/10.5061/dryad.crjdfn32v). The files need to be downloaded and + extracted into the ```docs/fermi_hubbard/fermi_hubbard_data``` directory. + + ## Package Contents + +The ```fermi-hubbard``` package is split in four separate parts: + + * Data model: the ```layouts.py``` and ```parameters.py``` files contain data + classes which encode a Fermi-Hubbard problem parameters in a self-contained + way. 
They are used during execution to construct the problem circuits and + analysis to post-process the results. + * Execution: the ```execution.py``` file backed by ```circuits.py```, + ```fermionic_circuits.py``` and ```decomposition.py``` contain all the code + necessary to create circuits, execute them and persist the execution results. + * Analysis: experiment results are post-processed with the set of helper + functions in ```post_procesing.py``` and plotted with functions in + ```data_plotting.py```. + * Publication data: the ```publication.py``` file contains a set of helper + functions and pre-defined data which is specific to the publication. diff --git a/recirq/fermi_hubbard/__init__.py b/recirq/fermi_hubbard/__init__.py new file mode 100644 index 00000000..95a2ecbc --- /dev/null +++ b/recirq/fermi_hubbard/__init__.py @@ -0,0 +1,78 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from recirq.fermi_hubbard.circuits import ( + align_givens_circuit, + create_initial_circuit, + create_line_circuits, + create_line_trotter_circuit, + create_line_trotter_step_circuit, + create_measurement_circuit, + create_one_particle_circuit, + create_zigzag_circuits, + create_zigzag_trotter_circuit, + create_zigzag_trotter_step_circuit, + run_in_parallel +) +from recirq.fermi_hubbard.converting_sampler import ( + ConvertingSampler +) +from recirq.fermi_hubbard.data_plotting import ( + default_top_label, + plot_quantity, + quantity_data_frame +) +from recirq.fermi_hubbard.decomposition import ( + CPhaseEchoGate, + ConvertToNonUniformSqrtIswapGates, + DecomposeCallable, + Decomposition, + GateDecompositionError, + ParticleConservingParameters, + decompose_preserving_moments +) +from recirq.fermi_hubbard.execution import ( + ExperimentResult, + ExperimentRun, + FermiHubbardExperiment, + create_circuits, + extract_result, + load_experiment, + run_experiment, + save_experiment +) +from recirq.fermi_hubbard.layouts import ( + LineLayout, + ZigZagLayout +) +from recirq.fermi_hubbard.parameters import ( + FermiHubbardParameters, + FixedSingleParticle, + FixedTrappingPotential, + GaussianTrappingPotential, + Hamiltonian, + IndependentChainsInitialState, + InitialState, + PhasedGaussianSingleParticle, + UniformSingleParticle, + UniformTrappingPotential +) +from recirq.fermi_hubbard.post_processing import ( + AggregatedQuantity, + InstanceBundle, + PerSiteQuantity, + Rescaling, + apply_rescalings_to_bundles, + find_bundles_rescalings +) diff --git a/recirq/fermi_hubbard/circuits.py b/recirq/fermi_hubbard/circuits.py new file mode 100644 index 00000000..bc7bd43d --- /dev/null +++ b/recirq/fermi_hubbard/circuits.py @@ -0,0 +1,418 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Construct Fermi-Hubbard circuits embedded on two parallel lines or zig-zag +like layout. +""" + +from typing import Dict, Iterable, List, Optional, Sequence, Tuple, cast + +import cirq +from collections import defaultdict +import numpy as np +import openfermion +import openfermioncirq + +from recirq.fermi_hubbard.decomposition import CPhaseEchoGate +from recirq.fermi_hubbard.fermionic_circuits import create_one_particle_circuit +from recirq.fermi_hubbard.parameters import ( + ChainInitialState, + FermiHubbardParameters, + IndependentChainsInitialState, + SingleParticle, + TrappingPotential +) + + +def create_line_circuits(parameters: FermiHubbardParameters, + trotter_steps: int + ) -> Tuple[cirq.Circuit, cirq.Circuit, cirq.Circuit]: + """Creates circuit mapped on a LineLayout. + + See LineLayout docstring for details. + + Args: + parameters: Fermi-Hubbard problem parameters. + trotter_steps: Number to Trotter steps to include. + + Returns: + Tuple of: + - initial state preparation circuit, + - trotter steps circuit, + - measurement circuit. + """ + return (create_initial_circuit(parameters), + create_line_trotter_circuit(parameters, trotter_steps), + create_measurement_circuit(parameters)) + + +def create_zigzag_circuits(parameters: FermiHubbardParameters, + trotter_steps: int + ) -> Tuple[cirq.Circuit, cirq.Circuit, cirq.Circuit]: + """Creates circuit mapped on a ZigZagLayout. + + See ZigZagLayout docstring for details. + + Args: + parameters: Fermi-Hubbard problem parameters. + trotter_steps: Number to Trotter steps to include. 
+ + Returns: + Tuple of: + - initial state preparation circuit, + - trotter steps circuit, + - measurement circuit. + """ + return (create_initial_circuit(parameters), + create_zigzag_trotter_circuit(parameters, trotter_steps), + create_measurement_circuit(parameters)) + + +def create_initial_circuit(parameters: FermiHubbardParameters + ) -> cirq.Circuit: + """Creates initial state circuit mapped on a LineLayout or ZigZagLayout.""" + if isinstance(parameters.initial_state, IndependentChainsInitialState): + return _create_independent_chains_initial_circuit(parameters) + else: + raise ValueError(f'Unsupported initial state ' + f'{parameters.initial_state}') + + +def create_measurement_circuit(parameters: FermiHubbardParameters + ) -> cirq.Circuit: + """Creates measurement circuit mapped on a LineLayout or ZigZagLayout.""" + layout = parameters.layout + return cirq.Circuit( + cirq.measure(*(layout.up_qubits + layout.down_qubits), key='z')) + + +def create_line_trotter_circuit(parameters: FermiHubbardParameters, + trotter_steps: int + ) -> cirq.Circuit: + """Creates Trotter steps circuit mapped on a LineLayout.""" + circuit = cirq.Circuit() + for _ in range(trotter_steps): + circuit += create_line_trotter_step_circuit(parameters) + return circuit + + +def create_line_trotter_step_circuit(parameters: FermiHubbardParameters + ) -> cirq.Circuit: + """Creates a single Trotter step circuit mapped on a LineLayout.""" + + layout = parameters.layout + hamiltonian = parameters.hamiltonian + dt = parameters.dt + + j_theta = dt * hamiltonian.j_array + j_theta_even, j_theta_odd = j_theta[0::2], j_theta[1::2] + + u_phi = -dt * hamiltonian.u_array + + v_phi = -dt * hamiltonian.v_array + v_phi_even, v_phi_odd = v_phi[0::2], v_phi[1::2] + + local_up = -dt * hamiltonian.local_up_array + local_down = -dt * hamiltonian.local_down_array + + circuit = cirq.Circuit() + + # Nearest-neighbor hopping terms. 
+ circuit += cirq.Circuit(( + _create_hopping_ops(j_theta_even, layout.up_even_pairs), + _create_hopping_ops(j_theta_even, layout.down_even_pairs))) + circuit += cirq.Circuit(( + _create_hopping_ops(j_theta_odd, layout.up_odd_pairs), + _create_hopping_ops(j_theta_odd, layout.down_odd_pairs))) + + # On-site interaction terms. + if not np.allclose(u_phi, 0.0): + circuit += cirq.Circuit( + _create_interaction_ops(u_phi, layout.interaction_pairs)) + + # Nearest-neighbor interaction terms. + if not np.allclose(v_phi_even, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_even, layout.up_even_pairs), + _create_interaction_ops(v_phi_even, layout.down_even_pairs))) + if not np.allclose(v_phi_odd, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_odd, layout.up_odd_pairs), + _create_interaction_ops(v_phi_odd, layout.down_odd_pairs))) + + # Local fields. + if not np.allclose(local_up, 0.0) or not np.allclose(local_down, 0.0): + circuit += cirq.Circuit(( + _create_local_field_ops(local_up, layout.up_qubits), + _create_local_field_ops(local_down, layout.down_qubits))) + + return circuit + + +def create_zigzag_trotter_circuit(parameters: FermiHubbardParameters, + trotter_steps: int + ) -> cirq.Circuit: + """Creates Trotter steps circuit mapped on a ZigZagLayout.""" + circuit = cirq.Circuit() + for _ in range(trotter_steps): + circuit += create_zigzag_trotter_step_circuit(parameters) + return circuit + + +def create_zigzag_trotter_step_circuit(parameters: FermiHubbardParameters, + add_cphase_echo: bool = True + ) -> cirq.Circuit: + """Creates a single Trotter step circuit mapped on a ZigZagLayout.""" + + layout = parameters.layout + hamiltonian = parameters.hamiltonian + dt = parameters.dt + + j_theta = dt * hamiltonian.j_array + j_theta_even, j_theta_odd = j_theta[0::2], j_theta[1::2] + + iswap_theta = np.full(layout.size // 2 - 1, np.pi / 2) + + u_phi = -dt * hamiltonian.u_array + u_phi_even, u_phi_odd = u_phi[0::2], u_phi[1::2] + + v_phi = -dt 
* hamiltonian.v_array + v_phi_even, v_phi_odd = v_phi[0::2], v_phi[1::2] + + local_up = -dt * hamiltonian.local_up_array + local_down = -dt * hamiltonian.local_down_array + + circuit = cirq.Circuit() + + # Even nearest-neighbor hopping terms. + circuit += cirq.Circuit(( + _create_hopping_ops(j_theta_even, layout.up_even_pairs), + _create_hopping_ops(j_theta_even, layout.down_even_pairs))) + + # Even nearest-neighbor interaction terms. + if not np.allclose(v_phi_even, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_even, layout.up_even_pairs), + _create_interaction_ops(v_phi_even, layout.down_even_pairs))) + + # Even on-site interaction terms. + if not np.allclose(u_phi_even, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(u_phi_even, layout.interaction_even_pairs), + _create_cphase_echo_ops( + layout.interaction_even_other_qubits) if add_cphase_echo else () + )) + + # Odd on-site interaction terms. + if not np.allclose(u_phi_odd, 0.0): + # Odd terms are present, an iSWAP on inner qubits is required. + + # iSWAP of inner pairs. + circuit += cirq.Circuit(( + _create_hopping_ops(iswap_theta, layout.up_odd_pairs), + _create_hopping_ops(iswap_theta, layout.down_odd_pairs))) + + # First half of the odd nearest-neighbor interaction terms. + if not np.allclose(v_phi_odd, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_odd / 2, layout.up_odd_pairs), + _create_interaction_ops(v_phi_odd / 2, layout.down_odd_pairs))) + + # Actual odd on-site interaction terms. + circuit += cirq.Circuit(( + _create_interaction_ops(u_phi_odd, layout.interaction_odd_pairs), + _create_cphase_echo_ops( + layout.interaction_odd_other_qubits) if add_cphase_echo else () + )) + + # Odd nearest-neighbor hopping terms with iSWAP on inner pairs. 
+ circuit += cirq.Circuit(( + _create_hopping_ops(j_theta_odd + iswap_theta, layout.up_odd_pairs), + _create_hopping_ops(j_theta_odd + iswap_theta, + layout.down_odd_pairs))) + + # Second half of the odd nearest-neighbor interaction terms. + if not np.allclose(v_phi_odd, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_odd / 2, layout.up_odd_pairs), + _create_interaction_ops(v_phi_odd / 2, layout.down_odd_pairs))) + + # Fix the extra Z rotations introduced by the two iSWAPs. This rotations + # together with the pi/2 term in iswap_theta effectively implement the + # iSWAP^dagger gate. + circuit += cirq.Circuit(( + (cirq.Z(qubit) for qubit in layout.up_qubits[1:-1]), + (cirq.Z(qubit) for qubit in layout.down_qubits[1:-1]))) + else: + # Odd on-site interaction terms are not present, do a shorter circuit. + + # Odd nearest-neighbor hopping terms. + circuit += cirq.Circuit(( + _create_hopping_ops(j_theta_odd, layout.up_odd_pairs), + _create_hopping_ops(j_theta_odd, layout.down_odd_pairs))) + + # Odd nearest-neighbor interaction terms. + if not np.allclose(v_phi_odd, 0.0): + circuit += cirq.Circuit(( + _create_interaction_ops(v_phi_odd, layout.up_odd_pairs), + _create_interaction_ops(v_phi_odd, layout.down_odd_pairs))) + + # Local fields. + if not np.allclose(local_up, 0.0) or not np.allclose(local_down, 0.0): + circuit += cirq.Circuit(( + _create_local_field_ops(local_up, layout.up_qubits), + _create_local_field_ops(local_down, layout.down_qubits))) + + return circuit + + +# TODO: Candidate for move to cirq package. +def run_in_parallel(first: cirq.Circuit, second: cirq.Circuit) -> cirq.Circuit: + """Merges two circuits so that consecutive moments of two sub-circuits run + in parallel. + """ + min_size = min(len(first), len(second)) + return cirq.Circuit((cirq.Moment([a.operations, b.operations]) + for a, b in zip(first, second)) + + first[min_size:] + + second[min_size:]) + + +# TODO: Candidate for move to cirq package. 
+def align_givens_circuit(circuit: cirq.Circuit) -> cirq.Circuit: + """Re-aligns the Z gates of a circuit generated by givens rotation.""" + + # Operations which are followed by a list of Z operations. + following_zs: Dict[Optional[cirq.Operation], + List[cirq.Operation]] = defaultdict(lambda: []) + + # Make a copy of a circuit without Z operations. + frontier: Dict[cirq.Qid, cirq.Operation] = {} + aligned = cirq.Circuit() + for index, moment in enumerate(circuit): + for operation in moment: + if (isinstance(operation, cirq.GateOperation) and + isinstance(operation.gate, cirq.ZPowGate)): + qubit, = operation.qubits + following_zs[frontier.get(qubit)].append(operation) + else: + aligned.append(operation) + + for qubit in operation.qubits: + frontier[qubit] = operation + + # Restore the removed Z operations back. + circuit = cirq.Circuit() + if following_zs[None]: + circuit += cirq.Moment(following_zs[None]) + for moment in aligned: + circuit += moment + zs = sum((following_zs[op] for op in moment), []) + while zs: + circuit += cirq.Moment(zs) + zs = sum((following_zs[op] for op in zs), []) + + return circuit + + +def _create_independent_chains_initial_circuit( + parameters: FermiHubbardParameters +) -> cirq.Circuit: + """Creates circuit that realizes IndependentChainsInitialState initial + state. + """ + layout = parameters.layout + initial = cast(IndependentChainsInitialState, parameters.initial_state) + + up_circuit = _create_chain_initial_circuit( + parameters, layout.up_qubits, initial.up) + down_circuit = _create_chain_initial_circuit( + parameters, layout.down_qubits, initial.down) + + circuit = run_in_parallel(up_circuit, down_circuit) + circuit = align_givens_circuit(circuit) + return circuit + + +def _create_chain_initial_circuit(parameters: FermiHubbardParameters, + qubits: List[cirq.Qid], + chain: ChainInitialState) -> cirq.Circuit: + """Creates circuit that realizes ChainInitialState for given chain. 
""" + if isinstance(chain, SingleParticle): + return create_one_particle_circuit( + qubits, chain.get_amplitudes(len(qubits))) + elif isinstance(chain, TrappingPotential): + return _create_quadratic_hamiltonian_circuit( + qubits, + chain.particles, + chain.as_quadratic_hamiltonian(len(qubits), + parameters.hamiltonian.j)) + else: + raise ValueError(f'Unsupported chain initial state {chain}') + + +def _create_quadratic_hamiltonian_circuit( + qubits: List[cirq.Qid], + particles: int, + hamiltonian: openfermion.QuadraticHamiltonian) -> cirq.Circuit: + """Creates circuit that realizes network of givens rotation.""" + + circuit = cirq.Circuit() + + # Prepare ground state in diagonal basis and add initial circuits. + circuit.append([cirq.X.on(q) for q in qubits[:particles]]) + + # Create Givens network circuit. + _, transform, _ = hamiltonian.diagonalizing_bogoliubov_transform() + circuit += cirq.Circuit(openfermioncirq.bogoliubov_transform( + qubits, transform, range(particles))) + + return circuit + + +def _create_hopping_ops(angles: np.ndarray, + pairs: Sequence[Tuple[cirq.Qid, cirq.Qid]] + ) -> Iterable[cirq.Operation]: + """Generator of operations that realize hopping terms with given angles.""" + assert len(angles) == len(pairs), ("Length of angles and qubit pairs must " + "be equal") + for angle, qubits in zip(angles, pairs): + yield cirq.FSimGate(-angle, 0.0).on(*qubits) + + +def _create_interaction_ops(angles: np.ndarray, + pairs: Sequence[Tuple[cirq.Qid, cirq.Qid]] + ) -> Iterable[cirq.Operation]: + """Generator of operations that realize interaction terms with given angles. + """ + assert len(angles) == len(pairs), ("Length of angles and qubit pairs must " + "be equal") + for angle, qubits in zip(angles, pairs): + yield cirq.CZ(*qubits) ** (angle / np.pi) + + +def _create_local_field_ops(angles: np.ndarray, + qubits: Sequence[cirq.Qid] + ) -> Iterable[cirq.Operation]: + """Generator of operations that realize local fields with given strengths. 
+ """ + assert len(angles) == len(qubits), ("Length of angles and qubits must be " + "equal") + for angle, qubit in zip(angles, qubits): + yield cirq.Z(qubit) ** (angle / np.pi) + + +def _create_cphase_echo_ops(qubits: Iterable[cirq.Qid] + ) -> Iterable[cirq.Operation]: + """Generator of operations that realize cphase echo gates.""" + return (CPhaseEchoGate().on(qubit) for qubit in qubits) diff --git a/recirq/fermi_hubbard/converting_sampler.py b/recirq/fermi_hubbard/converting_sampler.py new file mode 100644 index 00000000..0b35646c --- /dev/null +++ b/recirq/fermi_hubbard/converting_sampler.py @@ -0,0 +1,95 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union, Callable, Iterable, List + +import cirq +import pandas as pd + + +class ConvertingSampler(cirq.Sampler): + """Sampler delegate which converts circuit before sampling. + """ + + def __init__(self, + sampler: cirq.Sampler, + convert_func: Union[ + Callable[[cirq.Circuit], cirq.Circuit], + Iterable[Callable[[cirq.Circuit], cirq.Circuit]]] + ) -> None: + """Initializes the sampler with conversion. + + Args: + sampler: Delegate sampler where all invocations are forwarded to. + convert_func: Either a function that converts the circuit, or list + of the converting functions which are applied one by one before + delegating to the target sampler. 
+ """ + self._sampler = sampler + if isinstance(convert_func, Iterable): + self._converters = convert_func + else: + self._converters = convert_func, + + def _convert(self, program: cirq.Circuit) -> cirq.Circuit: + for converter in self._converters: + program = converter(program) + return program + + def run( + self, + program: cirq.Circuit, + param_resolver: cirq.ParamResolverOrSimilarType = None, + repetitions: int = 1, + ) -> cirq.Result: + program = self._convert(program) + return self._sampler.run(program, + param_resolver, + repetitions) + + def sample( + self, + program: cirq.Circuit, + *, + repetitions: int = 1, + params: cirq.Sweepable = None, + ) -> pd.DataFrame: + program = self._convert(program) + return self._sampler.sample(program, + repetitions=repetitions, + params=params) + + def run_sweep( + self, + program: cirq.Circuit, + params: cirq.Sweepable, + repetitions: int = 1, + ) -> List[cirq.Result]: + program = self._convert(program) + return self._sampler.run_sweep(program, params, repetitions) + + async def run_async(self, program: cirq.Circuit, *, + repetitions: int) -> cirq.Result: + program = self._convert(program) + return await self._sampler.run_async(program, + repetitions=repetitions) + + async def run_sweep_async( + self, + program: cirq.Circuit, + params: cirq.Sweepable, + repetitions: int = 1, + ) -> List[cirq.Result]: + program = self._convert(program) + return await self._sampler.run_sweep_async(program, params, repetitions) \ No newline at end of file diff --git a/recirq/fermi_hubbard/data_plotting.py b/recirq/fermi_hubbard/data_plotting.py new file mode 100644 index 00000000..5795efff --- /dev/null +++ b/recirq/fermi_hubbard/data_plotting.py @@ -0,0 +1,884 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Create plots out of experiment results.""" + +from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from recirq.fermi_hubbard.post_processing import ( + AggregatedQuantity, + InstanceBundle, + PerSiteQuantity, + Rescaling +) + + +def default_top_label( + ax: plt.Axes, u: float, step: Optional[int], dt: float) -> None: + """Function that puts a label on top of axis with standard text.""" + + if step is not None: + label = f'u={u}, η={step}, τ={round(step * dt, 1)}' + else: + label = f'u={u}' + + ax.text(0.5, 1.02, label, ha='center', va='bottom', transform=ax.transAxes) + + +def plot_quantity( + bundle: Union[InstanceBundle, Sequence[InstanceBundle]], + quantity_name: str, + *, + plot_file: Optional[str] = None, + us: Optional[Iterable[float]] = None, + steps: Optional[Iterable[int]] = None, + show_numerics: bool = True, + show_std_dev: bool = False, + show_std_error: bool = False, + axes: Optional[Sequence[Sequence[plt.Axes]]] = None, + colors: Optional[Sequence[str]] = None, + label_top_func: Optional[Callable[ + [plt.Axes, float, Optional[int], float], None]] = default_top_label, + space_horizontal: float = 0.2, + space_vertical: float = 0.4, +) -> Tuple[Optional[plt.Figure], Sequence[Sequence[plt.Axes]]]: + """Pre-defined plotting for Fermi-Hubbard quantities. + + Args: + bundle: Bundle, or iterable of bundles to plot the data from. 
+ quantity_name: Name of quantity to plot, the list of supported names + can be retrieved by listing keys of quantities property of + InstanceBundle class. + plot_file: File name where plot image should be rendered to. If not + provided, no file will be written to. + us: List of interaction strengths U to plot. If not provided, all + bundles will be plotted. + steps: List of Trotter steps to plot. If not provided, the steps will + be extracted from bundles provided that all of them have the same + range. + show_numerics: If true, the exact numerical simulation will be plotted + as lines. + show_std_dev: If true, the data standard deviation will be plotted as + error bars. + show_std_error: If true, the standard error of a mean will be plotted + as error bars. + axes: Axes to render onto. The shape of axes must be compatible with the + data plotted. If not provided, an appropriate axes will be created. + colors: List of colors to use for each plotted chain. If not provided, + a set of predefined colors for each quantity will be used. + label_top_func: The function that renders text on top of each axes. + space_horizontal: The horizontal spacing between axes. + space_vertical: The vertical spacing between axes. + + Returns: + Tuple of: + - Matplotlib figure created for plotting or None if axes were passed in + as input. + - Axes created for the purpose of plotting. 
+ """ + + if isinstance(bundle, InstanceBundle): + bundles = bundle, + else: + bundles = bundle + + if not len(bundles): + return None, [[]] + + if not steps: + steps = bundles[0].steps + for bundle in bundles[1:]: + if steps != bundle.steps: + raise ValueError( + 'Unequal Trotter steps between experiment bundles') + else: + steps = list(steps) + + if not us: + us = [bundle.u for bundle in bundles] + else: + us = list(us) + + averages, errors, dts, data_shape, bundles = _extract_bundle_data( + bundles, quantity_name, us, steps, + show_std_dev=show_std_dev, + show_std_error=show_std_error) + + allow_numerics = quantity_name != 'scaling' + + if allow_numerics and show_numerics: + numerics, *_ = _extract_bundle_data( + bundles, quantity_name, us, steps, + show_std_dev=show_std_dev, + show_std_error=show_std_error, + simulated=True) + else: + numerics = None + + if not colors: + if (quantity_name.startswith('up_down') or + quantity_name.startswith('scaling')): + colors = 'tab:orange', 'tab:green' + elif quantity_name.startswith('charge_spin'): + colors = 'tab:blue', 'tab:red' + elif quantity_name.startswith('post_selection'): + colors = 'tab:purple', + + split_yaxes = (quantity_name.startswith('charge_spin') and + not quantity_name.endswith('_dt')) + + left_label, right_label = _find_left_right_labels(quantity_name) + + if len(data_shape) == 1: + sites_count, = data_shape + sites = list(range(1, sites_count + 1)) + axis_width = max(2.5, 0.3 * len(sites)) + axis_aspect_ratio = axis_width / 2.5 + label_bottom = 'Site' + + fig, axes = _plot_density_quantity( + averages, errors, numerics, us, dts, steps, sites, + colors=colors, + axes=axes, + split_yaxes=split_yaxes, + label_top_func=label_top_func, + label_left=left_label, + label_right=right_label, + label_bottom=label_bottom, + axis_width=axis_width, + axis_aspect_ratio=axis_aspect_ratio, + space_horizontal=space_horizontal, + space_vertical=space_vertical) + else: + if axes: + if len(axes) != 1: + raise 
ValueError('Incompatible number of axes rows') + axes, = axes + + axis_width = max(2.5, 0.3 * len(steps)) + axis_aspect_ratio = axis_width / 2.5 + label_bottom = 'Time' + + fig, axes = _plot_aggregated_quantity( + averages, errors, numerics, us, dts, steps, + colors=colors, + axes=axes, + split_yaxes=split_yaxes, + label_top_func=label_top_func, + label_left=left_label, + label_right=right_label, + label_bottom=label_bottom, + axis_width=axis_width, + axis_aspect_ratio=axis_aspect_ratio, + space_horizontal=space_horizontal, + space_vertical=space_vertical) + + if quantity_name == 'scaling': + _plot_rescaling_lines(bundles, axes) + + axes = axes, + + if plot_file: + plt.savefig(plot_file) + + return fig, axes + + +def _find_left_right_labels(name: str) -> Tuple[Optional[str], Optional[str]]: + if name.startswith('up_down_'): + left_chain, right_chain = '', None + elif name.startswith('charge_spin_'): + left_chain, right_chain = 'Charge ', 'Spin ' + else: + left_chain, right_chain = '', None + + if name.endswith('_norescale'): + name = name[:-len('_norescale')] + + if name.endswith('_dt'): + name = name[:-len('_dt')] + suffix = ' rate' + else: + suffix = '' + + if name.endswith('density'): + text = 'density' + elif name.endswith('spreading'): + text = 'spreading' + elif name.endswith('position_average'): + text = 'average position' + elif name.endswith('scaling'): + text = 'scaling' + elif name.endswith('post_selection'): + text = 'success rate' + else: + return None, None + + def format_chain(chain: Optional[str], text: str) -> Optional[str]: + if chain is None: + return None + return f'{chain}{text}{ suffix}'.capitalize() + + return format_chain(left_chain, text), format_chain(right_chain, text) + + +def quantity_data_frame(bundle: Union[InstanceBundle, Sequence[InstanceBundle]], + quantity_name: str, + us: Optional[Iterable[float]] = None, + steps: Optional[Iterable[int]] = None, + simulated: bool = False + ) -> Tuple[pd.DataFrame, type, List[InstanceBundle]]: + 
"""Extracts given quantity from InstanceBundle(s) as Panda's data frame. + + Args: + bundle: Bundle, or iterable of bundles to plot the data from. + quantity_name: Name of quantity to plot, the list of supported names + can be retrieved by listing keys of quantities property of + InstanceBundle class. + us: List of interaction strengths U to plot. If not provided, all + bundles will be plotted. + steps: List of Trotter steps to plot. If not provided, the steps will + be extracted from bundles provided that all of them have the same + range. + simulated: When true, extracts the exact numerical simulation that + matches the instance problem parameters instead of the bundle + experimental data. + + Returns: + Tuple of: + - Panda's data frame with accumulated data. The columns of returned + data frame depends whether the quantity is of AggregatedQuantity or + PerSiteQuantity. + - Type of selected quantity, either AggregatedQuantity or + PerSiteQuantity. + - List of InstanceBundles used, when us list selects a subset of + provided bundles. 
+ """ + + def create_aggregated_data(data: List[Tuple[float, + int, + int, + float, + float, + Optional[float], + Optional[float]]] + ) -> pd.DataFrame: + return pd.DataFrame(data, columns=[ + 'u', 'chain', 'step', 'time', 'value', 'std_error', 'std_dev']) + + def generate_aggregated_entries(dt: float, + u: float, + quantity: AggregatedQuantity, + step: int, + data_step: int + ) -> Tuple[float, + int, + int, + float, + float, + Optional[float], + Optional[float]]: + + for chain in range(quantity.chains_count): + if quantity.std_error is not None: + std_error = float(quantity.std_error[chain][step]) + else: + std_error = None + + if quantity.std_dev is not None: + std_dev = float(quantity.std_dev[chain][step]) + else: + std_dev = None + + yield (u, + chain, + data_step, + data_step * dt, + float(quantity.average[chain][step]), + std_error, + std_dev) + + def create_per_site_data(data: List[Tuple[float, + int, + int, + int, + float, + float, + Optional[float], + Optional[float]]] + ) -> pd.DataFrame: + return pd.DataFrame(data, columns=[ + 'u', 'chain', 'step', 'time', 'site', 'value', 'std_error', + 'std_dev']) + + def generate_per_site_entries(dt: float, + u: float, + quantity: AggregatedQuantity, + step: int, + data_step: int + ) -> Tuple[float, + int, + int, + int, + float, + float, + Optional[float], + Optional[float]]: + + for chain in range(quantity.chains_count): + for site in range(len(quantity.average[chain][step])): + if quantity.std_error is not None: + std_error = quantity.std_error[chain][step][site] + else: + std_error = pd.NA + + if quantity.std_dev is not None: + std_dev = quantity.std_dev[chain][step][site] + else: + std_dev = pd.NA + + yield (u, + chain, + data_step, + data_step * dt, + site + 1, + quantity.average[chain][step][site], + std_error, + std_dev) + + if isinstance(bundle, InstanceBundle): + bundles = bundle, + else: + bundles = bundle + + if not len(bundles): + raise ValueError('At least one InstanceBundle is mandatory') + + if not 
steps: + steps = bundles[0].steps + for bundle in bundles[1:]: + if steps != bundle.steps: + raise ValueError( + 'Unequal Trotter steps between experiment bundles') + else: + steps = list(steps) + + if not us: + us = [bundle.u for bundle in bundles] + else: + us = list(us) + + steps_map = {step: index for index, step in enumerate(steps)} + us_map = {u: index for index, u in enumerate(us)} + us_bundles = [] + + data = [] + quantity_type = None + + missing_u = set(us) + for bundle in bundles: + if bundle.u not in us_map: + continue + + if simulated: + bundle = bundle.exact_numerics_bundle() + + us_bundles.append(bundle) + missing_u.remove(bundle.u) + + quantity = bundle.quantities[quantity_name]() + + if quantity_type is None: + if isinstance(quantity, AggregatedQuantity): + quantity_type = AggregatedQuantity + elif isinstance(quantity, PerSiteQuantity): + quantity_type = PerSiteQuantity + else: + raise ValueError(f'Unknown quantity type {quantity_type}') + else: + assert isinstance(quantity, quantity_type), ( + f'Inconsistent quantity types {type(quantity)} and ' + f'{quantity_type}') + + missing_steps = set(steps) + for s, step in enumerate(bundle.steps): + if step not in steps_map: + continue + + missing_steps.remove(step) + step_index = steps_map[step] + + if quantity_type == AggregatedQuantity: + data += generate_aggregated_entries( + bundle.dt, bundle.u, quantity, s, step_index) + elif quantity_type == PerSiteQuantity: + data += generate_per_site_entries( + bundle.dt, bundle.u, quantity, s, step_index) + else: + raise RuntimeError(f'Unexpected quantity type {quantity_type}') + + if missing_steps: + raise ValueError( + f'No experiments to cover Trotter steps {missing_steps}') + + if missing_u: + raise ValueError(f'No experiments to cover U values {missing_u}') + + if quantity_type == AggregatedQuantity: + pandas = create_aggregated_data(data) + elif quantity_type == PerSiteQuantity: + pandas = create_per_site_data(data) + else: + raise RuntimeError(f'Unexpected 
quantity type {quantity_type}') + + return pandas, quantity_type, us_bundles + + +def _extract_bundle_data(bundles: Sequence[InstanceBundle], + quantity_name: str, + us: List[float], + steps: List[int], + show_std_dev: bool, + show_std_error: bool, + simulated: bool = False + ) -> Tuple[List[List[List[np.ndarray]]], + List[List[List[np.ndarray]]], + List[float], + Tuple[int, ...], + List[InstanceBundle]]: + + def init_list(shape: Tuple[int, ...]) -> List: + if len(shape) == 1: + return [None] * shape[0] + return [init_list(shape[1:]) for _ in range(shape[0])] + + if show_std_dev and show_std_error: + raise ValueError('Either standard deviation or standard error of a ' + 'mean can be shown, not both.') + + steps_map = {step: index for index, step in enumerate(steps)} + us_map = {u: index for index, u in enumerate(us)} + us_bundles = [] + dts = [0.0] * len(us) + + averages = None + errors = None + chains_count = None + data_shape = None + + missing_u = set(us) + for bundle in bundles: + if bundle.u not in us_map: + continue + + if simulated: + bundle = bundle.exact_numerics_bundle() + + us_bundles.append(bundle) + u_index = us_map[bundle.u] + missing_u.remove(bundle.u) + + dts[u_index] = bundle.dt + + quantity = bundle.quantities[quantity_name]() + missing_steps = set(steps) + for s, step in enumerate(bundle.steps): + if step not in steps_map: + continue + + missing_steps.remove(step) + step_index = steps_map[step] + + if chains_count is None: + chains_count = quantity.chains_count + shape = chains_count, len(us), len(steps) + averages = init_list(shape) + errors = init_list(shape) + elif chains_count != quantity.chains_count: + raise ValueError( + 'Incompatible quantity between bundles') + + for chain in range(chains_count): + average = quantity.average[chain][s] + if show_std_error and quantity.std_error: + error = quantity.std_error[chain][s] + elif show_std_dev and quantity.std_dev: + error = quantity.std_dev[chain][s] + else: + error = None + + if data_shape is 
None: + data_shape = average.shape + if data_shape != average.shape or ( + error is not None and data_shape != error.shape): + raise ValueError( + 'Incompatible quantity shapes between bundles') + + if error is None: + error = np.nan + + averages[chain][u_index][step_index] = average + errors[chain][u_index][step_index] = error + + if missing_steps: + raise ValueError( + f'No experiments to cover Trotter steps {missing_steps}') + + if missing_u: + raise ValueError(f'No experiments to cover U values {missing_u}') + + return averages, errors, dts, data_shape, us_bundles + + +def _plot_density_quantity( + values: List[List[List[np.ndarray]]], + errors: List[List[List[np.ndarray]]], + numerics: Optional[List[List[List[np.ndarray]]]], + us: Sequence[float], + dts: Sequence[float], + steps: Sequence[int], + sites: Sequence[int], + colors: Optional[Sequence[str]], + axes: Optional[Sequence[Sequence[plt.Axes]]] = None, + split_yaxes: bool = True, + label_top_func: Optional[ + Callable[[plt.Axes, float, Optional[int], float], None]] = None, + label_left: Optional[str] = None, + label_right: Optional[str] = None, + label_bottom: Optional[str] = None, + axis_width: Optional[float] = 4.0, + axis_aspect_ratio: Optional[float] = 4.0 / 3.0, + space_left: float = 0.7, + space_right: float = 0.7, + space_top: float = 0.3, + space_bottom: float = 0.5, + space_horizontal: float = 0.15, + space_vertical: float = 0.15, + span_extension: float = 0.05 +) -> Tuple[Optional[plt.Figure], Sequence[Sequence[plt.Axes]]]: + + if axes is None: + fig, axes = _create_default_axes(len(us), + len(steps), + axis_width=axis_width, + axis_aspect_ratio=axis_aspect_ratio, + space_left=space_left, + space_right=space_right, + space_top=space_top, + space_bottom=space_bottom, + space_horizontal=space_horizontal, + space_vertical=space_vertical) + else: + fig = None + + if len(axes) != len(us): + raise ValueError('Incompatible number of axes rows') + + spans = _calculate_spans(values, 1, span_extension) + 
if numerics: + numerics_spans = _calculate_spans(numerics, 1, span_extension) + spans = [_merge_spans(pair) for pair in zip(spans, numerics_spans)] + if not split_yaxes: + spans = [_merge_spans(spans)] * len(spans) + + xlim = min(sites) - 0.5, max(sites) + 0.5 + + for chain, (values_chain, errors_chain) in enumerate(zip(values, errors)): + for index_u, (u, dt_u, values_u, errors_u, axes_u) in enumerate( + zip(us, dts, values_chain, errors_chain, axes)): + + if len(axes_u) != len(steps): + raise ValueError('Incompatible number of axes columns') + + for index_step, (step, values_step, errors_step, ax_step) in \ + enumerate(zip(steps, values_u, errors_u, axes_u)): + + color = colors[chain] if colors else None + + # Disable labels for inner subplots. + if index_u != len(us) - 1: + ax_step.set_xticklabels([]) + if index_step: + ax_step.set_yticklabels([]) + + # Determine plotting axes for each chain. + if chain == 0: + ax = ax_step + if index_step == 0 and label_left: + ax.set_ylabel(label_left, + color=color if split_yaxes else 'k') + if label_top_func: + label_top_func(ax, u, step, dt_u) + if label_bottom and index_u == len(us) - 1: + ax.set_xlabel(label_bottom) + elif chain == 1: + if split_yaxes: + ax = ax_step.twinx() + if index_step != len(steps) - 1: + ax.set_yticklabels([]) + elif label_right: + ax.set_ylabel(label_right, color=color) + else: + ax = ax_step + else: + raise ValueError(f'Unsupported number of chains ' + f'{len(values)}') + + if numerics: + ax.plot(sites, + numerics[chain][index_u][index_step], + color=color, + alpha=0.75) + + ax.scatter(sites, + values_step, + color=color) + + ax.errorbar( + sites, + values_step, + errors_step, + linestyle='', + color=color, + capsize=5, + elinewidth=1, + markeredgewidth=1) + + ax.set_xlim(*xlim) + ax.set_ylim(*spans[chain]) + if split_yaxes: + ax.tick_params('y', colors=color) + + return fig, axes + + +def _plot_aggregated_quantity( + values: List[List[List[np.ndarray]]], + errors: List[List[List[np.ndarray]]], + 
numerics: Optional[List[List[List[np.ndarray]]]], + us: Sequence[float], + dts: Sequence[float], + steps: Sequence[int], + colors: Optional[Sequence[str]], + axes: Optional[Sequence[Sequence[plt.Axes]]] = None, + split_yaxes: bool = True, + label_top_func: Optional[ + Callable[[plt.Axes, float, Optional[int], float], None]] = None, + label_left: Optional[str] = None, + label_right: Optional[str] = None, + label_bottom: Optional[str] = None, + axis_width: Optional[float] = 4.0, + axis_aspect_ratio: Optional[float] = 4.0 / 3.0, + space_left: float = 0.7, + space_right: float = 0.7, + space_top: float = 0.3, + space_bottom: float = 0.5, + space_horizontal: float = 0.15, + space_vertical: float = 0.15, + span_extension: float = 0.05 +) -> Tuple[Optional[plt.Figure], Sequence[plt.Axes]]: + + if axes is None: + fig, (axes,) = _create_default_axes(1, + len(us), + axis_width=axis_width, + axis_aspect_ratio=axis_aspect_ratio, + space_left=space_left, + space_right=space_right, + space_top=space_top, + space_bottom=space_bottom, + space_horizontal=space_horizontal, + space_vertical=space_vertical) + else: + fig = None + + if len(axes) != len(us): + raise ValueError('Incompatible number of axes columns') + + spans = _calculate_spans(values, 0, span_extension) + if numerics: + numerics_spans = _calculate_spans(numerics, 0, span_extension) + spans = [_merge_spans(pair) for pair in zip(spans, numerics_spans)] + if not split_yaxes: + spans = [_merge_spans(spans)] * len(spans) + + for chain, (values_chain, errors_chain) in enumerate(zip(values, errors)): + + for index_u, (u, dt_u, values_u, errors_u, ax_u) in enumerate( + zip(us, dts, values_chain, errors_chain, axes)): + + times = np.array(steps) * dt_u + color = colors[chain] if colors else None + + # Disable labels for inner subplots. + if index_u: + ax_u.set_yticklabels([]) + + # Determine plotting axes for each chain. 
+ if chain == 0: + ax = ax_u + if index_u == 0 and label_left: + ax.set_ylabel(label_left, + color=color if split_yaxes else 'k') + if label_top_func: + label_top_func(ax, u, None, dt_u) + if label_bottom: + ax.set_xlabel(label_bottom) + elif chain == 1: + if split_yaxes: + ax = ax_u.twinx() + if index_u != len(us) - 1: + ax.set_yticklabels([]) + elif label_right: + ax.set_ylabel(label_right, color=color) + else: + ax = ax_u + else: + raise ValueError(f'Unsupported number of chains ' + f'{len(values)}') + + if numerics: + ax.plot(times, + numerics[chain][index_u], + color=color, + alpha=0.75) + + ax.scatter(times, + values_u, + color=color) + + ax.errorbar( + times, + values_u, + errors_u, + linestyle='', + color=color, + capsize=5, + elinewidth=1, + markeredgewidth=1) + + ax.set_xlim(np.min(times) - dt_u, np.max(times) + dt_u) + ax.set_ylim(*spans[chain]) + if split_yaxes: + ax.tick_params('y', colors=color) + + return fig, axes + + +def _plot_rescaling_lines(bundles: Sequence[InstanceBundle], + axes: Sequence[plt.Axes]) -> None: + + def plot_rescaling(ax: plt.Axes, + steps: List[int], + dt: float, + rescaling: Rescaling, + color: str) -> None: + x = np.array([steps[0], steps[-1]]) + y = x * rescaling.slope + rescaling.intercept + time = x * dt + ax.plot(time, y, color=color) + + for bundle, ax in zip(bundles, axes): + plot_rescaling(ax, bundle.steps, bundle.dt, bundle.rescaling, 'purple') + plot_rescaling( + ax, bundle.steps, bundle.dt, bundle.intrinsic_rescaling, 'indigo') + + +def _calculate_spans(values: List[List[List[np.ndarray]]], + value_dim: int, + span_extension: float) -> List[Tuple[float, float]]: + + def expand_span(span_min: float, span_max: float) -> Tuple[float, float]: + span = span_max - span_min + return (span_min - span * span_extension, + span_max + span * span_extension) + + axes = tuple(range(1, 3 + value_dim)) + spans = zip(np.min(values, axes), np.max(values, axes)) + return [expand_span(span_min, span_max) for span_min, span_max in spans] 
+ + +def _merge_spans(spans: Iterable[Tuple[float, float]]) -> Tuple[float, float]: + merged_min, merged_max = np.inf, -np.inf + for span_min, span_max in spans: + merged_min = min(merged_min, span_min) + merged_max = max(merged_max, span_max) + return merged_min, merged_max + + +def _create_default_axes(rows: int, + columns: int, + *, + axis_width: Optional[float] = 4.0, + total_width: Optional[float] = None, + axis_aspect_ratio: Optional[float] = 4.0 / 3.0, + total_height: Optional[float] = None, + space_left: float = 0.5, + space_right: float = 0.5, + space_top: float = 0.3, + space_bottom: float = 0.3, + space_horizontal: float = 0.15, + space_vertical: float = 0.15 + ) -> Tuple[plt.Figure, List[List[plt.Axes]]]: + + if total_width is None: + if axis_width is None: + raise ValueError('Either axis width or total width must be ' + 'provided') + total_width = (space_left + space_right + + axis_width * columns + + space_horizontal * (columns - 1)) + elif axis_width is None: + axis_width = (total_width - space_left - space_right - + space_horizontal * (columns - 1)) / columns + else: + raise ValueError('Axis width and total width can not be provided at ' + 'the same time') + + if total_height is None: + if axis_aspect_ratio is None: + raise ValueError('Either axis aspect ratio or total height must be ' + 'provided') + axis_height = axis_width / axis_aspect_ratio + total_height = (space_top + space_bottom + + axis_height * rows + space_vertical * (rows - 1)) + elif axis_aspect_ratio is None: + axis_height = (total_height - space_top - space_bottom - + space_vertical * (rows - 1)) / rows + else: + raise ValueError('Axis aspect ratio and total height can not be ' + 'provided at the same time') + + fig: plt.Figure = plt.figure(figsize=(total_width, total_height)) + axes = [] + + offset_bottom = space_bottom + for _ in range(rows): + row_axes = [] + offset_left = space_left + for _ in range(columns): + row_axes.append(plt.axes([offset_left / total_width, + offset_bottom 
/ total_height, + axis_width / total_width, + axis_height/ total_height])) + offset_left += axis_width + space_horizontal + axes.append(row_axes) + offset_bottom += axis_height + space_vertical + + axes = list(reversed(axes)) + + return fig, axes diff --git a/recirq/fermi_hubbard/decomposition.py b/recirq/fermi_hubbard/decomposition.py new file mode 100644 index 00000000..e20df9fa --- /dev/null +++ b/recirq/fermi_hubbard/decomposition.py @@ -0,0 +1,548 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Circuit and gate decompositions.""" + +from typing import ( + Callable, Iterable, Mapping, Optional, Tuple, Union +) + +import cirq +from dataclasses import dataclass +from functools import lru_cache +from itertools import zip_longest +import numpy as np + +Decomposition = Iterable[Iterable[cirq.Operation]] +DecomposeCallable = Callable[[cirq.Operation], Optional[Decomposition]] + + +@dataclass(frozen=True) +class ParticleConservingParameters: + """Parameters of the particle-conserving two-qubit gate. 
+
+    [[1, 0, 0, 0],
+     [0, cos(θ) e^{-i(γ + δ)}, -i sin(θ) e^{-i(γ - χ)}, 0],
+     [0, -i sin(θ) e^{-i(γ + χ)}, cos(θ) e^{-i(γ - δ)}, 0],
+     [0, 0, 0, e^{-i (2 γ + φ)}]]
+
+    Args:
+        theta: the iSWAP angle θ
+        delta: the phase difference δ
+        chi: the gauge associated to the phase difference χ
+        gamma: the common phase factor γ
+        phi: the CPHASE angle φ
+    """
+    theta: Optional[float] = None
+    delta: Optional[float] = None
+    chi: Optional[float] = None
+    gamma: Optional[float] = None
+    phi: Optional[float] = None
+
+    @lru_cache(maxsize=None)
+    def for_qubits_swapped(self) -> 'ParticleConservingParameters':
+        """Instance of particle-conserving parameters when qubits are swapped.
+        """
+        return ParticleConservingParameters(
+            theta=self.theta,
+            delta=-self.delta if self.delta is not None else None,
+            chi=-self.chi if self.chi is not None else None,
+            gamma=self.gamma,
+            phi=self.phi
+        )
+
+    def get_theta(self, qubits: Tuple[cirq.Qid, cirq.Qid]) -> float:
+        if self.theta is None:
+            raise ValueError(f'Missing θ parameter for qubits pair {qubits}')
+        return self.theta
+
+    def get_delta(self, qubits: Tuple[cirq.Qid, cirq.Qid]) -> float:
+        if self.delta is None:
+            raise ValueError(f'Missing δ parameter for qubits pair {qubits}')
+        return self.delta
+
+    def get_chi(self, qubits: Tuple[cirq.Qid, cirq.Qid]) -> float:
+        if self.chi is None:
+            raise ValueError(f'Missing χ parameter for qubits pair {qubits}')
+        return self.chi
+
+    def get_gamma(self, qubits: Tuple[cirq.Qid, cirq.Qid]) -> float:
+        if self.gamma is None:
+            raise ValueError(f'Missing γ parameter for qubits pair {qubits}')
+        return self.gamma
+
+    def get_phi(self, qubits: Tuple[cirq.Qid, cirq.Qid]) -> float:
+        if self.phi is None:
+            raise ValueError(f'Missing φ parameter for qubits pair {qubits}')
+        return self.phi
+
+
+class ConvertToNonUniformSqrtIswapGates:
+    """Converter that decomposes circuits to √iSWAP gate set.
+
+    The converter supports cases where √iSWAP gates deviate slightly from gate
+    to gate on a chip.
+ + The converter does not support all the Cirq gates but a subset of them + necessary for Fermi-Hubbard problem. See decompose_gk_gate, + decompose_cphase_gate and decompose_cphase_echo_gate for supported gates. + """ + + def __init__(self, + parameters: Mapping[Tuple[cirq.Qid, cirq.Qid], + ParticleConservingParameters], + *, + parameters_when_missing: Optional[ + ParticleConservingParameters] = None, + sin_alpha_tolerance: Optional[float] = 0.15, + eject_z_gates: bool = True) -> None: + """Initializes converter. + + Args: + parameters: Qubir pair dependent √iSWAP gate parameters. + parameters_when_missing: Fallback parameters to use when either the + needed qubit pair is not present in parameters set, or when the + parameters are present but a necessary angle value is missing. + This object might specify only a subset of angles. If a + necessary angle value is not resolved, then exception is raised + during decomposition. + sin_alpha_tolerance: Tolerance for sin(alpha) value as passed to + corrected_cphase_ops function. + eject_z_gates: Whether to apply the cirq.optimizers.EjectZ optimizer + on the decomposed circuit. The effect of this optimization is a + circuit with virtual Z gates removed; only Z gates which are + actually realized on the Google hardware are kept. + """ + + if parameters_when_missing: + self._missing = parameters_when_missing + else: + self._missing = ParticleConservingParameters() + + self._parameters = { + pair: _merge_parameters(value, self._missing) + for pair, value in parameters.items() + } + + if sin_alpha_tolerance: + self._sin_alpha_tolerance = sin_alpha_tolerance + else: + self._sin_alpha_tolerance = 0.0 + + self._eject_z_gates = eject_z_gates + + def convert(self, circuit: cirq.Circuit) -> cirq.Circuit: + """Decomposes circuit to √iSWAP gate set. + + Gates with no known decompositions are left untouched. 
+ """ + decomposed = decompose_preserving_moments( + circuit, + decompose_func=(self._decompose_gk_gate, + self._decompose_cphase_gate, + self._decompose_cphase_echo_gate)) + + if self._eject_z_gates: + cirq.optimizers.EjectZ().optimize_circuit(decomposed) + cirq.optimizers.DropEmptyMoments().optimize_circuit(decomposed) + + return decomposed + + def _decompose_gk_gate(self, operation: cirq.Operation + ) -> Optional[Decomposition]: + """Decomposes √iSWAP with arbitrary exponent to two √iSWAP gates. + + The cirq.PhasedISwapPowGate, cirq.ISwapPowGate and cirq.FSimGate with + zero phi angle are supported by this decomposition. + + The decomposition compensates for delta, chi and gamma deviations from + the perfect unitary and keeps theta and phi errors for each √iSWAP gate. + + Args: + operation: Operation to decompose. + + Returns: + The decomposition of the supported gate to moments with single-qubit + Z rotations and two √iSWAP gates. None if operation is not supported + by this gate. + """ + + if not isinstance(operation, cirq.GateOperation): + return None + + if len(operation.qubits) != 2: + return None + + a, b = operation.qubits + qubits = a, b + gate = operation.gate + + if isinstance(gate, cirq.PhasedISwapPowGate): + angle = gate.exponent * np.pi / 2 + phase_exponent = -gate.phase_exponent - 0.5 + elif isinstance(gate, cirq.FSimGate) and np.isclose(gate.phi, 0.0): + angle = gate.theta + phase_exponent = 0.0 + elif isinstance(gate, cirq.ISwapPowGate): + angle = -gate.exponent * np.pi / 2 + phase_exponent = 0.0 + else: + return None + + return _corrected_gk_ops(qubits, + angle, + phase_exponent, + self._get_parameters(qubits)) + + def _decompose_cphase_gate(self, operation: cirq.Operation + ) -> Optional[Decomposition]: + """Decomposes CZ with arbitary exponent to two √iSWAP gates. + + The cirq.CZPowGate and cirq.FSimGate with zero theta angle are supported + by this decomposition. 
+ + The decomposition compensates for all unitary parameter deviations from + the perfect √iSWAP unitary. + + Args: + operation: Operation to decompose. + + Returns: + The decomposition of the supported gate to moments with single-qubit + rotations and two √iSWAP gates. None if operation is not supported + by this gate. + """ + + if not isinstance(operation, cirq.GateOperation): + return None + + if len(operation.qubits) != 2: + return None + + a, b = operation.qubits + qubits = a, b + gate = operation.gate + + if isinstance(gate, cirq.CZPowGate): + angle = -gate.exponent * np.pi + elif isinstance(gate, cirq.FSimGate) and np.isclose(gate.theta, 0.0): + angle = gate.phi + else: + return None + + return _corrected_cphase_ops(qubits, + angle, + self._get_parameters(qubits), + self._sin_alpha_tolerance) + + def _decompose_cphase_echo_gate(self, operation: cirq.Operation + ) -> Optional[Decomposition]: + """Decomposes CPhaseEchoGate into two pi microwave pulses. + + This effect of this gate is a global phase shift. The decomposition puts + the microwave pulses at the matching moments in of the + decompose_cphase_gate results. + + Args: + operation: Operation to decompose. + + Returns: + The decomposition of the CPhaseEchoGate gate to two X pulses which + cancel each other. + """ + if not isinstance(operation, cirq.GateOperation): + return None + + if isinstance(operation.gate, CPhaseEchoGate): + qubit, = operation.qubits + return _corrected_cphase_echo_ops(qubit) + else: + return None + + def _get_parameters(self, qubits: Tuple[cirq.Qid, cirq.Qid] + ) -> ParticleConservingParameters: + a, b = qubits + if (a, b) in self._parameters: + return self._parameters[(a, b)] + elif (b, a) in self._parameters: + return self._parameters[(b, a)].for_qubits_swapped() + else: + return self._missing + + +@cirq.value_equality() +class CPhaseEchoGate(cirq.SingleQubitGate): + """Dummy gate that could be substituted by spin-echo pulses. + + This gate decomposes to nothing by default. 
+ """ + + def _value_equality_values_(self): + return () + + def _decompose_(self, _) -> cirq.OP_TREE: + return () + + def _circuit_diagram_info_(self, _: cirq.CircuitDiagramInfoArgs) -> str: + return 'CPhaseEcho' + + +class GateDecompositionError(Exception): + """Raised when gate decomposition is infeasible.""" + pass + + +def _corrected_gk_ops( + qubits: Tuple[cirq.Qid, cirq.Qid], + angle: float, + phase_exponent: float, + parameters: ParticleConservingParameters +) -> Tuple[Tuple[cirq.Operation, ...], ...]: + """Decomposition of cirq.FSim(angle, 0) into two non-ideal √iSWAP gates. + + The target unitary is: + + [[1, 0, 0, 0], + [0, c, -i·s·f*, 0], + [0, -i·s·f, c, 0], + [0, 0, 0, 1]] + + where c = cos(angle), s = sin(angle) and f = exp(2πi·phase + πi/2). + + This target unitary might not be always possible to realize. The + decomposition compensates for delta, chi and gamma deviations from the + perfect unitary but keeps theta and phi errors for each √iSWAP gate. + + Args: + qubits: Two qubits to act on. Note the that ordering of qubits is + relevant because true √iSWAP is not symmetric (see + ParticleConservingParameters.for_qubits_swapped) + angle: The desired rotation angle. + phase_exponent: Single-qubit phase freedom. The effect of this rotation + can be cancelled out by applying Z rotations before or after this + gate. + parameters: True unitary parameters of the √iSWAP gate into which this + function decomposes to. + + Returns: + Tuple of tuples of operations. The outer tuple should be mapped directly + to the circuit moments, and each inner tuple consists of operations that + should be executed in parallel. 
+ """ + + phase_exponent += 0.25 + sqrt_iswap = _corrected_sqrt_iswap_ops(qubits, parameters) + a, b = qubits + + return ( + (cirq.Z(a) ** (1.0 - phase_exponent), cirq.Z(b) ** phase_exponent), + ) + sqrt_iswap + ( + (cirq.rz(angle + np.pi).on(a), cirq.rz(-angle).on(b)), + ) + sqrt_iswap + ( + (cirq.Z(a) ** phase_exponent, cirq.Z(b) ** -phase_exponent), + ) + + +def _corrected_cphase_ops( + qubits: Tuple[cirq.Qid, cirq.Qid], + angle: float, + parameters: ParticleConservingParameters, + sin_alpha_tolerance: float = 0.0 +) -> Tuple[Tuple[cirq.Operation, ...], ...]: + """Decomposition of cirq.FSim(0, angle) into two non-ideal √iSWAP gates. + + The target unitary is: + + [[1 0 0 0], + [0 1 0 0], + [0 0 1 0], + [0 0 0 exp(-i·angle)]] + + The decomposition compensates for all unitary parameter deviations from + the perfect √iSWAP unitary and utilizes three moments of microwave pulses to + achieve this. See corrected_cphase_echo_ops for compatible spin echo moment + structure. + + Args: + qubits: Two qubits to act on. Note the that ordering of qubits is + relevant because true √iSWAP is not symmetric (see + ParticleConservingParameters.for_qubits_swapped) + angle: The desired rotation angle. + parameters: True unitary parameters of the √iSWAP gate into which this + function decomposes to. + sin_alpha_tolerance: Threshold that controls the magnitude of + approximation of the decomposition. When set to 0, the gate is + always exact. Positive value controls the amount of noise this + decomposition tolerates. + Returns: + Tuple of tuples of operations. The outer tuple should be mapped directly + to the circuit moments, and each inner tuple consists of operations that + should be executed in parallel. + + Raises: + GateDecompositionError: When deviations from true √iSWAP make the + decomposition infeasible. 
+ """ + + theta = parameters.get_theta(qubits) + phi = parameters.get_phi(qubits) + + sin_alpha = ((np.sin(angle / 4) ** 2 - np.sin(phi / 2) ** 2) / + (np.sin(theta) ** 2 - np.sin(phi / 2) ** 2)) + + if sin_alpha < 0.0 and np.isclose(sin_alpha, 0.0, atol=1e-3): + sin_alpha = 0.0 + elif 1.0 < sin_alpha < 1.0 + sin_alpha_tolerance: + sin_alpha = 1.0 + + if 0 <= sin_alpha <= 1: + alpha = np.arcsin(sin_alpha ** 0.5) + else: + raise GateDecompositionError( + f'Cannot decompose the C-phase gate on qubits {qubits} into the ' + f'given fSim gates (angle={angle}, sin(alpha)={sin_alpha}, ' + f'parameters={parameters})') + + beta = 0.5 * np.pi * (1 - np.sign(np.cos(0.5 * phi))) + gamma = 0.5 * np.pi * (1 - np.sign(np.sin(0.5 * phi))) + + xi = np.arctan(np.tan(alpha) * np.cos(theta) / np.cos(0.5 * phi)) + beta + if angle < 0: + xi += np.pi + + if np.isclose(phi, 0.0): + eta = 0.5 * np.sign(np.tan(alpha) * np.sin(theta)) * np.pi + else: + eta = np.arctan( + np.tan(alpha) * np.sin(theta) / np.sin(0.5 * phi)) + gamma + + sqrt_iswap = _corrected_sqrt_iswap_ops(qubits, parameters) + a, b = qubits + + return ( + (cirq.rx(xi).on(a), cirq.rx(eta).on(b)), + (cirq.rz(0.5 * phi).on(a), cirq.rz(0.5 * phi).on(b)) + ) + sqrt_iswap + ( + (cirq.rx(-2 * alpha).on(a),), + (cirq.rz(0.5 * phi + np.pi).on(a), cirq.rz(0.5 * phi).on(b)) + ) + sqrt_iswap + ( + (cirq.rx(-xi).on(a), cirq.rx(-eta).on(b)), + (cirq.rz(-0.5 * angle + np.pi).on(a), cirq.rz(-0.5 * angle).on(b)) + ) + + +def _corrected_cphase_echo_ops(qubit: cirq.Qid + ) -> Tuple[Tuple[cirq.Operation, ...], ...]: + return ((),) * 5 + (cirq.X(qubit),) + ((),) * 4 + (cirq.X(qubit),) + + +def _corrected_sqrt_iswap_ops( + qubits: Tuple[cirq.Qid, cirq.Qid], + parameters: ParticleConservingParameters +) -> Tuple[Tuple[cirq.Operation, ...], ...]: + """Decomposition of √iSWAP into one non-ideal √iSWAP gate. 
+ + Target unitary is: + + [[1, 0, 0, 0], + [0, 1/√2, -i/√2, 0], + [0, -i/√2, 1/√2, 0], + [0, 0, 0, 1]] + + This target unitary might not be always possible to realize. The + decomposition compensates for delta, chi and gamma deviations from the + perfect unitary but keeps theta and phi errors of the non-ideal √iSWAP gate. + + Args: + qubits: Two qubits to act on. Note the that ordering of qubits is + relevant because true √iSWAP is not symmetric (see + ParticleConservingParameters.for_qubits_swapped) + parameters: True unitary parameters of the √iSWAP gate into which this + function decomposes to. + Returns: + Tuple of tuples of operations. The outer tuple should be mapped directly + to the circuit moments, and each inner tuple consists of operations that + should be executed in parallel. + """ + + delta = parameters.get_delta(qubits) + gamma = parameters.get_gamma(qubits) + chi = parameters.get_chi(qubits) + + a, b = qubits + alpha = 0.5 * (delta + chi) + beta = 0.5 * (delta - chi) + return ( + (cirq.rz(0.5 * gamma - alpha).on(a), + cirq.rz(0.5 * gamma + alpha).on(b)), + (cirq.ISWAP(a, b) ** (-0.5),), + (cirq.rz(0.5 * gamma - beta).on(a), cirq.rz(0.5 * gamma + beta).on(b)) + ) + + +def _merge_parameters(parameters: ParticleConservingParameters, + fallback: ParticleConservingParameters + ) -> ParticleConservingParameters: + """Merges two instances of ParticleConservingParameters together. + + Uses values from parameters argument if they are set and fallbacks to values + from fallback argument otherwise. 
+ """ + return ParticleConservingParameters( + theta=fallback.theta if parameters.theta is None else parameters.theta, + delta=fallback.delta if parameters.delta is None else parameters.delta, + chi=fallback.chi if parameters.chi is None else parameters.chi, + gamma=fallback.gamma if parameters.gamma is None else parameters.gamma, + phi=fallback.phi if parameters.phi is None else parameters.phi, + ) + + +def decompose_preserving_moments( + circuit: cirq.Circuit, + decompose_func: Union[DecomposeCallable, Iterable[DecomposeCallable]] +) -> cirq.Circuit: + """Decomposes circuit moment by moment. + + This function decomposes each operation within every moment simultaneously + and expands the moment into the longest operation that was decomposed. It + never mixes operation from two different input moments together. + + Args: + circuit: Circuit to decompose. + decompose_func: Function or iterable of functions that decomposes + operation into iterable of moments of simultaneously executed + operations. If many functions are provided, all off them are tried + until decomposition is not None. When no decomposition is found, + input gate is copied as is. + + Returns: + New cirq.Circuit instance which is a decomposed version of circuit. 
+ """ + + def decompose(operation: cirq.Operation) -> Decomposition: + for func in decompose_func: + decomposition = func(operation) + if decomposition is not None: + return decomposition + return (operation,), + + if not isinstance(decompose_func, Iterable): + decompose_func = decompose_func, + + decomposed = cirq.Circuit() + for moment in circuit: + decompositions = (decompose(operation) for operation in moment) + for operations in zip_longest(*decompositions, fillvalue=()): + decomposed += cirq.Moment(operations) + + return decomposed diff --git a/recirq/fermi_hubbard/decomposition_test.py b/recirq/fermi_hubbard/decomposition_test.py new file mode 100644 index 00000000..3c01de4f --- /dev/null +++ b/recirq/fermi_hubbard/decomposition_test.py @@ -0,0 +1,133 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from itertools import product +import cirq +import numpy as np +import pytest + +from recirq.fermi_hubbard.decomposition import ( + _corrected_cphase_ops, + _corrected_gk_ops, + _corrected_sqrt_iswap_ops, + GateDecompositionError, + ParticleConservingParameters +) + + +def create_particle_conserving_matrix(theta: float, + delta: float, + chi: float, + gamma: float, + phi: float) -> np.array: + """Matrix form of the most general particle conserving two-qubit gate. 
+ + The returned unitary is: + + [[1, 0, 0, 0], + [0, c e^{-i (gamma + delta)}, -is e^{-i (gamma - chi)}, 0], + [0, -is e^{-i (gamma + chi)}, c e^{-i (gamma - delta)}, 0], + [0, 0, 0, e^{-i (2 gamma + phi}]] + """ + + matrix = [ + [1., 0., 0., 0.], + [0., + np.exp(-1.j * (gamma + delta)) * np.cos(theta), + -1.j * np.exp(-1.j * (gamma - chi)) * np.sin(theta), + 0.], + [0., + -1.j * np.exp(-1.j * (gamma + chi)) * np.sin(theta), + np.exp(-1.j * (gamma - delta)) * np.cos(theta), + 0], + [0., 0., 0., np.exp(-1.j * (2. * gamma + phi))] + ] + return np.asarray(matrix) + + +@pytest.mark.parametrize('delta,chi,gamma', + product(np.linspace(0, 2 * np.pi, 5), + np.linspace(0, 2 * np.pi, 5), + np.linspace(0, 2 * np.pi, 5))) +def test_corrected_sqrt_iswap_ops(delta: float, chi: float, gamma: float + ) -> None: + + a, b = cirq.LineQubit.range(2) + + matrix = cirq.unitary(cirq.Circuit(_corrected_sqrt_iswap_ops( + qubits=(a, b), + parameters=ParticleConservingParameters( + delta=delta, + chi=chi, + gamma=gamma)))) + + expected = create_particle_conserving_matrix( + np.pi / 4, -delta, -chi, -gamma, 0) + + assert cirq.equal_up_to_global_phase(matrix, expected) + + +@pytest.mark.parametrize('theta', np.linspace(0, 2 * np.pi, 5)) +def test_corr_hop_gate(theta: float) -> None: + + a, b = cirq.LineQubit.range(2) + + matrix = cirq.unitary(cirq.Circuit(_corrected_gk_ops( + qubits=(a, b), + angle=theta, + phase_exponent=0.0, + parameters=ParticleConservingParameters( + theta=theta, + delta=0, + chi=0, + gamma=0)))) + + expected = create_particle_conserving_matrix(theta, 0, 0, 0, 0) + + assert cirq.equal_up_to_global_phase(matrix, expected) + + +@pytest.mark.parametrize('cphase', np.linspace(-0.32, -0.8 * np.pi, 5)) +def test_corrected_cphase_ops(cphase: float) -> None: + + a, b = cirq.LineQubit.range(2) + + matrix = cirq.unitary(cirq.Circuit(_corrected_cphase_ops( + qubits=(a, b), + angle=cphase, + parameters=ParticleConservingParameters( + theta=np.pi / 4, + delta=0, + chi=0, + gamma=0, + 
phi=0)))) + + expected = create_particle_conserving_matrix(0, 0, 0, 0, cphase) + + assert cirq.equal_up_to_global_phase(matrix, expected) + + +def test_corrected_cphase_ops_throws() -> None: + a, b = cirq.LineQubit.range(2) + with pytest.raises(GateDecompositionError): + _corrected_cphase_ops( + qubits=(a, b), + angle=np.pi / 13, + parameters=ParticleConservingParameters( + theta=np.pi / 4, + delta=0, + chi=0, + gamma=0, + phi=np.pi / 24)) + diff --git a/recirq/fermi_hubbard/execution.py b/recirq/fermi_hubbard/execution.py new file mode 100644 index 00000000..751c560d --- /dev/null +++ b/recirq/fermi_hubbard/execution.py @@ -0,0 +1,298 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Routines and data types for experiment execution and life cycle.""" + +from typing import ( + Callable, Dict, IO, Iterable, Optional, Tuple, Type, Union, cast) + +import cirq +from concurrent.futures import ThreadPoolExecutor +import numpy as np +import os +import pathlib +import time + +from recirq.fermi_hubbard.circuits import create_line_circuits, create_zigzag_circuits +from recirq.fermi_hubbard.layouts import ( + LineLayout, + ZigZagLayout +) +from recirq.fermi_hubbard.parameters import FermiHubbardParameters + + +@cirq.json_serializable_dataclass(init=False) +class ExperimentResult: + """Accumulated results of a single Fermi-Hubbard circuit run. 
+
+    This container is used to store raw probabilities for each qubit (fermionic
+    number densities), with or without post-selection applied.
+    """
+    up_density: Tuple[float, ...]
+    down_density: Tuple[float, ...]
+    measurements_count: int
+
+    def __init__(self, *,
+                 up_density: Iterable[float],
+                 down_density: Iterable[float],
+                 measurements_count: int) -> None:
+        self.up_density = tuple(up_density)
+        self.down_density = tuple(down_density)
+        self.measurements_count = measurements_count
+
+    @classmethod
+    def cirq_resolvers(cls) -> Dict[str, Optional[Type]]:
+        return {cls.__name__: cls}
+
+
+@cirq.json_serializable_dataclass
+class ExperimentRun:
+    """Single Fermi-Hubbard circuit execution run data.
+
+    Attributes:
+        trotter_steps: Trotter steps count executed.
+        end_timestamp_sec: Seconds since the unix epoch at the moment of circuit
+            completion.
+        result: Extracted fermionic number densities for each site.
+        result_post_selected: Extracted fermionic number densities for each
+            site with post selection applied.
+        result_raw: Bitstring measurements for the circuit execution. This field
+            is not populated if keep_raw_result argument of run_experiment
+            function is set to False.
+        calibration_timestamp_usec: Microseconds since the unix epoch that
+            identify the processor calibration used during the circuit
+            execution. This field is not populated by run_experiment function.
+    """
+    trotter_steps: int
+    end_timestamp_sec: float
+    result: ExperimentResult
+    result_post_selected: ExperimentResult
+    result_raw: Optional[cirq.Result] = None
+    calibration_timestamp_usec: Optional[int] = None
+
+    @classmethod
+    def cirq_resolvers(cls) -> Dict[str, Optional[Type]]:
+        return {
+            cls.__name__: cls,
+            **ExperimentResult.cirq_resolvers()
+        }
+
+    @property
+    def calibration_timestamp_sec(self) -> float:
+        # 1 second = 1e6 microseconds; dividing by 1000 would yield
+        # milliseconds rather than the seconds this property promises.
+        return self.calibration_timestamp_usec / 1e6
+
+
+@cirq.json_serializable_dataclass
+class FermiHubbardExperiment:
+    """Results of a Fermi-Hubbard experiment execution for fixed parameters.
+
+    Attributes:
+        parameters: Problem parameters used for circuit compilation and
+            execution.
+        runs: Runs results for different Trotter step counts.
+        processor: Processor name as passed to run_experiment function.
+        name: Experiment name as passed to run_experiment function.
+    """
+
+    parameters: FermiHubbardParameters
+    runs: Tuple[ExperimentRun]
+    processor: Optional[str] = None
+    name: Optional[str] = None
+
+    @classmethod
+    def cirq_resolvers(cls) -> Dict[str, Optional[Type]]:
+        return {
+            cls.__name__: cls,
+            **ExperimentRun.cirq_resolvers(),
+            **FermiHubbardParameters.cirq_resolvers(),
+        }
+
+
+def save_experiment(experiment: FermiHubbardExperiment,
+                    file_or_fn: Union[None, IO, pathlib.Path, str],
+                    make_dirs: bool = True) -> None:
+    """Persists experiment to a JSON file.
+
+    Args:
+        experiment: Experiment to save.
+        file_or_fn: File description as passed to cirq.to_json function.
+        make_dirs: When true and file_or_fn is a path string, missing parent
+            directories of the file are created before saving.
+    """
+    if isinstance(file_or_fn, str):
+        dir = os.path.dirname(file_or_fn)
+        if dir and make_dirs:
+            os.makedirs(dir, exist_ok=True)
+
+    cirq.to_json(experiment, file_or_fn)
+
+
+def load_experiment(file_or_fn: Union[None, IO, pathlib.Path, str]
+                    ) -> FermiHubbardExperiment:
+    """Loads experiment from the JSON file.
+
+    Args:
+        file_or_fn: File description as passed to cirq.read_json function.
+ """ + data = cirq.read_json( + file_or_fn, + resolvers=cirq.DEFAULT_RESOLVERS + + [FermiHubbardExperiment.cirq_resolvers().get]) + return cast(FermiHubbardExperiment, data) + + +def run_experiment( + parameters: FermiHubbardParameters, + steps: Iterable[int], + sampler: cirq.Sampler = cirq.Simulator(), + *, + repetitions: int = 20000, + name: Optional[str] = None, + processor: Optional[str] = None, + keep_raw_result: bool = False, + threads: int = 4, + pre_run_func: Optional[Callable[[int, cirq.Circuit], None]] = None, + post_run_func: Optional[Callable[[int, cirq.Result], None]] = None +) -> FermiHubbardExperiment: + """Executes Fermi-Hubbard experiment. + + Args: + parameters: Fermi-Hubbard problem description. + steps: Array of Trotter step counts that should be simulated. + sampler: Sampler used to run the circuits. This can be either a + simulation sampler or sampler with capability of running on quantum + hardware. + repetitions: Number of repetitions for each circuit executed. + name: Name which is passed to returned container. + processor: Processor name which is passed to returned container. + keep_raw_result: When true, the raw bitstring measurements for each run + will be stored within the results object. + threads: Number of threads used for execution. When set to 0, no + multithreading routines are used. + pre_run_func: Optional callable which is called before each circuit + is scheduled for execution. + post_run_func: Optional callable which is called after each circuit + completes. + + Returns: + Instance of FermiHubbardExperiment with experiment results. 
+ """ + + def run_step(step: int) -> ExperimentRun: + initial, trotter, measurement = create_circuits(parameters, step) + circuit = initial + trotter + measurement + + if pre_run_func: + pre_run_func(step, circuit) + + trial = sampler.run(circuit, repetitions=repetitions) + end_time = time.time() + + if post_run_func: + post_run_func(step, trial) + + result = extract_result(parameters.up_particles, + parameters.down_particles, + trial, + post_select=False) + + result_post_selected = extract_result(parameters.up_particles, + parameters.down_particles, + trial, + post_select=True) + + return ExperimentRun( + trotter_steps=step, + end_timestamp_sec=end_time, + result=result, + result_post_selected=result_post_selected, + result_raw=trial if keep_raw_result else None + ) + + if threads: + with ThreadPoolExecutor(max_workers=threads) as executor: + runs = tuple(executor.map(run_step, steps)) + else: + runs = tuple(run_step(step) for step in steps) + + return FermiHubbardExperiment( + parameters=parameters, + runs=runs, + processor=processor, + name=name + ) + + +def create_circuits(parameters: FermiHubbardParameters, + trotter_steps: int + ) -> Tuple[cirq.Circuit, cirq.Circuit, cirq.Circuit]: + """ + + Args: + parameters: Fermi-Hubbard problem description. + trotter_steps: Number of trotter steps to include. + + Returns: + Tuple of: + - initial state preparation circuit, + - trotter steps circuit, + - measurement circuit. + """ + if isinstance(parameters.layout, LineLayout): + return create_line_circuits(parameters, trotter_steps) + elif isinstance(parameters.layout, ZigZagLayout): + return create_zigzag_circuits(parameters, trotter_steps) + else: + raise ValueError(f'Unknown layout {parameters.layout}') + + +def extract_result(up_particles: int, + down_particles: int, + trial: cirq.Result, + post_select: bool) -> ExperimentResult: + """Extracts accumulated results out of bitstring measurements. + + Args: + up_particles: Number of particles in the up chain. 
+        down_particles: Number of particles in the down chain.
+        trial: Bitstring measurements from quantum device or simulator.
+        post_select: When true, post selection is used and only measurements
+            which match the up_particles and down_particles counts in the up and
+            down chain are used.
+
+    Returns:
+        ExperimentResult with per-site densities averaged over the (possibly
+        post-selected) repetitions.
+    """
+
+    def post_selection(measurements: np.ndarray) -> np.ndarray:
+        desired = np.array((up_particles, down_particles))
+        counts = np.sum(measurements, axis=2, dtype=int)
+        errors = np.abs(counts - desired)
+        selected = measurements[(np.sum(errors, axis=1) == 0).nonzero()]
+        if not len(selected):
+            # Trailing space before the closing quote is required: the message
+            # is built from two implicitly-concatenated f-string fragments.
+            print(f'Warning: no measurement matching desired particle numbers '
+                  f'{desired} for post selection {post_select}')
+        return selected
+
+    def create_result(measurements: np.ndarray) -> ExperimentResult:
+        nd = np.average(measurements, axis=0)
+        return ExperimentResult(
+            measurements_count=len(measurements),
+            up_density=nd[0, :],
+            down_density=nd[1, :]
+        )
+
+    raw = trial.measurements['z']
+    repetitions, qubits = raw.shape
+    raw = raw.reshape((repetitions, 2, qubits // 2))
+
+    if post_select:
+        return create_result(post_selection(raw))
+    else:
+        return create_result(raw)
diff --git a/recirq/fermi_hubbard/execution_test.py b/recirq/fermi_hubbard/execution_test.py
new file mode 100644
index 00000000..771cc073
--- /dev/null
+++ b/recirq/fermi_hubbard/execution_test.py
+# Copyright 2020 Google
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from typing import Optional + +import cirq +import pytest + +from recirq.fermi_hubbard.decomposition import CPhaseEchoGate +from recirq.fermi_hubbard.execution import create_circuits +from recirq.fermi_hubbard.layouts import ( + LineLayout, + QubitsLayout, + ZigZagLayout +) +from recirq.fermi_hubbard.parameters import ( + FermiHubbardParameters, + GaussianTrappingPotential, + Hamiltonian, + IndependentChainsInitialState, + InitialState, + PhasedGaussianSingleParticle, + UniformSingleParticle, + UniformTrappingPotential +) + + +@pytest.mark.parametrize('layout_type', [ + LineLayout, ZigZagLayout +]) +def test_create_circuits_initial_trapping(layout_type: type) -> None: + layout = layout_type(size=4) + parameters = _create_test_parameters( + layout, + initial_state=IndependentChainsInitialState( + up=UniformTrappingPotential(particles=2), + down=GaussianTrappingPotential( + particles=2, + center=0.2, + sigma=0.3, + scale=0.4 + ) + ) + ) + + initial, _, _ = create_circuits(parameters, trotter_steps=0) + + up1, up2, up3, up4 = layout.up_qubits + down1, down2, down3, down4 = layout.down_qubits + + expected = cirq.Circuit([ + cirq.Moment(cirq.X(up1), cirq.X(up2), cirq.X(down1), cirq.X(down2)), + cirq.Moment( + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.7322795271987699).on(up2, up3), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.7502896835888355 + ).on(down2, down3), + ), + cirq.Moment((cirq.Z**0.0).on(up3), (cirq.Z**0.0).on(down3)), + cirq.Moment( + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.564094216848975).on(up1, up2), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5768514417132005 + ).on(down1, down2), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5640942168489748).on(up3, up4), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5973464819433905 + ).on(down3, down4), + ), + cirq.Moment( + (cirq.Z**0.0).on(up2), + (cirq.Z**0.0).on(down2), + (cirq.Z**0.0).on(up4), + 
(cirq.Z**0.0).on(down4), + ), + cirq.Moment( + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.26772047280123007).on(up2, up3), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.2994619470606122 + ).on(down2, down3), + ), + cirq.Moment((cirq.Z**0.0).on(up3), (cirq.Z**0.0).on(down3)), + ]) + + assert cirq.approx_eq(initial, expected) + + +@pytest.mark.parametrize('layout_type', [ + LineLayout, ZigZagLayout +]) +def test_create_circuits_initial_single_particle(layout_type: type) -> None: + layout = layout_type(size=4) + parameters = _create_test_parameters( + layout, + initial_state=IndependentChainsInitialState( + up=UniformSingleParticle(), + down=PhasedGaussianSingleParticle(k=0.2, sigma=0.4, position=0.6) + ) + ) + + initial, _, _ = create_circuits(parameters, trotter_steps=0) + + up1, up2, up3, up4 = layout.up_qubits + down1, down2, down3, down4 = layout.down_qubits + + expected = cirq.Circuit([ + cirq.Moment( + cirq.X(up2), + cirq.X(down2), + ), + cirq.Moment( + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5).on(up2, up3), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5550026943884815 + ).on(down2, down3), + ), + cirq.Moment( + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.5).on(up3, up4), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=0.5).on(up1, up2), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=-0.42338370832577876 + ).on(down3, down4), + cirq.PhasedISwapPowGate(phase_exponent=0.25, + exponent=0.3609629722553269 + ).on(down1, down2), + ), + cirq.Moment( + (cirq.Z ** 0.0).on(up3), + (cirq.Z ** 0.0).on(up4), + (cirq.Z ** 0.0).on(up1), + (cirq.Z ** 0.0).on(up2), + (cirq.Z ** 0.004244131815783875).on(down3), + (cirq.Z ** 0.02546479089470326).on(down4), + (cirq.Z ** -0.03819718634205488).on(down1), + (cirq.Z ** -0.016976527263135508).on(down2), + ), + ]) + + assert cirq.approx_eq(initial, expected) + + +def test_create_circuits_trotter_line() -> None: + layout = 
LineLayout(size=4) + parameters = _create_test_parameters(layout, u=2.0) + _, trotter, _ = create_circuits(parameters, trotter_steps=1) + + up1, up2, up3, up4 = layout.up_qubits + down1, down2, down3, down4 = layout.down_qubits + + expected = cirq.Circuit([ + cirq.Moment( + cirq.FSimGate(theta=-0.2, phi=0.0).on(up2, up1), + cirq.FSimGate(theta=-0.2, phi=0.0).on(up4, up3), + cirq.FSimGate(theta=-0.2, phi=0.0).on(down2, down1), + cirq.FSimGate(theta=-0.2, phi=0.0).on(down4, down3), + ), + cirq.Moment( + cirq.FSimGate(theta=-0.2, phi=0.0).on(up3, up2), + cirq.FSimGate(theta=-0.2, phi=0.0).on(down3, down2), + ), + cirq.Moment( + (cirq.CZ**-0.12732395447351627).on(up1, down1), + (cirq.CZ**-0.12732395447351627).on(up2, down2), + (cirq.CZ**-0.12732395447351627).on(up3, down3), + (cirq.CZ**-0.12732395447351627).on(up4, down4), + ), + ]) + + assert cirq.approx_eq(trotter, expected) + + +def test_create_circuits_trotter_zigzag() -> None: + layout = ZigZagLayout(size=4) + parameters = _create_test_parameters(layout, u=2.0) + _, trotter, _ = create_circuits(parameters, trotter_steps=1) + + up1, up2, up3, up4 = layout.up_qubits + down1, down2, down3, down4 = layout.down_qubits + + expected = cirq.Circuit([ + cirq.Moment( + cirq.FSimGate(theta=-0.2, phi=0.0).on(up2, up1), + cirq.FSimGate(theta=-0.2, phi=0.0).on(up4, up3), + cirq.FSimGate(theta=-0.2, phi=0.0).on(down2, down1), + cirq.FSimGate(theta=-0.2, phi=0.0).on(down4, down3), + ), + cirq.Moment( + (cirq.CZ**-0.12732395447351627).on(down2, up2), + (cirq.CZ**-0.12732395447351627).on(down4, up4), + CPhaseEchoGate().on(down1), + CPhaseEchoGate().on(down3), + CPhaseEchoGate().on(up1), + CPhaseEchoGate().on(up3), + ), + cirq.Moment( + cirq.FSimGate(theta=-1.5707963267948966, phi=0.0).on(up3, up2), + cirq.FSimGate(theta=-1.5707963267948966, phi=0.0).on(down3, down2), + ), + cirq.Moment( + (cirq.CZ**-0.12732395447351627).on(down1, up1), + (cirq.CZ**-0.12732395447351627).on(down2, up2), + CPhaseEchoGate().on(down3), + 
CPhaseEchoGate().on(down4), + CPhaseEchoGate().on(up3), + CPhaseEchoGate().on(up4), + ), + cirq.Moment( + cirq.FSimGate(theta=-1.7707963267948965, phi=0.0).on(up3, up2), + cirq.FSimGate(theta=-1.7707963267948965, phi=0.0).on(down3, down2), + ), + cirq.Moment( + cirq.Z(up2), cirq.Z(up3), cirq.Z(down2), cirq.Z(down3), + ), + ]) + + assert cirq.approx_eq(trotter, expected) + + +@pytest.mark.parametrize('layout_type', [ + LineLayout, ZigZagLayout +]) +def test_create_circuits_measurement(layout_type: type) -> None: + layout = layout_type(size=4) + parameters = _create_test_parameters(layout) + _, _, measurement = create_circuits(parameters, trotter_steps=0) + assert cirq.approx_eq( + measurement, + cirq.Circuit(cirq.measure(*layout.all_qubits, key='z'))) + + +def _create_test_parameters(layout: QubitsLayout, + u: float = 0.0, + initial_state: Optional[InitialState] = None + ) -> FermiHubbardParameters: + + hamiltonian = Hamiltonian( + sites_count=layout.size, + j=1.0, + u=u + ) + + if initial_state is None: + initial_state = IndependentChainsInitialState( + up=GaussianTrappingPotential( + particles=3, center=0.5, sigma=1, scale=1), + down=UniformTrappingPotential(particles=3) + ) + + return FermiHubbardParameters( + hamiltonian=hamiltonian, + initial_state=initial_state, + layout=layout, + dt=0.2 + ) \ No newline at end of file diff --git a/recirq/fermi_hubbard/fermionic_circuits.py b/recirq/fermi_hubbard/fermionic_circuits.py new file mode 100644 index 00000000..944a544b --- /dev/null +++ b/recirq/fermi_hubbard/fermionic_circuits.py @@ -0,0 +1,159 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Circuit generator that prepares a single-fermion initial states.""" + +import math +from typing import List, Optional, Tuple, Sequence, Iterable + +import cirq +import numpy as np + + +def create_one_particle_circuit( + qubits: List[cirq.Qid], + amplitudes: np.array, + x_index: Optional[int] = None +) -> cirq.Circuit: + """Construct an arbitrary single-particle fermionic state. + + This procedure constructs circuit of a lower depth compared to the more + generic openfermion.slater_determinant_preparation_circuit. It allows to + initializes the qubit in the middle as starting excitation and distributes + the amplitudes to the left and right simultaneously, which cuts the depth + by about two. + + Args: + qubits: List of qubits. + amplitudes: The desired coefficient for each fermion. + x_index: Index of the initial excitation to propagate from. + + Returns: + Circuit that realizes the desired distribution. + """ + + sites_count = len(qubits) + + if sites_count != len(amplitudes): + raise ValueError('Length of amplitudes and qubits array must be equal') + + x_index, rotations, phases = _create_one_particle_moments( + amplitudes, x_index) + + # Prepare initial state. + circuit = cirq.Circuit([cirq.X.on(qubits[x_index])]) + + # Construct the circuit from the rotations. + for moment in rotations: + circuit += cirq.Circuit(_create_moment_operations(qubits, moment)) + + # Fix the desired phases. 
+ circuit += cirq.Circuit( + [cirq.Z(qubits[i]) ** phases[i] for i in range(sites_count)]) + + return circuit + + +def _create_one_particle_moments( + amplitudes: np.array, + x_index: Optional[int] = None, + offset: int = 0 +) -> Tuple[int, List[Tuple[Tuple[int, int, float], ...]], np.ndarray]: + + if not np.isclose(np.linalg.norm(amplitudes), 1.0): + raise ValueError('Amplitudes array must be normalized') + + sites_count = len(amplitudes) + phases = np.angle(amplitudes) / np.pi + + if x_index is None: + x_index = (sites_count - 1) // 2 + + if sites_count == 1: + return x_index, [], phases + + left_count = x_index + right_count = sites_count - x_index - 1 + + if left_count > right_count: + right_rotations, previous = _create_rotations( + list(reversed(amplitudes[x_index:])), + shift=x_index, + offset=offset, + reverse=True) + left_rotations, _ = _create_rotations( + list(amplitudes[:x_index]) + [previous], + shift=0, + offset=offset, + reverse=False) + rotations = _merge_rotations(left_rotations, right_rotations) + else: + left_rotations, previous = _create_rotations( + amplitudes[:x_index + 1], + shift=0, + offset=offset, + reverse=False) + right_rotations, _ = _create_rotations( + list(reversed(amplitudes[x_index + 1:])) + [previous], + shift=x_index, + offset=offset, + reverse=True) + rotations = _merge_rotations(right_rotations, left_rotations) + + return x_index, rotations, phases + + +def _create_rotations(amplitudes: Sequence[complex], + shift: int, + offset: int, + reverse: bool + ) -> Tuple[List[Tuple[int, int, float]], float]: + rotations = [] + size = len(amplitudes) + previous = np.abs(amplitudes[0]) + for i in range(1, size): + theta, previous = _left_givens_params( + previous, np.abs(amplitudes[i])) + if reverse: + rotations.append( + (size - i + shift - 1 + offset, + size - i + shift + offset, + -theta)) + else: + rotations.append( + (i + shift - 1 + offset, + i + shift + offset, + theta)) + return list(reversed(rotations)), previous + + +def 
_left_givens_params(left, right): + theta = math.atan2(left, right) + return theta, left / math.sin(theta) + + +def _merge_rotations(first: List[Tuple[int, int, float]], + second: List[Tuple[int, int, float]] + ) -> List[Tuple[Tuple[int, int, float], ...]]: + rotations = [(first[0],)] + list(zip(first[1:], second)) + count = len(rotations) + rotations += [(r,) for r in first[count:]] + rotations += [(r,) for r in second[count - 1:]] + return rotations + + +def _create_moment_operations( + qubits: List[cirq.Qid], + moment: Iterable[Tuple[int, int, float]]) -> List[cirq.Operation]: + return [cirq.givens(theta).on(qubits[a], qubits[b]) + for a, b, theta in moment] \ No newline at end of file diff --git a/recirq/fermi_hubbard/fermionic_circuits_test.py b/recirq/fermi_hubbard/fermionic_circuits_test.py new file mode 100644 index 00000000..96ad452f --- /dev/null +++ b/recirq/fermi_hubbard/fermionic_circuits_test.py @@ -0,0 +1,88 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List + +import cirq +import numpy as np +import pytest + +from recirq.fermi_hubbard.fermionic_circuits import create_one_particle_circuit + + +def _single_fermionic_modes_state(amplitudes: List[complex]) -> np.ndarray: + """Prepares state which is a superposition of single Fermionic modes. + + Args: + amplitudes: List of amplitudes to be assigned for a state representing + a Fermionic mode. 
+ + Return: + State vector which is a superposition of single fermionic modes under + JWT representation, each with appropriate amplitude assigned. + """ + n = len(amplitudes) + state = np.zeros(1 << n, dtype=complex) + for m in range(len(amplitudes)): + state[1 << (n - 1 - m)] = amplitudes[m] + return state / np.linalg.norm(state) + + +@pytest.mark.parametrize( + 'shape', + [ + [1.0], + [1.0j], + [1.0, 1.0], + [1.0, 0.5], + [1.0, 0.5 + 0.5j], + [0.5 - 0.5j, 0.5 + 0.5j], + [1, 2, 3, 4, 5, 6, 7, 8], + [1, 1, 1, 1, 1, 1, 1, 1] + ] +) +def test_create_one_particle_circuit(shape): + amplitudes = shape / np.linalg.norm(shape) + qubits = cirq.LineQubit.range(len(amplitudes)) + circuit = create_one_particle_circuit(qubits, amplitudes) + assert np.allclose( + circuit.final_state_vector(), + _single_fermionic_modes_state(amplitudes)) + + +@pytest.mark.parametrize( + 'shape', + [ + [1.0], + [1.0j], + [1.0, 1.0], + [1.0, 0.5], + [1.0, 0.5 + 0.5j], + [0.5 - 0.5j, 0.5 + 0.5j], + [1, 2, 3], + [1, 2, 3, 4], + [1, 2, 3, 4, 5], + [1, 2, 3, 4, 5, 7], + [1, 2, 3, 4, 5, 6, 7, 8], + [1, 1, 1, 1, 1, 1, 1, 1] + ] +) +def test_create_one_particle_circuit_with_x_index(shape): + for x_index in range(len(shape)): + amplitudes = shape / np.linalg.norm(shape) + qubits = cirq.LineQubit.range(len(amplitudes)) + circuit = create_one_particle_circuit(qubits, amplitudes, x_index) + assert np.allclose( + circuit.final_state_vector(), + _single_fermionic_modes_state(amplitudes)) diff --git a/recirq/fermi_hubbard/layouts.py b/recirq/fermi_hubbard/layouts.py new file mode 100644 index 00000000..3bf84dc9 --- /dev/null +++ b/recirq/fermi_hubbard/layouts.py @@ -0,0 +1,439 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Embeddings of Fermi-Hubbard problem on a chip.""" + +from typing import Dict, List, Optional, Tuple, Type, Union + +import cirq + +GridQubitPairs = List[Tuple[cirq.GridQubit, cirq.GridQubit]] + + +@cirq.json_serializable_dataclass(init=False, unsafe_hash=True) +class LineLayout: + """Mapping of problem qubits to two parallel lines on a grid. + + This mapping encodes fermions under Jordan-Wigner transformations to qubits + on a chip. This is the most favourable connectivity, where all the + next-neighbour and on-site interactions can be realized without any swap + between fermions. + + For example, the eight-site embedding on a grid looks like: + + 1↑──2↑──3↑──4↑──5↑──6↑──7↑──8↑ + ┃ ┃ ┃ ┃ ┃ ┃ ┃ ┃ + 1↓──2↓──3↓──4↓──5↓──6↓──7↓──8↓ + + where thin lines symbolize next-neighbour interactions and bold lines the + on-site interactions. + + When instance of this layout is used in FermiHubbardParameters, the + create_circuits function constructs a cirq.Circuit mapped to given qubits + which realize a Trotterized evolution. + + Attributes: + size: Number of sites for each chain. + origin: The starting qubit coordinates on a 2D grid. The up chain will + start from this qubit and the down chain will start at the qubit + which is 90 degrees clockwise from the line direction. + rotation: Counter-clockwise rotation of the (1, 0) vector which + indicates the direction where line should be traced. Only multiplies + of 90 are allowed. 
+ """ + + size: int + origin: Tuple[int, int] + rotation: int + + def __init__(self, *, + size: int, + origin: Tuple[int, int] = (0, 0), + rotation: int = 0) -> None: + a, b = origin + self.origin = a, b + self.size = size + self.rotation = rotation + + self._initialize_layout() + + def _initialize_layout(self) -> None: + up_qubits, down_qubits = _find_line_qubits(self.size, + self.origin, + self.rotation) + + up_even_pairs, up_odd_pairs = _get_even_odd_pairs(up_qubits) + down_even_pairs, down_odd_pairs = _get_even_odd_pairs(down_qubits) + + self._hop_even_pairs = down_even_pairs + up_even_pairs + self._hop_odd_pairs = down_odd_pairs + up_odd_pairs + + self._up_qubits = up_qubits + self._down_qubits = down_qubits + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + @property + def up_qubits(self) -> List[cirq.GridQubit]: + return self._up_qubits + + @property + def down_qubits(self) -> List[cirq.GridQubit]: + return self._down_qubits + + @property + def all_qubits(self) -> List[cirq.GridQubit]: + return self.up_qubits + self.down_qubits + + @property + def up_even_pairs(self) -> GridQubitPairs: + return _get_even_pairs(self.up_qubits) + + @property + def down_even_pairs(self) -> GridQubitPairs: + return _get_even_pairs(self.down_qubits) + + @property + def up_odd_pairs(self) -> GridQubitPairs: + return _get_odd_pairs(self.up_qubits) + + @property + def down_odd_pairs(self) -> GridQubitPairs: + return _get_odd_pairs(self.down_qubits) + + @property + def interaction_pairs(self) -> GridQubitPairs: + return list(zip(self.up_qubits, self._down_qubits)) + + def default_layout(self) -> 'LineLayout': + return LineLayout(size=self.size) + + def text_diagram(self, draw_grid_coords: bool = True) -> str: + return _draw_chains(self.up_qubits, + self.down_qubits, + self.interaction_pairs, + draw_grid_coords) + + +@cirq.json_serializable_dataclass(init=False, unsafe_hash=True) +class ZigZagLayout: + """Mapping of problem qubits 
to two zig-zag lines on a grid. + + This mapping encodes fermions under Jordan-Wigner transformations to qubits + on a chip in a more compact way compared to LineLayout. Because of that, not + all on-site interactions can be realized simultaneously and a Fermionic + swaps are necessary to do them. This influences a cost of a single Trotter + step because the interaction terms are realized in two moments, and an + additional swapping layer is added. + + For example, the eight-site embedding on a grid looks like: + + 1↓━━1↑ + │ │ + 2↓━━2↑──3↑ + │ │ + 3↓──4↓━━4↑──5↑ + │ │ + 5↓──6↓━━6↑──7↑ + │ │ + 7↓──8↓━━8↑ + + where thin lines symbolize next-neighbour interactions and bold lines the + on-site interactions. + + When instance of this layout is used in FermiHubbardParameters, the + create_circuits function constructs a cirq.Circuit mapped to given qubits + which realize a Trotterized evolution. + + Attributes: + size: Number of sites for each chain. + origin: The starting qubit coordinates on a 2D grid where zig-zag is + traced from. + rotation: Counter-clockwise rotation of the (1, 0) vector which + indicates the direction where zig-zag should be traced. Only + multiplies of 90 are allowed. + flipped: If true, zig-zag is flipped along the axis perpendicular to the + traced direction. + exchange_chains: If true, qubits on up an down chain are exchanged with + each other. + reverse_chains: If true, qubits on each chain are reversed. 
+ """ + + size: int + origin: Tuple[int, int] + rotation: int + flipped: bool + exchange_chains: bool + reverse_chains: bool + + def __init__(self, *, + size: int, + origin: Tuple[int, int] = (0, 0), + rotation: int = 0, + flipped: bool = False, + exchange_chains: bool = False, + reverse_chains: bool = False) -> None: + self.size = size + a, b = origin + self.origin = (a, b) + self.rotation = rotation + self.flipped = flipped + self.exchange_chains = exchange_chains + self.reverse_chains = reverse_chains + + self._initialize_layout() + + def _initialize_layout(self) -> None: + + up_qubits, down_qubits = _find_zigzag_qubits(self.size, + self.origin, + self.rotation, + self.flipped, + self.exchange_chains) + + self._int_even_pairs = [(down_qubits[i], up_qubits[i]) + for i in range(1, self.size, 2)] + self._int_even_other_qubits = down_qubits[0::2] + up_qubits[0::2] + + self._int_odd_pairs = ([(down_qubits[0], up_qubits[0])] + + [(down_qubits[i], up_qubits[i]) + for i in range(1, self.size - 2, 2)]) + self._int_odd_other_qubits = (down_qubits[2:-2:2] + down_qubits[-2:] + + up_qubits[2:-2:2] + up_qubits[-2:]) + + self._up_even_pairs = _get_even_pairs(up_qubits) + self._up_odd_pairs = _get_odd_pairs(up_qubits) + + self._down_even_pairs = _get_even_pairs(down_qubits) + self._down_odd_pairs = _get_odd_pairs(down_qubits) + + if self.reverse_chains: + up_qubits = list(reversed(up_qubits)) + down_qubits = list(reversed(down_qubits)) + else: + up_qubits = up_qubits + down_qubits = down_qubits + + self._up_qubits = up_qubits + self._down_qubits = down_qubits + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + @property + def up_qubits(self) -> List[cirq.GridQubit]: + return self._up_qubits + + @property + def down_qubits(self) -> List[cirq.GridQubit]: + return self._down_qubits + + @property + def all_qubits(self) -> List[cirq.GridQubit]: + return self.up_qubits + self.down_qubits + + @property + def up_even_pairs(self) -> 
GridQubitPairs: + return self._up_even_pairs + + @property + def down_even_pairs(self) -> GridQubitPairs: + return self._down_even_pairs + + @property + def up_odd_pairs(self) -> GridQubitPairs: + return self._up_odd_pairs + + @property + def down_odd_pairs(self) -> GridQubitPairs: + return self._down_odd_pairs + + @property + def interaction_even_pairs(self) -> GridQubitPairs: + return self._int_even_pairs + + @property + def interaction_even_other_qubits(self) -> List[cirq.GridQubit]: + return self._int_even_other_qubits + + @property + def interaction_odd_pairs(self) -> GridQubitPairs: + return self._int_odd_pairs + + @property + def interaction_odd_other_qubits(self) -> List[cirq.GridQubit]: + return self._int_odd_other_qubits + + def default_layout(self) -> 'ZigZagLayout': + return ZigZagLayout(size=self.size) + + def text_diagram(self, draw_grid_coords: bool = True) -> str: + return _draw_chains( + self.up_qubits, + self.down_qubits, + self.interaction_even_pairs + self.interaction_odd_pairs, + draw_grid_coords) + + +def _find_line_qubits( + size: int, + origin: Tuple[int, int], + rotation: int +) -> Tuple[List[cirq.GridQubit], List[cirq.GridQubit]]: + if rotation % 90 != 0: + raise ValueError('Layout rotation must be a multiple of 90 degrees') + + def generate(row, col, drow, dcol) -> List[cirq.GridQubit]: + return [cirq.GridQubit(row + i * drow, col + i * dcol) + for i in range(size)] + + rotations = [(1, 0), (0, 1), (-1, 0), (0, -1)] + drow, dcol = rotations[(rotation % 360) // 90] + + up_row, up_col = origin + up_qubits = generate(up_row, up_col, drow, dcol) + + down_drow, down_dcol = rotations[((rotation + 270) % 360) // 90] + down_row, down_col = up_row + down_drow, up_col + down_dcol + down_qubits = generate(down_row, down_col, drow, dcol) + + return up_qubits, down_qubits + + +def _find_zigzag_qubits( + size: int, + origin: Tuple[int, int], + rotation: int, + flipped: bool, + exchange_chains: bool +) -> Tuple[List[cirq.GridQubit], 
List[cirq.GridQubit]]: + + if size % 2 == 1: + raise ValueError( + 'Odd number of sites is not supported for ZigZagLayout') + + if rotation % 90 != 0: + raise ValueError( + 'ZigZagLayout rotation must be a multiple of 90 degrees') + + # Compute qubits coordinates pinned to (0, 0) + ref_up = 0, 1 + up_coords = [(ref_up[0] + (i + 1) // 2, ref_up[1] + i // 2) + for i in range(size - 1)] + + ref_down = 1, 0 + down_coords = [(ref_down[0] + (i + 1) // 2, ref_down[1] + i // 2) + for i in range(size - 1)] + + if flipped: + up_coords = reversed([(0, 0)] + up_coords) + down_coords = reversed(down_coords + [(size // 2, size // 2)]) + else: + up_coords = up_coords + [(size // 2, size // 2)] + down_coords = [(0, 0)] + down_coords + + # Rotate qubit coordinates if necessary + rotation = (rotation // 90) % 4 + if rotation == 1: + up_coords = [(y, -x) for x, y in up_coords] + down_coords = [(y, -x) for x, y in down_coords] + elif rotation == 2: + up_coords = [(-x, -y) for x, y in up_coords] + down_coords = [(-x, -y) for x, y in down_coords] + elif rotation == 3: + up_coords = [(-y, x) for x, y in up_coords] + down_coords = [(-y, x) for x, y in down_coords] + + # Translate qubit coordinates to pin at origin + up_coords = [(x + origin[0], y + origin[1]) + for x, y in up_coords] + down_coords = [(x + origin[0], y + origin[1]) + for x, y in down_coords] + + up_qubits = [cirq.GridQubit(*coord) for coord in up_coords] + down_qubits = [cirq.GridQubit(*coord) for coord in down_coords] + + if exchange_chains: + up_qubits, down_qubits = down_qubits, up_qubits + + return up_qubits, down_qubits + + +def _get_even_odd_pairs(qubits: List[cirq.GridQubit], + ) -> Tuple[List[Tuple[cirq.GridQubit, cirq.GridQubit]], + List[Tuple[cirq.GridQubit, cirq.GridQubit]]]: + even_pairs = [(qubits[i + 1], qubits[i]) + for i in range(0, len(qubits) - 1, 2)] + odd_pairs = [(qubits[i + 1], qubits[i]) + for i in range(1, len(qubits) - 1, 2)] + return even_pairs, odd_pairs + + +def _get_even_pairs(qubits: 
List[cirq.GridQubit] + ) -> List[Tuple[cirq.GridQubit, cirq.GridQubit]]: + return [(qubits[i + 1], qubits[i]) for i in range(0, len(qubits) - 1, 2)] + + +def _get_odd_pairs(qubits: List[cirq.GridQubit] + ) -> List[Tuple[cirq.GridQubit, cirq.GridQubit]]: + return [(qubits[i + 1], qubits[i]) for i in range(1, len(qubits) - 1, 2)] + + +def _draw_chains(up_qubits: List[cirq.GridQubit], + down_qubits: List[cirq.GridQubit], + interactions: List[Tuple[cirq.GridQubit, cirq.GridQubit]], + draw_grid_coords: bool) -> str: + + def qubit_coords(qubit: cirq.GridQubit) -> Tuple[int, int]: + return qubit.col - min_col, qubit.row - min_row + + diagram = cirq.TextDiagramDrawer() + + min_col = min(qubit.col for qubit in up_qubits + down_qubits) + min_row = min(qubit.row for qubit in up_qubits + down_qubits) + + last_col, last_row = None, None + + for index, qubit in enumerate(up_qubits): + col, row = qubit_coords(qubit) + label = f'{index + 1}↑' + if draw_grid_coords: + label += f' {str(qubit)}' + diagram.write(col, row, label) + if index: + diagram.grid_line(last_col, last_row, col, row) + last_col, last_row = col, row + + for index, qubit in enumerate(down_qubits): + col, row = qubit_coords(qubit) + label = f'{index + 1}↓' + if draw_grid_coords: + label += f' {str(qubit)}' + diagram.write(col, row, label) + if index: + diagram.grid_line(last_col, last_row, col, row) + last_col, last_row = col, row + + for a, b in interactions: + diagram.grid_line(*qubit_coords(a), *qubit_coords(b), emphasize=True) + + return diagram.render(horizontal_spacing=3 if draw_grid_coords else 2, + vertical_spacing=2 if draw_grid_coords else 1, + use_unicode_characters=True) + + +QubitsLayout = Union[LineLayout, ZigZagLayout] +"""Arbitrary qubits layout""" diff --git a/recirq/fermi_hubbard/parameters.py b/recirq/fermi_hubbard/parameters.py new file mode 100644 index 00000000..ffd679be --- /dev/null +++ b/recirq/fermi_hubbard/parameters.py @@ -0,0 +1,598 @@ +# Copyright 2020 Google +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Containers to represent Fermi-Hubbard problem.""" + +from typing import Any, Dict, Iterable, Optional, Tuple, Type, Union + +import abc +from itertools import product +from numbers import Number + +import cirq +import numpy as np +import openfermion + +from recirq.fermi_hubbard.layouts import ( + LineLayout, + QubitsLayout, + ZigZagLayout +) + +Real = Union[int, float] + + +@cirq.json_serializable_dataclass(init=False) +class Hamiltonian: + """Single spin-chain Fermi-Hubbard Hamiltonian description. + + H = - Σ_i Σ_ν (J_i c_{i,ν}^† c_{i+1,ν} + h.c.) + + Σ_i U_i n_{i,↑} n_{i,↓} + + Σ_i Σ_ν V_i n_{i,ν} n_{i+1,ν} + + Σ_i Σ_ν (ε_{i,ν} - μ_ν) n_{i,ν}, + + where: + i = 1, 2, ..., sites_count are the site indices, + ν = ↑,↓ denote the two spin states, + c_{i,ν} are the fermionic annihilation operators, + n_{i,ν} = c_{i,ν}^† c_{i,ν} are the number operators. + + Attributes: + sites_count: Total number of sites, where each site has spin-up and + spin-down occupancy. + j: The hopping coefficients J_i. + u: The on-site interaction coefficients U_i. + v: Same-spin nearest-neighbour interaction coefficients V_i. + local_charge: Local electric field coefficients 0.5 * (ε_{i,↑} + + ε_{i,↓}). + local_spin: Local magnetic field coefficients 0.5 * (ε_{i,↑} - ε_{i,↓}). + mu_up: Local chemical potential for spin-up states μ_↑. + mu_down: Local chemical potential for spin-down states μ_↓. 
+ """ + sites_count: int + j: Union[Real, Tuple[Real]] + u: Union[Real, Tuple[Real]] + v: Union[Real, Tuple[Real]] + local_charge: Union[Real, Tuple[Real]] + local_spin: Union[Real, Tuple[Real]] + mu_up: Union[Real, Tuple[Real]] + mu_down: Union[Real, Tuple[Real]] + + def __init__(self, *, + sites_count: int, + j: Union[Real, Iterable[Real]], + u: Union[Real, Iterable[Real]], + v: Union[Real, Iterable[Real]] = 0, + local_charge: Union[Real, Iterable[Real]] = 0, + local_spin: Union[Real, Iterable[Real]] = 0, + mu_up: Union[Real, Iterable[Real]] = 0, + mu_down: Union[Real, Iterable[Real]] = 0, + ) -> None: + self.sites_count = sites_count + self.j = _iterable_to_tuple(j) + self.u = _iterable_to_tuple(u) + self.v = _iterable_to_tuple(v) + self.local_charge = _iterable_to_tuple(local_charge) + self.local_spin = _iterable_to_tuple(local_spin) + self.mu_up = _iterable_to_tuple(mu_up) + self.mu_down = _iterable_to_tuple(mu_down) + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + @property + def interactions_count(self) -> int: + return self.sites_count - 1 + + @property + def j_array(self) -> np.ndarray: + if isinstance(self.j, tuple): + return np.array(self.j) + return np.full(self.interactions_count, self.j) + + @property + def u_array(self) -> np.ndarray: + if isinstance(self.u, tuple): + return np.array(self.u) + return np.full(self.sites_count, self.u) + + @property + def v_array(self) -> np.ndarray: + if isinstance(self.v, tuple): + return np.array(self.v) + return np.full(self.interactions_count, self.v) + + @property + def local_charge_array(self) -> np.ndarray: + if isinstance(self.local_charge, tuple): + return np.array(self.local_charge) + return np.full(self.sites_count, self.local_charge) + + @property + def local_spin_array(self) -> np.ndarray: + if isinstance(self.local_spin, tuple): + return np.array(self.local_spin) + return np.full(self.sites_count, self.local_spin) + + @property + def 
mu_up_array(self) -> np.ndarray: + if isinstance(self.mu_up, tuple): + return np.array(self.mu_up) + return np.full(self.sites_count, self.mu_up) + + @property + def mu_down_array(self) -> np.ndarray: + if isinstance(self.mu_down, tuple): + return np.array(self.mu_down) + return np.full(self.sites_count, self.mu_down) + + @property + def local_up_array(self): + return (self.local_charge_array - + self.local_spin_array - + self.mu_up_array) + + @property + def local_down_array(self): + return (self.local_charge_array + + self.local_spin_array - + self.mu_down_array) + + def as_diagonal_coulomb_hamiltonian( + self + ) -> openfermion.DiagonalCoulombHamiltonian: + """Exports Fermi Hubbard Hamiltonian as DiagonalCoulombHamiltonian. + + Returns: Description of Fermi Hubbard problem as + openfermion.DiagonalCoulombHamiltonian. + """ + + def spin_map(j: int, spin: int) -> int: + """Mapping from site and spin to index (separated in spin sectors). + + Assigns indices 0 through self.sites_count - 1 when spin = 0 and + indices self.sites_count through 2 * self.sites_count - 1 when + spin = 1. + """ + return j + spin * self.sites_count + + modes = 2 * self.sites_count + + # Prepare one-body matrix T_ij. + one_body = np.zeros((modes, modes)) + + # Nearest-neighbor hopping terms. + t = self.j_array + for j, s in product(range(self.interactions_count), [0, 1]): + j1 = (j + 1) % self.sites_count + one_body[spin_map(j, s), spin_map(j1, s)] += -t[j] + one_body[spin_map(j1, s), spin_map(j, s)] += -t[j] + + # Local interaction terms. + local_up = self.local_up_array + local_down = self.local_down_array + for j in range(self.sites_count): + one_body[spin_map(j, 0), spin_map(j, 0)] += local_up[j] + one_body[spin_map(j, 1), spin_map(j, 1)] += local_down[j] + + # Prepare the two-body matrix V_ij. + two_body = np.zeros((modes, modes)) + + # On-site interaction terms. + u = self.u_array + for j in range(self.sites_count): + two_body[spin_map(j, 0), spin_map(j, 1)] += u[j] / 2. 
+ two_body[spin_map(j, 1), spin_map(j, 0)] += u[j] / 2. + + # Nearest-neighbor interaction terms. + v = self.v_array + for j, (s0, s1) in product(range(self.interactions_count), + np.ndindex((2, 2))): + j1 = (j + 1) % self.sites_count + two_body[spin_map(j, s0), spin_map(j1, s1)] += v[j] / 2. + two_body[spin_map(j1, s1), spin_map(j, s0)] += v[j] / 2. + + return openfermion.DiagonalCoulombHamiltonian(one_body, two_body) + + +class SingleParticle: + """Base class for initial states that define single particle on a chain.""" + + @property + def particles(self) -> int: + return 1 + + @abc.abstractmethod + def get_amplitudes(self, sites_count: int) -> np.ndarray: + """Calculates fermionic amplitudes for each site. + + Args: + sites_count: Number of fermionic sites. + + Returns: + Complex array of size sites_count with amplitude for each fermion + on each site. Must be normalized to 1. + """ + + +@cirq.json_serializable_dataclass(init=False) +class FixedSingleParticle(SingleParticle): + """Fixed array of particle amplitudes. + + This initial state is fixed to constant number of sites. 
+    """
+    amplitudes: Tuple[Number]
+
+    def __init__(self, *, amplitudes: Iterable[Number]) -> None:
+        _check_normalized(amplitudes)
+        self.amplitudes = tuple(amplitudes)
+
+    @classmethod
+    def cirq_resolvers(cls) -> Dict[str, Optional[Type]]:
+        return {cls.__name__: cls}
+
+    def get_amplitudes(self, sites_count: int) -> np.ndarray:
+        if sites_count != len(self.amplitudes):
+            raise ValueError(f'Fixed single particle not compatible with '
+                             f'{sites_count} sites')
+        # Return the stored amplitudes; this class has no `potential` field.
+        return np.array(self.amplitudes)
+
+
+@cirq.json_serializable_dataclass
+class UniformSingleParticle(SingleParticle):
+    """Uniform single particle amplitudes initial state."""
+    pass
+
+    @classmethod
+    def cirq_resolvers(cls) -> Dict[str, Optional[Type]]:
+        return {cls.__name__: cls}
+
+    def get_amplitudes(self, sites_count: int) -> np.ndarray:
+        return np.full(sites_count, 1.0 / np.sqrt(sites_count))
+
+
+@cirq.json_serializable_dataclass(init=False)
+class PhasedGaussianSingleParticle(SingleParticle):
+    """Gaussian shaped particle density distribution initial state.
+
+    We first define the local density function for site i = 1, 2, ...,
+    sites_count as:
+
+    d_i = e^{-0.5 * [(i - m) / σ]^2} / N,
+
+    where
+
+    N = Σ_i e^{-0.5 * [(i - m) / σ]^2}
+
+    is the normalization factor. The amplitudes of the traveling Gaussian
+    wavepacket are:
+
+    a_i = √d_i * e^{1j * (i - m) * k}.
+
+    The sites_count is an argument to the get_potential method of this class.
+
+    Attributes:
+        k: the phase gradient (velocity) of the Gaussian wavepacket.
+        sigma: the standard deviation of the Gaussian density distribution
+            spanning [0, 1] interval, σ = sigma * sites_count.
+        position: the mean position of the Gaussian density spanning [0, 1]
+            interval, m = position * (sites_count - 1) + 1.
+ """ + + k: Real + sigma: Real + position: Real + + def __init__(self, *, k: Real, sigma: Real, position: Real) -> None: + self.k = k + self.sigma = sigma + self.position = position + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + def get_amplitudes(self, sites_count: int) -> np.ndarray: + return _phased_gaussian_amplitudes(sites_count, + self.k, + self.sigma, + self.position) + + +class TrappingPotential: + """Base class for initial states defined by trapping potential on a chain. + """ + + @abc.abstractmethod + def get_potential(self, sites_count: int) -> np.ndarray: + """Calculates trapping potential to use. + + Args: + sites_count: Number of fermionic sites. + + Returns: + Real array of size sites_count with trapping field magnitude at each + site. + """ + + def as_quadratic_hamiltonian(self, + sites_count: int, + j: Union[Real, Iterable[Real]], + ) -> openfermion.QuadraticHamiltonian: + """Creates a nonintercting Hamiltonian H_0 that describes particle + in a trapping potential: + + H_0 = - Σ_i (J_i c_i^† c_{i+1} + h.c.) + Σ_i ε_i n_i, + + where: + i = 1, 2, ..., sites_count are the site indices, + c_i are the fermionic annihilation operators, + n_i = c_i^† c_i are the number operators, + ε_i is the i-th element of potential array obtained through + get_potential(sites_count) call. + + Attributes: + sites_count: Total number of sites. + j: The hopping coefficients J_i. If Iterable is passed, its size + must be equal to sites_count - 1. 
+ """ + return _potential_to_quadratic_hamiltonian( + self.get_potential(sites_count), j) + + +@cirq.json_serializable_dataclass(init=False) +class FixedTrappingPotential(TrappingPotential): + """Fixed array describing trapping potential.""" + particles: int + potential: Tuple[Real] + + def __init__(self, *, particles: int, potential: Iterable[Real]) -> None: + self.particles = particles + self.potential = tuple(potential) + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + def get_potential(self, sites_count: int) -> np.ndarray: + if sites_count != len(self.potential): + raise ValueError(f'Fixed potential not compatible with ' + f'{sites_count} sites') + return np.array(self.potential) + + +@cirq.json_serializable_dataclass(init=False) +class UniformTrappingPotential(TrappingPotential): + """Uniform trapping potential initial state.""" + particles: int + + def __init__(self, *, particles: int) -> None: + self.particles = particles + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + def get_potential(self, sites_count: int) -> np.ndarray: + return np.zeros(sites_count) + + +@cirq.json_serializable_dataclass(init=False) +class GaussianTrappingPotential(TrappingPotential): + """Gaussian shaped trapping potential for creating the initial state. + + The coefficient ε_i at each site i = 1, 2, ..., sites_count is equal: + + ε_i = scale * e^{−0.5 * (i − m)^2 / σ^2}. + + The sites_count is an argument to the get_potential method of this class. + + Args: + particles: the number of particles in the potential. + center: the center position of the Gaussian potential spanning [0, 1] + interval, m = center * (sites_count - 1) + 1. + sigma: the standard deviation of the Gaussian spanning [0, 1] interval, + σ = sigma * (sites_count - 1). + scale: the scale of the potential s. 
+ """ + + particles: int + center: Real + sigma: Real + scale: Real + + def __init__(self, *, + particles: int, + center: Real, + sigma: Real, + scale: Real) -> None: + self.particles = particles + self.center = center + self.sigma = sigma + self.scale = scale + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return {cls.__name__: cls} + + def get_potential(self, sites_count: int) -> np.ndarray: + def gaussian(x: int) -> float: + return self.scale * np.exp(-0.5 * ((x - self.center) / + self.sigma) ** 2) + return np.array([gaussian(x) for x in np.linspace(0, 1, sites_count)]) + + +ChainInitialState = Union[FixedSingleParticle, + PhasedGaussianSingleParticle, + FixedTrappingPotential, + GaussianTrappingPotential, + UniformSingleParticle, + UniformTrappingPotential] +"""Initial state that describes independent, noninteracting chain.""" + + +@cirq.json_serializable_dataclass(init=False) +class IndependentChainsInitialState: + """Initial state with two independent, noninteracting chains.""" + + up: ChainInitialState + down: ChainInitialState + + def __init__(self, *, + up: ChainInitialState, + down: ChainInitialState) -> None: + self.up = up + self.down = down + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return { + cls.__name__: cls, + **FixedSingleParticle.cirq_resolvers(), + **PhasedGaussianSingleParticle.cirq_resolvers(), + **FixedTrappingPotential.cirq_resolvers(), + **GaussianTrappingPotential.cirq_resolvers(), + **UniformSingleParticle.cirq_resolvers(), + **UniformTrappingPotential.cirq_resolvers() + } + + @property + def up_particles(self) -> int: + return self.up.particles + + @property + def down_particles(self) -> int: + return self.down.particles + + +InitialState = IndependentChainsInitialState +"""Arbitrary initial state type.""" + + +@cirq.json_serializable_dataclass +class FermiHubbardParameters: + """Parameters of a Fermi-Hubbard problem instance. 
+ + This container uniquely defines all the parameters necessary to simulate + the Fermi-Hubbard problem, including mapping of Fermions on the chip. + + Attributes: + hamiltonian: Hamiltonian used for dynamics evolution. + initial_state: Initial state description used to prepare quantum state + for dynamics evolution. + layout: Description of mapping of fermions on a quantum processor. + dt: Time evolution constant that defies Trotter step length. + """ + hamiltonian: Hamiltonian + initial_state: InitialState + layout: QubitsLayout + dt: float + + @classmethod + def cirq_resolvers(cls) -> Dict[str, Optional[Type]]: + return { + cls.__name__: cls, + **Hamiltonian.cirq_resolvers(), + **IndependentChainsInitialState.cirq_resolvers(), + **LineLayout.cirq_resolvers(), + **ZigZagLayout.cirq_resolvers(), + } + + @property + def sites_count(self) -> int: + return self.layout.size + + @property + def up_particles(self) -> int: + return self.initial_state.up_particles + + @property + def down_particles(self) -> int: + return self.initial_state.down_particles + + @property + def up_down_particles(self) -> Tuple[int, int]: + return self.up_particles, self.down_particles + + def representative_parameters(self) -> 'FermiHubbardParameters': + return FermiHubbardParameters(self.hamiltonian, + self.initial_state, + self.layout.default_layout(), + self.dt) + + def equals_for_rescaling(self, other: 'FermiHubbardParameters') -> bool: + if not isinstance(other, FermiHubbardParameters): + return False + if type(self.layout) != type(other.layout): + return False + if isinstance(self.layout, ZigZagLayout): + interacting = not np.allclose(self.hamiltonian.u, 0.0) + other_interacting = not np.allclose(other.hamiltonian.u, 0.0) + return interacting == other_interacting + return False + + +def _check_normalized(array: Iterable[Number]) -> None: + if not np.isclose(np.linalg.norm(array), 1): + raise ValueError('Array is not normalized') + + +def _phased_gaussian_amplitudes(sites_count: int, + 
k_t0: float, + sigma: float, + position: float) -> np.ndarray: + + def gaussian(x): + return np.exp(-0.5 * ((x - position) / sigma) ** 2) + + densities = np.array( + [gaussian(x) for x in np.linspace(0, 1, sites_count)], dtype=float) + densities /= np.sum(densities) + + return np.array( + [np.sqrt(densities[i]) * np.exp(1j * (x - position) * k_t0) + for i, x in enumerate(np.linspace(0, 1, sites_count))], dtype=complex) + + +def _potential_to_quadratic_hamiltonian( + potential: np.ndarray, + j: Union[Real, Iterable[Real]] +) -> openfermion.QuadraticHamiltonian: + sites_count = len(potential) + + if isinstance(j, Iterable): + j = np.array(j) + else: + j = np.full(sites_count - 1, j) + + if len(j) != sites_count - 1: + raise ValueError('Hopping coefficient size incompatible with potential') + + # Prepare one-body matrix T_ij. + one_body = np.zeros((sites_count, sites_count)) + + # Nearest-neighbor hopping terms. + for i in range(sites_count - 1): + one_body[i, i + 1] += -j[i] + one_body[i + 1, i] += -j[i] + + # Local interaction terms. + for i in range(sites_count): + one_body[i, i] += potential[i] + + return openfermion.QuadraticHamiltonian(one_body) + + +def _iterable_to_tuple(value: Any) -> Any: + return tuple(value) if isinstance(value, Iterable) else value diff --git a/recirq/fermi_hubbard/parameters_test.py b/recirq/fermi_hubbard/parameters_test.py new file mode 100644 index 00000000..e1e57250 --- /dev/null +++ b/recirq/fermi_hubbard/parameters_test.py @@ -0,0 +1,84 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from recirq.fermi_hubbard.parameters import ( + GaussianTrappingPotential, + PhasedGaussianSingleParticle, + UniformSingleParticle, + UniformTrappingPotential +) + +import numpy as np + + +def test_phased_gaussian_single_particle(): + + chain = PhasedGaussianSingleParticle(k=1.2 * 7, + sigma=1.2 / 7, + position=1.5 / 7) + amplitudes = chain.get_amplitudes(sites_count=8) + + np.testing.assert_allclose( + amplitudes, + [ + -0.09060882 - 0.3883731j, + 0.46578491 - 0.3186606j, + 0.46578491 + 0.3186606j, + -0.09060882 + 0.3883731j, + -0.19714994 + 0.02810304j, + -0.034451 - 0.06124629j, + 0.0111212 - 0.01354051j, + 0.00293383 + 0.00096188j + ], + rtol=1e-5 + ) + + assert np.isclose(np.linalg.norm(amplitudes), 1) + + +def test_uniform_single_particle(): + chain = UniformSingleParticle() + amplitudes = chain.get_amplitudes(sites_count=8) + np.testing.assert_allclose(amplitudes, 1 / np.sqrt(8)) + + +def test_gaussian_trapping_potential(): + + chain = GaussianTrappingPotential( + particles=2, + center=0.5, + sigma=1 / 7, + scale=-4) + potential = chain.get_potential(sites_count=8) + + np.testing.assert_allclose( + potential, + [ + -0.00874996, + -0.17574773, + -1.29860987, + -3.52998761, + -3.52998761, + -1.29860987, + -0.17574773, + -0.00874996 + ], + rtol=1e-5 + ) + + +def test_uniform_trapping_potential(): + chain = UniformTrappingPotential(particles=2) + potential = chain.get_potential(sites_count=8) + np.testing.assert_allclose(potential, 0.0) \ No newline at end of file diff --git a/recirq/fermi_hubbard/post_processing.py b/recirq/fermi_hubbard/post_processing.py new file mode 100644 index 00000000..9d7eb3e3 --- /dev/null +++ b/recirq/fermi_hubbard/post_processing.py @@ -0,0 +1,773 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Results post-processing and error mitigation.""" + +from typing import (Callable, Dict, Iterable, List, Optional, Sequence, Tuple, + Union) + +from collections import defaultdict +from dataclasses import dataclass +from functools import lru_cache, partial +import numpy as np +from scipy.optimize import least_squares +from scipy.stats import linregress + +from recirq.fermi_hubbard.execution import ( + ExperimentRun, + FermiHubbardExperiment, + run_experiment +) +from recirq.fermi_hubbard.parameters import FermiHubbardParameters + +import cirq + + +@dataclass +class Rescaling: + """Parameters for amplitudes rescaling. We assume the following + approximate relation: + + ( None: + self.average = list(np.array(a) for a in average) + self.std_error = list(np.array(e) for e in std_error) + self.std_dev = list(np.array(d) for d in std_dev) if std_dev else None + self.values = values + + @property + def chains_count(self) -> int: + return len(self.average) + + +@dataclass(init=False) +class AggregatedQuantity: + """Measured quantity with per-chain values. + + Each inner numpy array on the attributes below is a 1D array indexed by a + Trotter step count, it is of size η, where η is number of Trotter steps + analysed. + + Attributes: + average: List of average values for each chain. + std_error: List of standard errors of a mean for each chain. + std_dev: List of standard deviations for chain. + values: List of list of non-aggregated results. 
The outer list iterates + over chains analysed, the inner list iterates over Trotter steps + and the numpy array lists values for different experiment + realizations (configurations). + """ + + average: List[np.ndarray] + std_error: List[np.ndarray] + std_dev: Optional[List[np.ndarray]] + values: Optional[List[List[np.ndarray]]] + + def __init__(self, *, + average: Iterable[Iterable[float]], + std_error: Iterable[Iterable[float]], + std_dev: Optional[Iterable[Iterable[float]]] = None, + values: Optional[List[List[np.ndarray]]] = None + ) -> None: + self.average = list(np.array(a) for a in average) + self.std_error = list(np.array(e) for e in std_error) + self.std_dev = list(np.array(d) for d in std_dev) if std_dev else None + self.values = values + + @property + def chains_count(self) -> int: + return len(self.average) + + +class InstanceBundle: + """Bundle of many realizations of the same Fermi-Hubbard problem. + + This class is a holder for post-processed data extracted over many + realizations (with different qubits assignments assigned by different + layouts). + + It calculates various quantities with or without amplitudes rescaling. The + quantities are averaged over all experiments passed on initialization. + + The calculate rescaled quantities (or more precisely the intrinsic + rescaling), exact numerical simulations needs to be calculated. They are + calculated only once and cached for later use. + """ + + def __init__(self, + experiments: Iterable[FermiHubbardExperiment], + steps: Optional[Iterable[int]] = None, + rescale_steps: Optional[Iterable[int]] = None, + post_select: bool = True, + numerics_transform: Optional[ + Callable[[FermiHubbardParameters], FermiHubbardParameters] + ] = None, + exact_numerics: bool = False) -> None: + """Initializes experiment instance bundle. + + The initializer is fast and all the quantities are calculated lazily, on + demand. + + Args: + experiments: List of experiments to analyze. 
They must solve the + same Fermi-Hubbard problem (have exactly the same Hamiltonian, + initial state and Trotter step length). The only part which + might differ is qubits layout, which realizes different + configurations. + steps: Trotter steps to analyse. If provided, it must be a subset of + Trotter steps that occur on experiments list. If not provided, + the intersection of Trotter steps that occur on experiments + list will be used. + rescale_steps: Trotter steps which should be used for rescaling + technique. If not provided, all steps used for analysis will + be used. It might be desirable to restrict rescaling steps to + only the high quality experiments (for example of lower depth). + post_select: If true, the post selected results are analysed. + numerics_transform: Optional function that transforms the problem + parameters before running the numerical simulations (numerical + simulation are necessary in order to obtain the reference + amplitudes for rescaling). For example, it might be used to + compensate for parasitic cphase. + exact_numerics: If true, indicates that this bundle is result of + exact numerical simulation and rescaling technique might assume + perfect amplitudes. This variable is used internally. 
+ """ + self.experiments = list(experiments) + self.exact_numerics = exact_numerics + self._exact_numerics_bundle: Optional['InstanceBundle'] = None + self._numerics_transform = numerics_transform + self._post_select = post_select + self._steps, self._runs, self._parameters = _split_experiments( + experiments, steps) + self._rescale_steps = list(rescale_steps) if rescale_steps else None + self._rescaling = None + + @property + def representative_parameters(self) -> FermiHubbardParameters: + """Problem parameters with a default layout.""" + return self._parameters + + @property + def steps(self) -> List[int]: + """List of Trotter steps analysed.""" + return self._steps + + @property + def u(self) -> float: + """Interaction strength parameter analysed.""" + return self._parameters.hamiltonian.u + + @property + def dt(self) -> float: + """Trotter step length analysed.""" + return self._parameters.dt + + @property + @lru_cache(maxsize=None) + def quantities(self) -> Dict[str, Callable[[], Union[None, + PerSiteQuantity, + AggregatedQuantity]]]: + quantity_funcs = [ + self.up_down_density, + self.up_down_position_average, + self.up_down_position_average_dt, + self.up_down_spreading, + self.up_down_spreading_dt, + self.charge_spin_density, + self.charge_spin_position_average, + self.charge_spin_position_average_dt, + self.charge_spin_spreading, + self.charge_spin_spreading_dt + ] + return { + **{func.__name__: partial(func, True) for func in quantity_funcs}, + **{f'{func.__name__}_norescale': partial(func, False) + for func in quantity_funcs}, + **{'scaling': self.scaling, + 'post_selection': self.post_selection} + } + + @property + def rescaling(self) -> Optional[Rescaling]: + """The rescaling value used for analysis. + + This might be set to arbitrary value and override the intrinsic + rescaling of the underlying data set. 
+ """ + if self._rescaling is None: + return self.intrinsic_rescaling + return self._rescaling + + @rescaling.setter + def rescaling(self, value: Rescaling) -> None: + self._rescaling = value + self.up_down_density.cache_clear() + self.up_down_position_average.cache_clear() + self.up_down_position_average_dt.cache_clear() + self.up_down_spreading.cache_clear() + self.up_down_spreading_dt.cache_clear() + self.charge_spin_density.cache_clear() + self.charge_spin_position_average.cache_clear() + self.charge_spin_position_average_dt.cache_clear() + self.charge_spin_spreading.cache_clear() + self.charge_spin_spreading_dt.cache_clear() + + @property + @lru_cache(maxsize=None) + def intrinsic_rescaling(self) -> Optional[Rescaling]: + """Rescaling for the underlying data set calculated against the exact + numerical simulations. + """ + if self.exact_numerics: + return None + return _find_rescaling_factors(self.scaling(), + self.steps, + self._rescale_steps) + + @lru_cache(maxsize=None) + def up_down_density(self, rescaled: bool = True) -> PerSiteQuantity: + rescaling = self.rescaling if rescaled else None + return _extract_up_down_densities(self._parameters.up_down_particles, + self._runs, + rescaling, + post_select=self._post_select) + + @lru_cache(maxsize=None) + def up_down_position_average(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _extract_position_average(self.up_down_density(rescaled)) + + @lru_cache(maxsize=None) + def up_down_position_average_dt(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _find_derivative(self.up_down_position_average(rescaled), + self.dt) + + @lru_cache(maxsize=None) + def up_down_spreading(self, rescaled: bool = True) -> AggregatedQuantity: + return _extract_spreading(self.up_down_density(rescaled)) + + @lru_cache(maxsize=None) + def up_down_spreading_dt(self, rescaled: bool = True) -> AggregatedQuantity: + return _find_derivative(self.up_down_spreading(rescaled), self.dt) + + @lru_cache(maxsize=None) + 
def charge_spin_density(self, rescaled: bool = True) -> PerSiteQuantity: + rescaling = self.rescaling if rescaled else None + return _extract_charge_spin_densities( + self._parameters.up_down_particles, + self._runs, + rescaling, + post_select=self._post_select) + + @lru_cache(maxsize=None) + def charge_spin_position_average(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _extract_position_average(self.charge_spin_density(rescaled)) + + @lru_cache(maxsize=None) + def charge_spin_position_average_dt(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _find_derivative(self.charge_spin_position_average(rescaled), + self.dt) + + @lru_cache(maxsize=None) + def charge_spin_spreading(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _extract_spreading(self.charge_spin_density(rescaled)) + + @lru_cache(maxsize=None) + def charge_spin_spreading_dt(self, rescaled: bool = True + ) -> AggregatedQuantity: + return _find_derivative(self.charge_spin_spreading(rescaled), self.dt) + + @lru_cache(maxsize=None) + def scaling(self) -> Optional[AggregatedQuantity]: + if self.exact_numerics: + return None + up_down = self.up_down_density(rescaled=False) + up_down_numerics = self.exact_numerics_bundle().up_down_density( + rescaled=False) + return _find_quantity_scaling(self._parameters.up_down_particles, + up_down, + up_down_numerics) + + @lru_cache(maxsize=None) + def post_selection(self) -> AggregatedQuantity: + return _extract_post_selection(self._runs) + + def exact_numerics_bundle(self) -> 'InstanceBundle': + """Retrieves the exact numerical simulation for an underlying problem. + + It triggers the simulation if neither self.exact_numerics_bundle nor + self.cache_exact_numerics ran before. + + Returns: + The instance of InstanceBundle which represents the exact numerical + simulation results for the underlying problem parameters. 
+ """ + self.cache_exact_numerics() + return self._exact_numerics_bundle + + def cache_exact_numerics( + self, + keep_raw_result: bool = False, + threads: int = 4, + pre_run_func: Optional[Callable[[int, cirq.Circuit], None]] = None, + post_run_func: Optional[Callable[[int, cirq.Result], None]] = None + ) -> None: + """Calculates the exact numerical simulation for an underlying problem. + + Returns: + The instance of InstanceBundle which represents the exact numerical + simulation results for the underlying problem parameters. + """ + + if self._exact_numerics_bundle is not None: + return + + if self.exact_numerics: + self._exact_numerics_bundle = self + + if self._numerics_transform: + parameters = self._numerics_transform(self._parameters) + else: + parameters = self._parameters + + simulation = run_experiment(parameters, + self.steps, + keep_raw_result=keep_raw_result, + threads=threads, + pre_run_func=pre_run_func, + post_run_func=post_run_func) + self._exact_numerics_bundle = InstanceBundle((simulation,), + self.steps, + exact_numerics=True) + + +def _extract_up_down_densities( + up_down_particles: Tuple[int, int], + runs: Iterable[Iterable[ExperimentRun]], + rescaling: Optional[Rescaling] = None, + post_select: bool = True +) -> PerSiteQuantity: + + def up_down_transformation(up_density: np.ndarray, + down_density: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray]: + return np.array(up_density), np.array(down_density) + + return _extract_densities(up_down_particles=up_down_particles, + runs=runs, + transformation_func=up_down_transformation, + rescaling=rescaling, + post_select=post_select) + + +def _extract_charge_spin_densities( + up_down_particles: Tuple[int, int], + runs: Iterable[Iterable[ExperimentRun]], + rescaling: Optional[Rescaling] = None, + post_select: bool = True +) -> PerSiteQuantity: + + def charge_spin_transformation(up_density: np.ndarray, + down_density: np.ndarray + ) -> Tuple[np.ndarray, np.ndarray]: + up_density = np.array(up_density) + 
def _extract_densities(
        up_down_particles: Tuple[int, int],
        runs: Iterable[Iterable[ExperimentRun]],
        transformation_func: Callable[[np.ndarray, np.ndarray],
                                      Tuple[np.ndarray, np.ndarray]],
        rescaling: Optional[Rescaling] = None,
        post_select: bool = True
) -> PerSiteQuantity:
    """Collects per-site densities and their statistics for every step.

    For each Trotter step, the (up, down) densities of every run are
    optionally rescaled, mapped through transformation_func into a pair of
    chains, and summarized by mean, sample standard deviation (ddof=1) and
    standard error over the runs.
    """
    up_particles, down_particles = up_down_particles

    averages = [[], []]
    std_devs = [[], []]
    std_errors = [[], []]
    values = [[], []]

    for step_runs in runs:
        step_values = ([], [])

        for run in step_runs:
            if post_select:
                results = run.result_post_selected
            else:
                results = run.result_raw

            up_density = np.array(results.up_density)
            down_density = np.array(results.down_density)

            if rescaling:
                up_density = _rescale_density(up_particles,
                                              run.trotter_steps,
                                              up_density,
                                              rescaling)
                down_density = _rescale_density(down_particles,
                                                run.trotter_steps,
                                                down_density,
                                                rescaling)

            for chain, density in enumerate(
                    transformation_func(up_density, down_density)):
                step_values[chain].append(density)

        for chain in range(2):
            densities = np.array(step_values[chain])
            values[chain].append(densities)
            deviation = np.std(densities, axis=0, ddof=1)
            averages[chain].append(np.average(densities, axis=0))
            std_devs[chain].append(deviation)
            std_errors[chain].append(deviation / np.sqrt(len(densities)))

    return PerSiteQuantity(average=averages,
                           std_dev=std_devs,
                           std_error=std_errors,
                           values=values)
def _extract_position_average(quantity: PerSiteQuantity) -> AggregatedQuantity:
    """Center of mass with 1-based site positions as weights."""

    def weighted_position(values_step: np.ndarray) -> np.ndarray:
        # enumerate(..., start=1) gives the 1-based site index directly.
        return sum(site_values * position
                   for position, site_values in enumerate(values_step.T,
                                                          start=1))

    return _aggregate_quantity(quantity, weighted_position)


def _extract_spreading(quantity: PerSiteQuantity) -> AggregatedQuantity:
    """Density spreading: sum of |distance from chain center| weights."""

    def absolute_spread(values_step: np.ndarray) -> np.ndarray:
        sites = values_step.T
        center = 0.5 * (len(sites) + 1)
        return sum(site_values * abs(position - center)
                   for position, site_values in enumerate(sites, start=1))

    return _aggregate_quantity(quantity, absolute_spread)


def _aggregate_quantity(quantity: PerSiteQuantity,
                        aggregate_func: Callable[[np.ndarray], np.ndarray]
                        ) -> AggregatedQuantity:
    """Reduces per-site values to one number per (chain, step).

    Applies aggregate_func over the sites of each step and summarizes the
    per-run aggregates by mean, sample standard deviation (ddof=1) and
    standard error.
    """
    averages = []
    std_devs = []
    std_errors = []
    values = []
    for chain_steps in quantity.values:
        chain_averages, chain_devs, chain_errors, chain_values = [], [], [], []
        for values_step in chain_steps:
            aggregate = aggregate_func(values_step)
            deviation = np.std(aggregate, axis=0, ddof=1)
            chain_values.append(aggregate)
            chain_averages.append(np.average(aggregate, axis=0))
            chain_devs.append(deviation)
            chain_errors.append(deviation / np.sqrt(len(aggregate)))
        averages.append(chain_averages)
        std_devs.append(chain_devs)
        std_errors.append(chain_errors)
        values.append(chain_values)

    return AggregatedQuantity(average=averages,
                              std_dev=std_devs,
                              std_error=std_errors,
                              values=values)
def _extract_post_selection(runs: Iterable[Iterable[ExperimentRun]]
                            ) -> AggregatedQuantity:
    """Fraction of measurements retained by post-selection, per step.

    The single "chain" axis of the returned quantity holds, for every step,
    the retained fractions of each run together with their mean, sample
    standard deviation (ddof=1) and standard error.
    """
    averages = []
    std_devs = []
    std_errors = []
    values = []

    for step_runs in runs:
        fractions = np.array([
            run.result_post_selected.measurements_count /
            run.result.measurements_count
            for run in step_runs
        ])
        deviation = np.std(fractions, ddof=1)
        values.append(fractions)
        averages.append(np.average(fractions))
        std_devs.append(deviation)
        std_errors.append(deviation / np.sqrt(len(fractions)))

    return AggregatedQuantity(average=[averages],
                              std_error=[std_errors],
                              std_dev=[std_devs],
                              values=[values])
+ rescale_steps = set(rescale_steps) if rescale_steps else None + scales, errors, scaling_steps = [], [], [] + for index, step in enumerate(steps): + if rescale_steps is None or step in rescale_steps: + scaling_steps.append(step) + scales.append([scaling.average[chain][index] + for chain in range(len(scaling.average))]) + errors.append([scaling.std_error[chain][index] + for chain in range(len(scaling.std_error))]) + if rescale_steps: + rescale_steps.remove(step) + + if rescale_steps: + raise ValueError(f'Missing Trotter steps {rescale_steps} for rescaling') + + if not scales or not errors: + raise ValueError('No data points for rescaling') + + scaling_steps = np.array(scaling_steps) + scales = np.array(scales).T + errors = np.array(errors).T + + # Find the scaling for each site by aggregating all the chains. + expected = 0 + inv_variance_sum = 0 + for chain in range(len(scales)): + inv_variance = 1.0 / errors[chain, :] ** 2 + inv_variance_sum += inv_variance + expected += scales[chain, :] * inv_variance + expected /= inv_variance_sum + + # Fit a line to the per-site scaling factors. 
+ def scales_linear_fit(x): + scale, offset = x + fit = scaling_steps * scale + offset + return expected - fit + + scale, offset = least_squares(scales_linear_fit, np.array([0.0, 1.0])).x + + return Rescaling(scale, offset) + + +def _find_quantity_scaling(particles: Sequence[int], + experiment: PerSiteQuantity, + numerics: PerSiteQuantity + ) -> AggregatedQuantity: + + assert len(particles) == len(experiment.average) == len(numerics.average), ( + "Incompatible array dimensions for scaling calculation") + + scales = [] + std_errors = [] + for experiment_steps, numerics_steps, chain_particles in zip( + experiment.average, numerics.average, particles): + scales.append([]) + std_errors.append([]) + for experiment_density, numerics_density in zip(experiment_steps, + numerics_steps): + center = chain_particles / len(experiment_density) + scale, _, _, _, error = linregress(numerics_density - center, + experiment_density - center) + scales[-1].append(scale) + std_errors[-1].append(error) + + return AggregatedQuantity(average=scales, std_error=std_errors) + + +def _split_experiments(experiments: Iterable[FermiHubbardExperiment], + steps: Optional[Iterable[int]] = None + ) -> Tuple[List[int], + List[List[ExperimentRun]], + FermiHubbardParameters]: + """Splits a list of experiments into list of runs for each Trotter step. + + Args: + experiments: List of experiments to analyze. They must solve the same + Fermi-Hubbard problem (have exactly the same Hamiltonian, initial + state and Trotter step length). The only part which might differ is + qubits layout, which realizes different configurations. + steps: Trotter steps to analyse. If provided, it must be a subset of + Trotter steps that occur on experiments list. If not provided, the + intersection of Trotter steps that occur on experiments list will be + used. + + Returns: + Tuple of: + - List of Trotter steps analysed. + - For each Trotter step, list of experiment runs from different + experiments. 
+ - The representative problem parameters (parameters with a default + layout). + """ + + runs = defaultdict(lambda: []) + instance = None + for experiment in experiments: + parameters = experiment.parameters.representative_parameters() + if instance is None: + instance = parameters + elif instance != parameters: + raise ValueError( + 'Incompatible Fermi-Hubbard instances for instance analysis') + + for run in experiment.runs: + runs[run.trotter_steps].append(run) + + steps = list(sorted(steps if steps else runs.keys())) + + missing = set(steps).difference(runs.keys()) + if missing: + raise ValueError(f'No experiment to cover Trotter steps {steps}') + + if not all(np.array(steps[1:]) - steps[:-1] == 1): + raise ValueError(f'Nonconsecutive Trotter steps {steps}') + + return steps, [runs[step] for step in steps], instance + + +def find_bundles_rescalings( + bundles: Iterable[InstanceBundle] +) -> Tuple[Tuple[List[InstanceBundle], Rescaling], ...]: + """Finds a rescaling parameters for each subset of compatible instances. + + Args: + bundles: Iterable of bundles to extract rescaling values from. + + Returns: + Tuple of subgroups of bundles with an average rescaling value. 
+ """ + + groups = [] + for bundle in bundles: + parameters = bundle.representative_parameters + for group in groups: + group_parameters = group[0].representative_parameters + if parameters.equals_for_rescaling(group_parameters): + group.append(bundle) + break + else: + groups.append([bundle]) + + def mean_rescaling(group: List[InstanceBundle]) -> Rescaling: + slopes, intercepts = [], [] + for instance in group: + slopes.append(instance.intrinsic_rescaling.slope) + intercepts.append(instance.intrinsic_rescaling.intercept) + return Rescaling(np.average(slopes), np.average(intercepts)) + + return tuple((group, mean_rescaling(group)) for group in groups) + + +def apply_rescalings_to_bundles( + rescalings: Tuple[Tuple[List[InstanceBundle], Rescaling], ...]) -> None: + """Applies the averaged rescaling to each bundle within the subgroups.""" + for group, rescaling in rescalings: + for bundle in group: + bundle.rescaling = rescaling diff --git a/recirq/fermi_hubbard/post_processing_test.py b/recirq/fermi_hubbard/post_processing_test.py new file mode 100644 index 00000000..46891edd --- /dev/null +++ b/recirq/fermi_hubbard/post_processing_test.py @@ -0,0 +1,586 @@ +# Copyright 2020 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List, Optional + +import numpy as np + +from recirq.fermi_hubbard.execution import ( + ExperimentResult, + ExperimentRun, + FermiHubbardExperiment +) +from recirq.fermi_hubbard.layouts import ( + LineLayout +) +from recirq.fermi_hubbard.parameters import ( + FermiHubbardParameters, + Hamiltonian, + IndependentChainsInitialState, + UniformSingleParticle +) +from recirq.fermi_hubbard.post_processing import InstanceBundle + + +def test_up_down_density() -> None: + experiments = [ + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(0.0, 1.0, 2.0), + down_density=(3.0, 4.0, 5.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(6.0, 7.0, 8.0), + down_density=(9.0, 10.0, 11.0), + measurements_count=1 + ) + ] + ), + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(12.0, 13.0, 14.0), + down_density=(17.0, 16.0, 15.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(18.0, 19.0, 20.0), + down_density=(23.0, 22.0, 21.0), + measurements_count=1 + ) + ] + ) + ] + + bundle = InstanceBundle(experiments) + up_down_density = bundle.up_down_density(rescaled=False) + + np.testing.assert_allclose( + up_down_density.values, + [ # Chains + [ # Trotter steps + [ # Experiments + [0, 1, 2], # Sites + [12, 13, 14] + ], + [ + [6, 7, 8], + [18, 19, 20] + ] + ], + [ + [ + [3, 4, 5], + [17, 16, 15] + ], + [ + [9, 10, 11], + [23, 22, 21] + ] + ] + ] + ) + + np.testing.assert_allclose( + up_down_density.average, + [ # Chains + [ # Trotter steps + [6, 7, 8], # Sites + [12, 13, 14] + ], + [ + [10, 10, 10], + [16, 16, 16] + ] + ] + ) + + std_dev_5 = np.sqrt(5 ** 2 + 5 ** 2) + std_dev_6 = np.sqrt(6 ** 2 + 6 ** 2) + std_dev_7 = np.sqrt(7 ** 2 + 7 ** 2) + + np.testing.assert_allclose( + up_down_density.std_dev, + [ # Chains + [ # Trotter steps + [std_dev_6, std_dev_6, std_dev_6], # Sites + [std_dev_6, std_dev_6, 
std_dev_6] + ], + [ + [std_dev_7, std_dev_6, std_dev_5], + [std_dev_7, std_dev_6, std_dev_5] + ] + ] + ) + + std_error_5 = std_dev_5 / np.sqrt(2) + std_error_6 = std_dev_6 / np.sqrt(2) + std_error_7 = std_dev_7 / np.sqrt(2) + + np.testing.assert_allclose( + up_down_density.std_error, + [ # Chains + [ # Trotter steps + [std_error_6, std_error_6, std_error_6], # Sites + [std_error_6, std_error_6, std_error_6] + ], + [ + [std_error_7, std_error_6, std_error_5], + [std_error_7, std_error_6, std_error_5] + ] + ] + ) + + +def test_up_down_position_average() -> None: + experiments = [ + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(0.0, 1.0, 2.0), + down_density=(3.0, 4.0, 5.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(6.0, 7.0, 8.0), + down_density=(9.0, 10.0, 11.0), + measurements_count=1 + ) + ] + ), + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(12.0, 13.0, 14.0), + down_density=(17.0, 16.0, 15.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(18.0, 19.0, 20.0), + down_density=(23.0, 22.0, 21.0), + measurements_count=1 + ) + ] + ) + ] + + bundle = InstanceBundle(experiments) + up_down_position_average = bundle.up_down_position_average(rescaled=False) + + np.testing.assert_allclose( + up_down_position_average.values, + [ # Chains + [ # Trotter steps + [8, 80], # Experiments + [44, 116] + ], + [ + [26, 94], + [62, 130] + ] + ] + ) + + np.testing.assert_allclose( + up_down_position_average.average, + [ # Chains + [44, 80], # Trotter steps + [60, 96] + ] + ) + + std_dev_34 = np.sqrt(34 ** 2 + 34 ** 2) + std_dev_36 = np.sqrt(36 ** 2 + 36 ** 2) + + np.testing.assert_allclose( + up_down_position_average.std_dev, + [ # Chains + [std_dev_36, std_dev_36], # Trotter steps + [std_dev_34, std_dev_34] + ] + ) + + std_error_34 = std_dev_34 / np.sqrt(2) + std_error_36 = std_dev_36 / np.sqrt(2) + + 
np.testing.assert_allclose( + up_down_position_average.std_error, + [ # Chains + [std_error_36, std_error_36], # Trotter steps + [std_error_34, std_error_34] + ] + ) + + +def test_charge_spin_density() -> None: + experiments = [ + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(0.0, 1.0, 2.0), + down_density=(3.0, 4.0, 5.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(6.0, 7.0, 8.0), + down_density=(9.0, 10.0, 11.0), + measurements_count=1 + ) + ] + ), + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(12.0, 13.0, 14.0), + down_density=(17.0, 16.0, 15.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(18.0, 19.0, 20.0), + down_density=(23.0, 22.0, 21.0), + measurements_count=1 + ) + ] + ) + ] + + bundle = InstanceBundle(experiments) + charge_spin = bundle.charge_spin_density(rescaled=False) + + np.testing.assert_allclose( + charge_spin.values, + [ # Chains + [ # Trotter steps + [ # Experiments + [3, 5, 7], # Sites + [29, 29, 29] + ], + [ + [15, 17, 19], + [41, 41, 41] + ] + ], + [ + [ + [-3, -3, -3], + [-5, -3, -1] + ], + [ + [-3, -3, -3], + [-5, -3, -1] + ] + ] + ] + ) + + np.testing.assert_allclose( + charge_spin.average, + [ # Chains + [ # Trotter steps + [16, 17, 18], # Sites + [28, 29, 30] + ], + [ + [-4, -3, -2], + [-4, -3, -2] + ] + ] + ) + + std_dev_0 = np.sqrt(0 ** 2 + 0 ** 2) + std_dev_1 = np.sqrt(1 ** 2 + 1 ** 2) + std_dev_11 = np.sqrt(11 ** 2 + 11 ** 2) + std_dev_12 = np.sqrt(12 ** 2 + 12 ** 2) + std_dev_13 = np.sqrt(13 ** 2 + 13 ** 2) + + + np.testing.assert_allclose( + charge_spin.std_dev, + [ # Chains + [ # Trotter steps + [std_dev_13, std_dev_12, std_dev_11], # Sites + [std_dev_13, std_dev_12, std_dev_11] + ], + [ + [std_dev_1, std_dev_0, std_dev_1], + [std_dev_1, std_dev_0, std_dev_1] + ] + ] + ) + + std_error_0 = std_dev_0 / np.sqrt(2) + std_error_1 = std_dev_1 / 
np.sqrt(2) + std_error_11 = std_dev_11 / np.sqrt(2) + std_error_12 = std_dev_12 / np.sqrt(2) + std_error_13 = std_dev_13 / np.sqrt(2) + + np.testing.assert_allclose( + charge_spin.std_error, + [ # Chains + [ # Trotter steps + [std_error_13, std_error_12, std_error_11], # Sites + [std_error_13, std_error_12, std_error_11] + ], + [ + [std_error_1, std_error_0, std_error_1], + [std_error_1, std_error_0, std_error_1] + ] + ] + ) + + +def test_charge_spin_spreading() -> None: + experiments = [ + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(0.0, 1.0, 2.0), + down_density=(3.0, 4.0, 5.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(6.0, 7.0, 8.0), + down_density=(9.0, 10.0, 11.0), + measurements_count=1 + ) + ] + ), + _create_experiment( + sites_count=3, + trotter_steps=[1, 2], + results_post_selected=[ + ExperimentResult( + up_density=(12.0, 13.0, 14.0), + down_density=(17.0, 16.0, 15.0), + measurements_count=1 + ), + ExperimentResult( + up_density=(18.0, 19.0, 20.0), + down_density=(23.0, 22.0, 21.0), + measurements_count=1 + ) + ] + ) + ] + + bundle = InstanceBundle(experiments) + charge_spin_spreading = bundle.charge_spin_spreading(rescaled=False) + + np.testing.assert_allclose( + charge_spin_spreading.values, + [ # Chains + [ # Trotter steps + [10, 58], # Experiments + [34, 82] + ], + [ + [-6, -6], + [-6, -6] + ] + ] + ) + + np.testing.assert_allclose( + charge_spin_spreading.average, + [ # Chains + [34, 58], # Trotter steps + [-6, -6] + ] + ) + + std_dev_0 = np.sqrt(0 ** 2 + 0 ** 2) + std_dev_24 = np.sqrt(24 ** 2 + 24 ** 2) + + np.testing.assert_allclose( + charge_spin_spreading.std_dev, + [ # Chains + [std_dev_24, std_dev_24], # Trotter steps + [std_dev_0, std_dev_0] + ] + ) + + std_error_0 = std_dev_0 / np.sqrt(2) + std_error_24 = std_dev_24 / np.sqrt(2) + + np.testing.assert_allclose( + charge_spin_spreading.std_error, + [ # Chains + [std_error_24, std_error_24], # 
def test_post_selection() -> None:
    """Post-selection fraction is measurements kept / measurements taken."""

    def result(count: int) -> ExperimentResult:
        # Densities are irrelevant here; only measurement counts matter.
        return ExperimentResult(up_density=(0.0,),
                                down_density=(0.0,),
                                measurements_count=count)

    experiments = [
        _create_experiment(
            sites_count=1,
            trotter_steps=[1, 2],
            results=[result(10), result(10)],
            results_post_selected=[result(3), result(4)]
        ),
        _create_experiment(
            sites_count=1,
            trotter_steps=[1, 2],
            results=[result(10), result(10)],
            results_post_selected=[result(5), result(6)]
        )
    ]

    bundle = InstanceBundle(experiments)
    post_selection = bundle.post_selection()

    np.testing.assert_allclose(
        post_selection.values,
        [  # Chains
            [  # Trotter steps
                [3 / 10, 5 / 10],  # Experiments
                [4 / 10, 6 / 10]
            ]
        ]
    )

    np.testing.assert_allclose(
        post_selection.average,
        [[4 / 10, 5 / 10]]  # Chains x Trotter steps
    )

    std_dev_1_10 = np.sqrt((1 / 10) ** 2 + (1 / 10) ** 2)

    np.testing.assert_allclose(
        post_selection.std_dev,
        [[std_dev_1_10, std_dev_1_10]]  # Chains x Trotter steps
    )

    std_error_1_10 = std_dev_1_10 / np.sqrt(2)

    np.testing.assert_allclose(
        post_selection.std_error,
        [[std_error_1_10, std_error_1_10]]  # Chains x Trotter steps
    )


def _create_experiment(
    sites_count: int,
    trotter_steps: List[int],
    results: Optional[List[ExperimentResult]] = None,
    results_post_selected: Optional[List[ExperimentResult]] = None
) -> FermiHubbardExperiment:
    """Builds a minimal FermiHubbardExperiment fixture for tests.

    Args:
        sites_count: Number of sites of the fabricated problem.
        trotter_steps: One run is created for each listed step.
        results: Raw results, one per step; zero-filled when omitted.
        results_post_selected: Post-selected results, one per step;
            zero-filled when omitted.
    """

    def create_null_results() -> List[ExperimentResult]:
        return [
            ExperimentResult(up_density=(0.0,) * sites_count,
                             down_density=(0.0,) * sites_count,
                             measurements_count=0)
            for _ in range(len(trotter_steps))
        ]

    if results is None:
        results = create_null_results()

    if results_post_selected is None:
        results_post_selected = create_null_results()

    runs = []
    # Bug fix: the loop variable previously shadowed the
    # results_post_selected parameter (it only worked because zip() had
    # already consumed the original list); renamed for clarity and safety.
    for step, result, result_post_selected in zip(
            trotter_steps, results, results_post_selected):
        runs.append(ExperimentRun(
            trotter_steps=step,
            end_timestamp_sec=0.0,
            result=result,
            result_post_selected=result_post_selected)
        )

    return FermiHubbardExperiment(
        parameters=FermiHubbardParameters(
            hamiltonian=Hamiltonian(
                sites_count=sites_count,
                j=1.0,
                u=2.0
            ),
            initial_state=IndependentChainsInitialState(
                up=UniformSingleParticle(),
                down=UniformSingleParticle()
            ),
            layout=LineLayout(size=sites_count),
            dt=0.3
        ),
        runs=runs
    )
+"""Data specific to experiment published in arXiv:2009.XXXXX.""" + +from typing import Callable, Tuple + +from copy import deepcopy +import numpy as np + +from recirq.fermi_hubbard.decomposition import ( + ConvertToNonUniformSqrtIswapGates, + ParticleConservingParameters +) +from recirq.fermi_hubbard.layouts import ( + LineLayout, + QubitsLayout, + ZigZagLayout +) +from recirq.fermi_hubbard.parameters import ( + FermiHubbardParameters, + GaussianTrappingPotential, + Hamiltonian, + IndependentChainsInitialState, + PhasedGaussianSingleParticle, + UniformTrappingPotential +) + + +def gaussian_1u1d_instance(layout: QubitsLayout, + u: float, + dt: float = 0.3) -> FermiHubbardParameters: + """Predefined instance of two colliding Gaussian wavepackets. + + Args: + layout: Layout with qubits mapping. + u: Value of the interaction strength. + dt: Trotter step length. + + Returns: + Colliding Gaussian wavepackets problem parameters. + """ + + hamiltonian = Hamiltonian( + sites_count=layout.size, + j=1.0, + u=u + ) + + initial_state = IndependentChainsInitialState( + up=PhasedGaussianSingleParticle( + k=1.2 * 7, + sigma=1.2 / 7, + position=1.5 / 7 + ), + down=PhasedGaussianSingleParticle( + k=-1.2 * 7, + sigma=1.2 / 7, + position=5.5 / 7) + ) + + return FermiHubbardParameters( + hamiltonian=hamiltonian, + initial_state=initial_state, + layout=layout, + dt=dt + ) + + +def trapping_instance(layout: QubitsLayout, + u: float, + dt: float = 0.3, + up_particles: int = 2, + down_particles: int = 2) -> FermiHubbardParameters: + """Predefined initial state with up chain initialized by trapping potential. + + Args: + layout: Layout with qubits mapping. + u: Value of the interaction strength. + dt: Trotter step length. + up_particles: Number of up particles. + down_particles: Number of down particles. + + Returns: + Up particles trapped in Gaussian potential problem parameters. 
+ """ + + hamiltonian = Hamiltonian( + sites_count=layout.size, + j=1.0, + u=u + ) + + initial_state = IndependentChainsInitialState( + up=GaussianTrappingPotential( + particles=up_particles, + center=0.5, + sigma=1 / 7, + scale=-4 + ), + down=UniformTrappingPotential(particles=down_particles) + ) + + return FermiHubbardParameters( + hamiltonian=hamiltonian, + initial_state=initial_state, + layout=layout, + dt=dt + ) + + +def parasitic_cphase_compensation( + cphase_angle: float +) -> Callable[[FermiHubbardParameters], FermiHubbardParameters]: + """Transformation of problem parameters which account for parasitic cphase. + + This transformation compensates for parasitic cphase effects by adding the + nearest-neighbor interaction terms V to the problem Hamiltonian, which are + dependent on the qubits layout used. + + The result of this function can be passed as a value to the + numerics_transform argument of the InstanceBundle class. + + Args: + cphase_angle: Average parasitic cphase angle value over all the + two-qubit interactions. + + Returns: + Fermi-Hubbard problem parameters transformation function which adds the + parasitic cphase compensation by adding nearest-neighbor interaction + terms V to the Hamiltonian. + """ + + def compensate(parameters: FermiHubbardParameters + ) -> FermiHubbardParameters: + + cphase = cphase_angle / parameters.dt + if isinstance(parameters.layout, ZigZagLayout): + v = np.zeros(parameters.sites_count - 1) + v[0::2] = 2.0 * cphase + v[1::2] = 4.0 * cphase + elif isinstance(parameters.layout, LineLayout): + v = np.full(parameters.sites_count - 1, 2.0 * cphase) + else: + raise ValueError(f'Unsupported layout {parameters.layout}') + + v_parameters = deepcopy(parameters) + v_parameters.hamiltonian.v = tuple(v) + return v_parameters + + return compensate + + +def ideal_sqrt_iswap_converter() -> ConvertToNonUniformSqrtIswapGates: + """Creates a converter which can decompose circuits to sqrt_iswap gate set. 
+ """ + return ConvertToNonUniformSqrtIswapGates( + parameters={}, + parameters_when_missing=ParticleConservingParameters( + theta=np.pi / 4, + delta=0.0, + chi=0.0, + gamma=0.0, + phi=0.0 + ) + ) + + +def google_sqrt_iswap_converter() -> ConvertToNonUniformSqrtIswapGates: + """Creates a converter which can decompose circuits to imperfect sqrt_iswap + gate set. + + This converter assumes that each sqrt_iswap gate is really a + cirq.FSim(π/4, π/24) gate. + """ + return ConvertToNonUniformSqrtIswapGates( + parameters={}, + parameters_when_missing=ParticleConservingParameters( + theta=np.pi / 4, + delta=0.0, + chi=0.0, + gamma=0.0, + phi=np.pi / 24 + ) + ) + + +def rainbow23_layouts(sites_count: int = 8) -> Tuple[ZigZagLayout]: + """Creates a list of 16 that can be run on 23-qubit sub-grid of Rainbow + processor. + """ + return tuple(ZigZagLayout(size=sites_count, + origin=origin, + rotation=rotation, + flipped=flip, + exchange_chains=exchange, + reverse_chains=reverse) + for origin, rotation in (((4, 1), 0), ((8, 5), 180)) + for flip in (False, True) + for exchange in (False, True) + for reverse in (False, True)) diff --git a/requirements.txt b/requirements.txt index 1e187122..b72393f4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,7 @@ scipy # hfvqe openfermion==0.11.0 + +# fermi_hubbard +openfermioncirq +tqdm # notebook progress bars \ No newline at end of file