From cc0330fe9f4029bf2acb6f8358ce4ecd1944f66b Mon Sep 17 00:00:00 2001 From: UCSD Training ADmin Date: Thu, 28 Jan 2016 12:14:59 -0800 Subject: [PATCH] Changes made during class on Thursday --- ..._alignment_expression_quantification.ipynb | 12 +- ...from_geo_and_differential_expression.ipynb | 1261 +++++++++++++++++ 2 files changed, 1269 insertions(+), 4 deletions(-) create mode 100644 weeks/week04/1_downloading_data_from_geo_and_differential_expression.ipynb diff --git a/weeks/week04/0_alignment_expression_quantification.ipynb b/weeks/week04/0_alignment_expression_quantification.ipynb index dba1c1a..cf0acc4 100644 --- a/weeks/week04/0_alignment_expression_quantification.ipynb +++ b/weeks/week04/0_alignment_expression_quantification.ipynb @@ -456,7 +456,7 @@ } }, "source": [ - "YOUR ANSWER HERE" + "--outReadsUnmapped Fastx" ] }, { @@ -655,7 +655,7 @@ } }, "source": [ - "YOUR ANSWER HERE" + "--single --fragment-length 200.1" ] }, { @@ -715,6 +715,8 @@ "Read about the [SAM](https://samtools.github.io/hts-specs/SAMv1.pdf) format and check out [this](https://broadinstitute.github.io/picard/explain-flags.html) helpful website for explaining SAM flags.\n", "\n", "1. Which file would you use to get the percentage of mapped reads from your data?\n", + "\n", + "\n", "2. Which file would you send to someone when the alignment didn't go as expected and they asked for the parameters you used?\n", "\n", "To view the SAM file, you'll want to use `samtools`. Viewing directly with `head` gives you the \"header\" of the SAM file which has a bunch of parameters about the genome and the program that was used to align:\n", @@ -809,7 +811,9 @@ } }, "source": [ - "YOUR ANSWER HERE" + "1. -S\n", + "2. -b\n", + "3. -@ 4" ] }, { @@ -1084,4 +1088,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/weeks/week04/1_downloading_data_from_geo_and_differential_expression.ipynb b/weeks/week04/1_downloading_data_from_geo_and_differential_expression.ipynb new file mode 100644 index 0000000..f3f25a8 --- /dev/null +++ b/weeks/week04/1_downloading_data_from_geo_and_differential_expression.ipynb @@ -0,0 +1,1261 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "9d1236f66d04848571c34307db0cb47f", + "grade": false, + "grade_id": "1_intro", + "locked": true, + "solution": false + } + }, + "source": [ + "# Downloading data from GEO\n", + "\n", + "\n", + "## Reading list\n", + "\n", + "- [What the FPKM](https://haroldpimentel.wordpress.com/2014/05/08/what-the-fpkm-a-review-rna-seq-expression-units/) - Explains the difference between TPM/FPKM/RPKM units\n", + "- [Pearson correlation](https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient) - a linear correlation metric\n", + "\n", + "## Intro\n", + "\n", + "The Gene Expression Omnibus (GEO) is a website funded by the NIH to store the expression data associated with papers. Many journals require you to submit your data to GEO to be able to publish.\n", + "\n", + "Search [GEO](http://www.ncbi.nlm.nih.gov/geo) for the accession ID from [Shalek + Satija 2013](http://www.ncbi.nlm.nih.gov/pubmed/23685454). **Download the \"Series Matrix\" to your laptop** and **copy the link for the `GSE41265_allGenesTPM.txt.gz` file**. All the \"Series\" files contain the same information in different formats. The Matrix one is the easiest to understand.\n", + "\n", + "Open the \"Series Matrix\" in Excel (or equivalent) on your laptop and look at the format and what's described."
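+ "\n", + "The next cell downloads the file with `wget`. As an aside (not part of the original notebook), you could also do the download from Python's standard library - the URL below is just a placeholder for whatever link you copied from GEO:\n", + "\n", + "```python\n", + "import urllib.request\n", + "\n", + "url = 'PASTE-THE-LINK-YOU-COPIED-HERE'  # placeholder, not a real URL\n", + "urllib.request.urlretrieve(url, 'GSE41265_allGenesTPM.txt.gz')\n", + "```"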
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "dcf6829045fe0665a46d6174944e0cd2", + "grade": false, + "grade_id": "2_wget", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "! wget [link to GSE41265_allGenesTPM.txt.gz file]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "b2ce3e4d92ade69f8547b9f1d1f66a20", + "grade": false, + "grade_id": "describe_packages", + "locked": true, + "solution": false + } + }, + "source": [ + "We'll be using five additional libraries in Python:\n", + "\n", + "1. [`numpy`](http://www.numpy.org/) - (pronounced \"num-pie\") which is the basis for most scientific packages. It's basically a nice-looking Python interface to C code. It's very fast.\n", + "2. [`pandas`](http://pandas.pydata.org) - This gives us \"DataFrames in Python\" (like R's nice dataframes). They're a super convenient format that's based on `numpy`, so they're fast, and you can do convenient things like calculate mean and variance very easily.\n", + "3. [`matplotlib`](http://matplotlib.org/) - This is the base plotting library in Python.\n", + "4. [`scipy`](http://www.scipy.org/) - (pronounced \"sigh-pie\") Contains scientific computing routines, including the statistical tests we'll use (such as the independent-samples t-test, `ttest_ind`).\n", + "5. [`seaborn`](http://web.stanford.edu/~mwaskom/software/seaborn/index.html) - Statistical plotting library. To be completely honest, R's plotting and graphics capabilities are much better than Python's. However, Python is a really nice language to learn and use, it's very memory efficient, can be parallelized well, and has a very robust machine learning library, `scikit-learn`, which has a very nice and consistent interface. So this is Python's answer to `ggplot2` (a very popular R library for plotting), trying to make plotting in Python nicer looking and to make statistical plots easier to do."
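+ "\n", + "As a quick illustration of the `pandas` point above (a toy sketch that is not part of the original notebook - the numbers are made up), per-column summary statistics are one-liners:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "\n", + "# Toy dataframe: two fake samples, three fake genes\n", + "toy = pd.DataFrame({'S1': [0.0, 2.0, 4.0], 'S2': [1.0, 3.0, 5.0]})\n", + "print(toy.mean())  # mean of each column\n", + "print(toy.var())   # variance of each column\n", + "```"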
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "9b9ff011461cb483c0a3c5c2d850b749", + "grade": false, + "grade_id": "3_imports", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "# We're doing \"import superlongname as abbrev\" for our laziness - this way we don't have to type out the whole thing each time.\n", + "\n", + "# Numerical python library (pronounced \"num-pie\")\n", + "import numpy as np\n", + "\n", + "# Dataframes in Python\n", + "import pandas as pd\n", + "\n", + "# Python plotting library\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# T-test of independent samples\n", + "from scipy.stats import ttest_ind\n", + "\n", + "# Statistical plotting library we'll use\n", + "import seaborn as sns\n", + "\n", + "# This is necessary to show the plotted figures inside the notebook -- \"inline\" with the notebook cells\n", + "%matplotlib inline\n", + "\n", + "# Read the data table\n", + "geo_expression = pd.read_table('GSE41265_allGenesTPM.txt.gz', \n", + " \n", + " # Sets the first (Python starts counting from 0 not 1) column as the row names\n", + " index_col=0, \n", + " \n", + " # Tells pandas to decompress the gzipped file\n", + " compression='gzip')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "91a83709f6af2ee27abab1807b032fe4", + "grade": false, + "grade_id": "4_look_at_df", + "locked": true, + "solution": false + } + }, + "source": [ + "Let's look at the top of the dataframe by using `head()`. By default, this shows the first 5 rows." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "890994ad468ad7529d7baa0a4bbd2dbf", + "grade": false, + "grade_id": "5_head_df", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "geo_expression.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "ef2e6d24e06d536e932949e2cbd40f75", + "grade": false, + "grade_id": "5_head_df_n", + "locked": true, + "solution": false + } + }, + "source": [ + "To specify a certain number of rows, put a number between the parentheses." 
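+ "\n", + "One aside about the loading step above (an equivalent alternative, not something the original notebook uses): `pd.read_table` is just `pd.read_csv` with a tab separator, so the same call can be spelled out explicitly:\n", + "\n", + "```python\n", + "# Same result as the read_table call above: tab-separated, first column as row names,\n", + "# gzip decompression\n", + "geo_expression_alt = pd.read_csv('GSE41265_allGenesTPM.txt.gz', sep='\\t',\n", + "                                 index_col=0, compression='gzip')\n", + "```"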
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "a600224e6ea5acb0981cbb7cc6ab62d1", + "grade": false, + "grade_id": "5_head_df_8", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "geo_expression.head(8)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "478ddb9a89ec6970fdbc7f42bb2cc20d", + "grade": false, + "grade_id": "ex1_question", + "locked": true, + "solution": false + } + }, + "source": [ + "### Exercise 1: using `.head()`\n", + "\n", + "Show the first 17 rows of `geo_expression`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "3179664bec5d488ad1ff367f73c8abe7", + "grade": false, + "grade_id": "ex1_answer", + "locked": false, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "4687e408702a47c4331512bb99a879ad", + "grade": true, + "grade_id": "ex1_solution", + "locked": true, + "points": 1, + "solution": false + } + }, + "outputs": [], + "source": [ + "assert _.index.tolist() == ['XKR4', 'AB338584', 'B3GAT2', 'NPL', 'T2', 'T', 'PDE10A', '1700010I14RIK', \n", + " '6530411M01RIK', 'PABPC6', 'AK019626', 'AK020722', 'QK', 'B930003M22RIK',\n", + " 'RGS8', 'PACRG', 'AK038428']" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "b8ac47c7ac6c55e00f86257e4f42ffc8", + "grade": false, + "grade_id": "explain_seaborn_boxplot", + "locked": true, + "solution": false + } + }, + "source": [ + "Let's get a sense of this data by plotting the distributions using `boxplot` from seaborn. To save the output, we'll need to get access to the current figure and save it to a variable using `plt.gcf()`. Then we'll save this figure with `fig.savefig(\"filename.pdf\")`. You can use other extensions (e.g. \"`.png`\" or \"`.tiff`\") and it'll automatically save in that format." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "32fbdfc1bce49aa639d3c85d2e6bc2c9", + "grade": false, + "grade_id": "run_seaborn_boxplot", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.boxplot(geo_expression)\n", + "\n", + "# gcf = Get current figure\n", + "fig = plt.gcf()\n", + "fig.savefig('geo_expression_boxplot.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "1e356f3c2f978b3564d78054d371f457", + "grade": false, + "grade_id": "explain_log_scale", + "locked": true, + "solution": false + } + }, + "source": [ + "Oh right, we have expression data and the scales are enormous... notice the 140,000 maximum. Let's add 1 to all values and take the log2 of the data. We add one because log(0) is undefined, and this way all our logged values start from zero too. This \"$\log_2(TPM + 1)$\" is a very common transformation of expression data that makes it easier to analyze."
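+ "\n", + "As a tiny worked example of why the \"+ 1\" matters (made-up numbers, not the paper's data):\n", + "\n", + "```python\n", + "import numpy as np\n", + "\n", + "# A zero TPM stays at zero after the transform: log2(0 + 1) = 0\n", + "print(np.log2(np.array([0.0, 1.0, 3.0]) + 1))  # -> 0, 1, 2\n", + "```"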
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "3c385e7b0e0d13a047438fb16ab575cf", + "grade": false, + "grade_id": "do_log_scale", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "expression_logged = np.log2(geo_expression+1)\n", + "expression_logged.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "0591be7ce718355d190d114034f18a1e", + "grade": false, + "grade_id": "seaborn_logged_expression", + "locked": true, + "solution": false + }, + "scrolled": false + }, + "outputs": [], + "source": [ + "sns.boxplot(expression_logged)\n", + "\n", + "# gcf = Get current figure\n", + "fig = plt.gcf()\n", + "fig.savefig('expression_logged_boxplot.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "26d71dc18f53b76b9d5ab8f15491d235", + "grade": false, + "grade_id": "ex2_question", + "locked": true, + "solution": false + } + }, + "source": [ + "### Exercise 2: Interpreting distributions\n", + "Now that these are more or less on the same scale ...\n", + "\n", + "Q: What do you notice about the pooled samples (P1, P2, P3) that is different from the single cells?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "0a1e0dd915e17c1f90215db608838236", + "grade": true, + "grade_id": "ex2_answer", + "locked": false, + "points": 2, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "db966438110ed2dae2cdf6258e031549", + "grade": false, + "grade_id": "filtering", + "locked": true, + "solution": false + } + }, + "source": [ + "## Filtering expression data\n", + "\n", + "Seems like a lot of genes are near zero, which means we need to filter our genes.\n", + "\n", + "We can ask which genes have log2 expression values less than 10 (weird example, I know - stay with me). This creates a dataframe of `boolean` values of True/False." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "d7bb3501678238f1a41465f9e16659ac", + "grade": false, + "grade_id": "boolean_matrix", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "expression_logged < 10" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "fbfc82a628d9e94a5cd9d1e782bc0411", + "grade": false, + "grade_id": "explain_boolean_df", + "locked": true, + "solution": false + } + }, + "source": [ + "What's nice about booleans is that False is 0 and True is 1, so we can sum to get the number of \"Trues.\" This is a simple, clever way to filter the data on a count. We **could** use this boolean dataframe to filter our original dataframe, but then we lose information.
Wherever the condition is `False` (here, values of 10 or more), it puts in a \"not a number\" - \"NaN.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "32481371456e30b37ce803a2acd73ab1", + "grade": false, + "grade_id": "filter_with_boolean_df", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "expression_at_most_10 = expression_logged[expression_logged < 10]\n", + "expression_at_most_10" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "cda74abb93b8b4f13aba3d463b3a406b", + "grade": false, + "grade_id": "ex3_question", + "locked": true, + "solution": false + } + }, + "source": [ + "### Exercise 3: Crude filtering on expression data\n", + "\n", + "Create a dataframe called \"`expression_greater_than_5`\" which contains only values that are greater than 5 from `expression_logged`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "d4ee2eb8657a57373b2e3861b0e0df30", + "grade": false, + "grade_id": "ex3_answer", + "locked": false, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "4123a9852c2a8f388738f1e10f43d7e9", + "grade": true, + "grade_id": "ex3_test", + "locked": true, + "points": 2, + "solution": false + } + }, + "outputs": [], + "source": [ + "# This `assert` tests for the total number of \"NaN\"s (nulls) in the dataframe by getting a boolean matrix from\n", + "# `isnull()` and then summing twice to get the total\n", + "assert expression_greater_than_5.isnull().sum().sum() == 539146" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "2db5edb4cd9ecc42d78f1a8053a233fb", + "grade": false, + "grade_id": "smarter_filtering", + "locked": true, + "solution": false + } + }, + "source": [ + "\n", + "The crude filtering above is okay, but we're smarter than that. We want to use the filtering in the paper: \n", + "\n", + "> *... discarded genes that were not appreciably expressed (transcripts per million (TPM) > 1) in at least three individual cells, retaining 6,313 genes for further analysis.*\n", + "\n", + "We want to do THAT, but first we need a couple more concepts. The first one is summing booleans.\n", + "\n", + "## A smarter way to filter\n", + "\n", + "Remember that booleans are really 0s (`False`) and 1s (`True`)?
This turns out to be VERY convenient and we can use this concept in clever ways.\n", + "\n", + "We can use `.sum()` on a boolean matrix to get the number of genes with expression greater than 10 for each sample:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "f767b67c376b04e51ce27aa5e321c7ef", + "grade": false, + "grade_id": "boolean_sum", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "(expression_logged > 10).sum()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "f92f2a650571923f467ec0421c3ff87c", + "grade": false, + "grade_id": "explain_sum_axis1", + "locked": true, + "solution": false + } + }, + "source": [ + "`pandas` is column-oriented, and by default it will give you a sum for each column. But **we** want a sum for each row. How do we do that?\n", + "\n", + "\n", + "We can sum the boolean matrix we created with \"`expression_logged > 10`\" along `axis=1` (along the samples) to get **for each gene, how many samples have expression greater than 10**. In `pandas`, this column is called a \"`Series`\" because it has only one dimension - its length. Internally, `pandas` stores dataframes as a bunch of columns - specifically, these `Series` objects.\n", + "\n", + "For most genes, this turns out to be not that many." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "9b32cd77e232e4d0c37f285bc2b57d32", + "grade": false, + "grade_id": "boolean_sum_axis1", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "(expression_logged > 10).sum(axis=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "ac97d0d811684014dba3a24c502b5c4e", + "grade": false, + "grade_id": "ex4_question", + "locked": true, + "solution": false + } + }, + "source": [ + "Now we can apply ANOTHER filter and find genes that are \"present\" (expression greater than 10) in at least 5 samples. We'll save this as the variable `genes_of_interest`. Notice that the cell below doesn't show `genes_of_interest` but rather the list at the bottom. This is because what you see under a code cell is the output of the last thing you called. The \"hash mark\"/\"number sign\" \"`#`\" is called a **comment character** and makes Python ignore the rest of the line after it.\n", + "\n", + "### Exercise 4: Commenting and uncommenting\n", + "\n", + "To see `genes_of_interest`, \"uncomment\" the line by removing the hash sign, and comment out the list `[1, 2, 3]`."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "00ecfb44b16b5d9c3b57f6d4c9bb8913", + "grade": false, + "grade_id": "ex4_answer", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "genes_of_interest = (expression_logged > 10).sum(axis=1) >= 5\n", + "# genes_of_interest\n", + "[1, 2, 3]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "997e5e0f67f8f893f9b3cc43f349395d", + "grade": true, + "grade_id": "ex4_test", + "locked": true, + "points": 2, + "solution": false + } + }, + "outputs": [], + "source": [ + "assert isinstance(_, pd.Series)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "9c4e6f3fc04066964e983b5c0652d943", + "grade": false, + "grade_id": "get_rows", + "locked": true, + "solution": false + } + }, + "source": [ + "## Getting only rows that you want (aka subsetting)\n", + "\n", + "Now we have some genes that we want to use - how do you pick just those? This is also called \"subsetting\", and in `pandas` it has the technical name [indexing](http://pandas.pydata.org/pandas-docs/stable/indexing.html).\n", + "\n", + "In `pandas`, to get the rows (genes) you want using their name (gene symbol) or a boolean matrix, you use `.loc[rows_you_want]`. Check it out below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "6ee77eb36ce8d6d751aef7b61d3401a1", + "grade": false, + "grade_id": "show_row_Subsetting", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "expression_filtered = expression_logged.loc[genes_of_interest]\n", + "print(expression_filtered.shape) # shows (nrows, ncols) - like in Manhattan, you give the Street and then the Avenue\n", + "expression_filtered.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "111d70a099561c8748ddd1aa98659f58", + "grade": false, + "grade_id": "ex5_question", + "locked": true, + "solution": false + } + }, + "source": [ + "Wow, our matrix is very small - 197 genes! We probably don't want to filter THAT much... I'd say a range of 5,000-15,000 genes after filtering is a good ballpark. Not so big that it's impossible to work with, but not so small that you can't do any statistics.\n", + "\n", + "We'll get closer to the expression data used in the paper. Remember that they filtered on genes that had expression greater than 1 in at least 3 *single cells*. We'll filter for expression greater than 1 in at least 3 *samples* for now - we'll get to the single-cell stuff in a bit. For now, we'll filter on all samples.\n", + "\n", + "### Exercise 5: Filtering on the presence of genes\n", + "\n", + "Create a dataframe called `expression_filtered_by_all_samples` that consists only of genes that have expression greater than 1 in at least 3 samples.\n", + "\n", + "#### Hint for `IndexingError: Unalignable boolean Series key provided`\n", + "\n", + "If you're getting this error, double-check your `.sum()` command. Did you remember to specify that you want to get the \"number present\" for each **gene** (row)? Remember that `.sum()` by default gives you a sum for each column. How do you get a sum for each row?"
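+ "\n", + "If that hint isn't clicking, here is a toy illustration (made-up values, not the exercise answer) of what summing a boolean dataframe does along each axis:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "\n", + "toy = pd.DataFrame({'S1': [True, False], 'S2': [True, True]}, index=['geneA', 'geneB'])\n", + "print(toy.sum())        # default: one number per column (S1 -> 1, S2 -> 2)\n", + "print(toy.sum(axis=1))  # axis=1: one number per row (geneA -> 2, geneB -> 1)\n", + "```"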
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "a0faf5381f47412f35ec4d396b18196c", + "grade": false, + "grade_id": "ex5_answer", + "locked": false, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE\n", + "print(expression_filtered_by_all_samples.shape)\n", + "expression_filtered_by_all_samples.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "18a49eb6343b4288a20bb55ad27298c1", + "grade": true, + "grade_id": "ex5_test", + "locked": true, + "points": 5, + "solution": false + } + }, + "outputs": [], + "source": [ + "assert expression_filtered_by_all_samples.shape == (9943, 21)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "a6422e612e68ed72b06e5d6e63eee909", + "grade": false, + "grade_id": "explain_boxplot_filtered_all_samples", + "locked": true, + "solution": false + } + }, + "source": [ + "Just for fun, let's see how the distributions in our expression matrix have changed. If you want to save the figure, the next cell uses `fig.savefig` just like before." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "0760115865abdd218d1695de1c5c2ca4", + "grade": false, + "grade_id": "do_boxplot_filtered_all_samples", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.boxplot(expression_filtered_by_all_samples)\n", + "\n", + "# gcf = Get current figure\n", + "fig = plt.gcf()\n", + "fig.savefig('expression_filtered_by_all_samples_boxplot.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "0aab90d4a140c7ca74026c8b9dec50e8", + "grade": false, + "grade_id": "subsetting_columns", + "locked": true, + "solution": false + } + }, + "source": [ + "## Getting only the columns you want\n", + "\n", + "In the next exercise, we'll get just the single cells.\n", + "\n", + "For the next step, we're going to pull out just the pooled samples - which are conveniently labeled as \"P#\". We'll do this using a [list comprehension](http://www.pythonforbeginners.com/basics/list-comprehensions-in-python), which means we'll create a new list based on the items in `expression_logged.columns` and whether or not they start with the letter `'P'`."
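+ "\n", + "If list comprehensions are new to you, here is the same pattern on a toy list (made-up names, not the real sample IDs):\n", + "\n", + "```python\n", + "# Keep only the strings that start with 'P'\n", + "names = ['P1', 'S1', 'P2', 'S2']\n", + "p_names = [x for x in names if x.startswith('P')]\n", + "print(p_names)  # ['P1', 'P2']\n", + "```"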
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "f59d52c19044bc8ba61b209a1166583f", + "grade": false, + "grade_id": "list_comprehension_example", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "pooled_ids = [x for x in expression_logged.columns if x.startswith('P')]\n", + "pooled_ids" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "1ca93f9bd3b8abb59e49313d07360ca1", + "grade": false, + "grade_id": "subset_columns", + "locked": true, + "solution": false + } + }, + "source": [ + "We'll access the columns we want using this bracket notation (note that this only works for columns, not rows)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "91d8cc680c7a1542c21a9e86c59b87ca", + "grade": false, + "grade_id": "show_subset_columns", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "pooled = expression_logged[pooled_ids]\n", + "pooled.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "22eee373e463d204f19ecd1f6aea429c", + "grade": false, + "grade_id": "subset_columns_loc", + "locked": true, + "solution": false + } + }, + "source": [ + "We could do the same thing using `.loc` but we would need to put a colon \"`:`\" in the \"rows\" section (first place) to show that we want \"all rows.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "56f23770caf1453460a61774a41806f1", + "grade": false, + "grade_id": "show_subset_columns_loc", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "expression_logged.loc[:, pooled_ids].head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "8156a739fa505658538c660c05aa3cc8", + "grade": false, + "grade_id": "ex6_question", + "locked": true, + "solution": false + } + }, + "source": [ + "### Exercise 6: Make a dataframe of only single samples\n", + "\n", + "Use list comprehensions to make a list called `single_ids` that consists only of single cells, and use that list to subset `expression_logged` and create a dataframe called `singles`. 
(Hint - how are the single cell ids different from the pooled ids?)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "5aeddd275fd6528bc2853eedf35841f9", + "grade": false, + "grade_id": "ex6_answser", + "locked": false, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE\n", + "print(singles.shape)\n", + "singles.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "f3bbc53e892d060fd3ed4a2080db6a2f", + "grade": true, + "grade_id": "ex6_test", + "locked": true, + "points": 5, + "solution": false + } + }, + "outputs": [], + "source": [ + "assert singles.shape == (27723, 18)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "416d62b817c0764841de900a39ac91d9", + "grade": false, + "grade_id": "ex7_question", + "locked": true, + "solution": false + } + }, + "source": [ + "## Using two different dataframes for filtering\n", + "\n", + "### Exercise 7: Filter the full dataframe using the singles dataframe\n", + "\n", + "Now we'll actually do the filtering done by the paper. Using the `singles` dataframe you just created, get the genes that have expression greater than 1 in at least 3 single cells, and use that to filter `expression_logged`. Call this dataframe `expression_filtered_by_singles`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "38734e3d8af5889876e6c832f828d197", + "grade": false, + "grade_id": "ex7_answer", + "locked": false, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE\n", + "print(expression_filtered_by_singles.shape)\n", + "expression_filtered_by_singles.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "8877f8e95666fc08af54483aafcfef24", + "grade": true, + "grade_id": "ex7_solution", + "locked": true, + "points": 10, + "solution": false + } + }, + "outputs": [], + "source": [ + "assert expression_filtered_by_singles.shape == (6312, 21)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "360185ccc7c8494f9f8739fa251ed601", + "grade": false, + "grade_id": "expression_filtered_by_singles_boxplot", + "locked": true, + "solution": false + } + }, + "source": [ + "Let's make a boxplot again to see how the data has changed."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "8444dc3f576bcc9944d0f61d87bfc14f", + "grade": false, + "grade_id": "show_expression_filtered_by_singles_boxplot", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.boxplot(expression_filtered_by_singles)\n", + "\n", + "fig = plt.gcf()\n", + "fig.savefig('expression_filtered_by_singles_boxplot.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "64b76d925e25071893a41fb076ac23e9", + "grade": false, + "grade_id": "why_did_this_matter", + "locked": true, + "solution": false + } + }, + "source": [ + "This is much nicer because now we don't have so many zeros and each sample has a reasonable dynamic range.\n", + "\n", + "## Why did this filtering even matter?\n", + "\n", + "You may be wondering: we did all this work to remove some zeros... so the FPKM what? Let's take a look at how this affects the relationships between samples using `sns.jointplot` from seaborn, which will plot a correlation scatterplot. This also calculates the [Pearson correlation](https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient), a linear correlation metric.\n", + "\n", + "Let's first do this on the unlogged data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "a41f24e4ab1315fea1b29f6bd5b1732c", + "grade": false, + "grade_id": "jointplot_unlogged", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.jointplot('S1', 'S2', geo_expression)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "2eff1b6e1987e0f4d3d4229442340923", + "grade": false, + "grade_id": "jointplot_logged_weird", + "locked": true, + "solution": false + } + }, + "source": [ + "Pretty funky looking, huh? That's why we logged it :)\n", + "\n", + "Now let's try this on the logged data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "32929a1501d8e7db5b439de317c4ff86", + "grade": false, + "grade_id": "jointplot_expression_logged", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.jointplot(expression_logged['S1'], expression_logged['S2'])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "45e02c41ba1635fad5f65ceee9174c56", + "grade": false, + "grade_id": "jointplot_expression_logged_changed", + "locked": true, + "solution": false + } + }, + "source": [ + "Hmm, our Pearson correlation increased from 0.62 to 0.64. Why could that be?\n", + "\n", + "Let's look at this same plot using the filtered data."
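+ "\n", + "One more aside that is not in the original notebook: if you'd rather have the Pearson r as a plain number than read it off the plot, `scipy` can compute it directly (this needs one extra import):\n", + "\n", + "```python\n", + "from scipy.stats import pearsonr\n", + "\n", + "# Pearson correlation (and its p-value) between the two logged samples\n", + "r, p = pearsonr(expression_logged['S1'], expression_logged['S2'])\n", + "print(r)\n", + "```"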
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": false, + "nbgrader": { + "checksum": "78df5310ff41cf06d89255009571445b", + "grade": false, + "grade_id": "show_expression_filtered_by_singles_jointplot", + "locked": true, + "solution": false + } + }, + "outputs": [], + "source": [ + "sns.jointplot('S1', 'S2', expression_filtered_by_singles)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": false, + "nbgrader": { + "checksum": "06248eb398241c9ce377975558ee413f", + "grade": false, + "grade_id": "ex8_question", + "locked": true, + "solution": false + } + }, + "source": [ + "And now our correlation went DOWN!? Why would that be? \n", + "\n", + "### Exercise 8: Discuss changes in correlation\n", + "\n", + "Take 2-5 sentences to explain why the correlation changed between the different datasets." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true, + "deletable": false, + "nbgrader": { + "checksum": "431a222b288fc2c0c3f07a474ca588b1", + "grade": true, + "grade_id": "ex8_answer", + "locked": false, + "points": 5, + "solution": true + } + }, + "outputs": [], + "source": [ + "# YOUR CODE HERE" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.1" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file