In [1]:
# Many fantastic pieces of free and open-source software can be used as key components of single-cell analysis
# in a Python notebook. This script shows how to load Scanpy results into Single Cell Explorer. The analytic code
# is adapted from the Scanpy tutorial "Clustering 3K PBMCs", itself a re-implementation of Seurat's (Satija et al., 2015)
# guided clustering tutorial. We gratefully acknowledge the authors of Seurat and Scanpy for their contributions.
# We use 10k PBMCs from a healthy donor, obtained from 10x Genomics.

In [ ]:
# Create a data folder and download the demo data from 10x Genomics
#!mkdir data
#!wget http://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_10k_v3/pbmc_10k_v3_filtered_feature_bc_matrix.tar.gz -O data/pbmc_10k_v3_filtered_feature_bc_matrix.tar.gz
#!cd data; tar -xzf pbmc_10k_v3_filtered_feature_bc_matrix.tar.gz
In [1]:
import scpipeline
In [2]:
# other libraries
import os, sys, csv, json, datetime, time, math, collections, re
import scipy.stats
from sklearn import preprocessing
import numpy as np
import pandas as pd
import scanpy
import scanpy.api as sc
sc.settings.set_figure_params(dpi=80)
In [3]:
# Read count data from 10X V3 output (Cell Ranger V3)
p = scpipeline.ProcessPipline()
dataPath = './data/filtered_feature_bc_matrix/'  # the directory with the `.mtx` file
p.readData(dataPath)                             # read the 10X '.mtx' data, compute the mitochondrial fraction, and create p.data
p.data                                           # p.data: the AnnData object we will use for QC
--> This might be very slow. Consider passing `cache=True`, which enables much faster reading from a cache file.
filtered out 10502 genes that are detected in less than 1 cells
Out[3]:
AnnData object with n_obs × n_vars = 11769 × 23036 
    obs: 'n_genes', 'percent_mito', 'n_counts'
    var: 'gene_ids', 'n_cells'
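In [ ]:
# A minimal sketch (an assumption, not scpipeline's actual code) of what readData
# roughly does, following the scanpy "Clustering 3K PBMCs" tutorial:
"""
adata = sc.read_10x_mtx(dataPath, var_names='gene_symbols')  # read the `.mtx` directory
adata.var_names_make_unique()
sc.pp.filter_genes(adata, min_cells=1)                       # matches the log message above
mito_genes = adata.var_names.str.startswith('MT-')
adata.obs['percent_mito'] = np.sum(adata[:, mito_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
adata.obs['n_counts'] = adata.X.sum(axis=1).A1
"""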
In [4]:
### p.data is the data object we will use; plot the ten most highly expressed genes
sc.pl.highest_expr_genes(p.data, n_top=10)
In [5]:
# QC function signature:
#   QC(self, max_n_genes="", min_n_genes="", min_n_cells="", max_percent_mito="")
# The scanpy tutorial uses QC(max_n_genes=2500, min_n_genes=200, min_n_cells=3, max_percent_mito=0.05)

p.QC(min_n_genes=200,min_n_cells=3)
filter cells
filtered out 232 cells that have less than 200 genes expressed
filter genes
filtered out 2684 genes that are detected in less than 3 counts
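In [ ]:
# For those more familiar with scanpy, the QC call above presumably wraps the
# standard filters (a sketch; the log wording suggests min_counts for genes):
"""
sc.pp.filter_cells(p.data, min_genes=200)
sc.pp.filter_genes(p.data, min_counts=3)
"""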
In [6]:
## plot n_genes, n_counts, and the percentage of mitochondrial genes per cell
sc.pl.violin(p.data, ['n_genes', 'n_counts', 'percent_mito'],
             jitter=0.4, multi_panel=True)
In [7]:
sc.pl.scatter(p.data, x='n_counts', y='n_genes')
sc.pl.scatter(p.data, x='n_counts', y='percent_mito')
In [8]:
# QC using the percentage of mitochondrial genes
p.QC(max_n_genes=5000, max_percent_mito=0.12)
"""
# for those who are more familiar with scanpy: 
p.data = p.data[p.data.obs['n_genes'] < 5000, :]
p.data = p.data[p.data.obs['percent_mito'] < 0.12, :]
"""
filter n_genes < 5000
filter percent_mito < 0.12
Out[8]:
"\n# for those who are more famaliar with scanpy: \np.data = p.data[p.data.obs['n_genes'] < 5000, :]\np.data = p.data[p.data.obs['percent_mito'] < 0.12, :]\n"
In [9]:
# The QC steps in the scanpy package remove cell barcodes. For database loading, adata should keep
# the same set of barcodes as the QC'd data, so we copy p.data to adata; adata will be loaded into the database.

adata = p.data.copy()
In [10]:
adata
Out[10]:
AnnData object with n_obs × n_vars = 9571 × 20352 
    obs: 'n_genes', 'percent_mito', 'n_counts'
    var: 'gene_ids', 'n_cells', 'n_counts'
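In [ ]:
# Quick sanity check (a sketch, not in the original workflow): the copy carries
# exactly the same cell barcodes as the QC'd p.data
assert (adata.obs_names == p.data.obs_names).all()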
In [11]:
# Normalize (library-size correct) the data matrix to 10,000 reads per cell
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
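In [ ]:
# What normalize_per_cell does, by hand (a sketch): divide each cell's counts by
# that cell's total and multiply by 10,000, so every cell sums to the same total
"""
counts_per_cell = np.asarray(adata.X.sum(axis=1)).ravel()
adata.X = adata.X.multiply(1e4 / counts_per_cell[:, None]).tocsr()
"""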
In [12]:
# Logarithmize the data
sc.pp.log1p(adata)
In [13]:
# Freeze the normalized, log-transformed data as .raw for later use
adata.raw = adata
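In [ ]:
# .raw keeps the full normalized, log-transformed gene space even after adata is
# subset to highly variable genes below; any gene can still be retrieved, e.g.:
"""
cd3d = adata.raw[:, 'CD3D'].X
"""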
In [14]:
# Identify highly variable genes
sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)
--> added
    'highly_variable', boolean vector (adata.var)
    'means', float vector (adata.var)
    'dispersions', float vector (adata.var)
    'dispersions_norm', float vector (adata.var)
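In [ ]:
# The cutoffs flag genes whose mean expression and normalized dispersion fall in
# the given windows; roughly (a sketch of the same logic scanpy applies):
"""
hv = ((adata.var['means'] > 0.0125) & (adata.var['means'] < 3)
      & (adata.var['dispersions_norm'] > 0.5))
print(hv.sum(), 'highly variable genes')
"""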
In [15]:
sc.pl.highly_variable_genes(adata)
In [16]:
# Keep only the highly variable genes
adata = adata[:, adata.var['highly_variable']]
# regress out effects of total counts per cell and the percentage of mitochondrial genes.
sc.pp.regress_out(adata, ['n_counts', 'percent_mito'])
# Scale each gene to unit variance. Clip values exceeding standard deviation 10.
sc.pp.scale(adata, max_value=10)
regressing out ['n_counts', 'percent_mito']
    sparse input is densified and may lead to high memory use
    finished (0:00:24.10)
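In [ ]:
# Conceptually, regress_out fits a linear model per gene on the covariates and
# keeps the residuals; a sketch of the idea (not scanpy's exact implementation):
"""
from sklearn.linear_model import LinearRegression
cov = adata.obs[['n_counts', 'percent_mito']].values  # covariates, cells x 2
expr = adata.X                                        # expression, cells x genes
residuals = expr - LinearRegression().fit(cov, expr).predict(cov)
"""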
In [17]:
# Dimension reduction: PCA as a first step
sc.tl.pca(adata, svd_solver='arpack')
sc.pl.pca(adata, color=['CD3D','CD19'])
computing PCA on highly variable genes
In [18]:
adata
Out[18]:
AnnData object with n_obs × n_vars = 9571 × 2049 
    obs: 'n_genes', 'percent_mito', 'n_counts'
    var: 'gene_ids', 'n_cells', 'n_counts', 'highly_variable', 'means', 'dispersions', 'dispersions_norm'
    uns: 'pca'
    obsm: 'X_pca'
    varm: 'PCs'
In [19]:
sc.pl.pca_variance_ratio(adata, log=True)
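In [ ]:
# A numeric companion to the plot above (a sketch): the cumulative variance
# explained per PC, useful when choosing how many PCs to keep downstream.
# `variance_ratio` is stored by sc.tl.pca in adata.uns['pca'].
print(np.cumsum(adata.uns['pca']['variance_ratio'])[:20])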