Skip to content

Commit

Permalink
Merge pull request #37 from yuqiyuqitan/fix2
Browse files Browse the repository at this point in the history
Fix2
  • Loading branch information
mgbckr authored Jun 30, 2024
2 parents 8e9bf5e + e502606 commit c6836b4
Show file tree
Hide file tree
Showing 16 changed files with 476 additions and 84 deletions.
1 change: 0 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -92,4 +92,3 @@ pytest
## General outline of SPACEc analysis

![SPACEc](https://github.com/yuqiyuqitan/SPACEc/tree/master/docs/overview.png?raw=true "")

2 changes: 1 addition & 1 deletion notebooks/0_tissue_extractor.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\" # replace with your path\n",
"root_path = \"/home/user/path/SPACEc/\" # replace with your path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
Expand Down
4 changes: 2 additions & 2 deletions notebooks/1_cell_segmentation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -77,11 +77,11 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\"\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
"output_dir = root_path + 'example_data/output/'\n",
"output_dir = root_path + 'example_data/output/' # insert your own path\n",
"os.makedirs(output_dir, exist_ok=True)"
]
},
Expand Down
4 changes: 2 additions & 2 deletions notebooks/2_preprocessing.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\" # replace with your path\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
Expand Down Expand Up @@ -399,7 +399,7 @@
"# Identify the lowest 1% for cell size and nuclear marker intensity to get a better idea of potential segmentation artifacts.\n",
"df_filt = sp.pp.filter_data(\n",
" df_seg, \n",
" nuc_thres=1, # remove cells with DAPI intensity below threshold\n",
" nuc_thres=one_percent_nuc, # remove cells with DAPI intensity below threshold\n",
" size_thres=one_percent_area, # remove cells with area below threshold\n",
" nuc_marker=\"DAPI\", # name of nuclear marker\n",
" cell_size = \"area\", # name of cell size column\n",
Expand Down
2 changes: 1 addition & 1 deletion notebooks/3_cell_annotation_STELLAR.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\" # replace with your path\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
Expand Down
2 changes: 1 addition & 1 deletion notebooks/3_cell_annotation_ml.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\" # replace with your path\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
Expand Down
2 changes: 1 addition & 1 deletion notebooks/3_clustering.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/GitHub/SPACEc/\" # replace with your path\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"data_path = root_path + 'example_data/raw/' # where the data is stored\n",
"\n",
"# where you want to store the output\n",
Expand Down
2 changes: 1 addition & 1 deletion notebooks/4_cell_neighborhood_analysis.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/SPACEc_Apr_2024/\"\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"\n",
"data_path = root_path + 'data/' # where the data is stored\n",
"\n",
Expand Down
2 changes: 1 addition & 1 deletion notebooks/5_distance_permutation_analysis.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/SPACEc_Apr_2024/\"\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"\n",
"data_path = root_path + 'data/' # where the data is stored\n",
"\n",
Expand Down
4 changes: 2 additions & 2 deletions notebooks/6_patch_proximity_analysis.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/SPACEc_Apr_2024/\" \n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"\n",
"data_path = root_path + 'data/' # where the data is stored\n",
"\n",
Expand Down Expand Up @@ -99,7 +99,7 @@
}
],
"source": [
"adata = sc.read('/home/tim/Dokumente/GitHub/SPACEc/tests/data/processed/tonsil/1/adata_nn_demo_annotated_long.h5ad')\n",
"adata = sc.read(output_dir + 'adata_nn_demo_annotated.h5ad')\n",
"adata"
]
},
Expand Down
4 changes: 2 additions & 2 deletions notebooks/7_TissUUmaps.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@
"metadata": {},
"outputs": [],
"source": [
"root_path = \"/home/tim/Dokumente/SPACEc_Apr_2024/\"\n",
"root_path = \"/home/user/path/SPACEc/\" # insert your own path\n",
"\n",
"data_path = root_path + 'data/' # where the data is stored\n",
"\n",
Expand Down Expand Up @@ -353,7 +353,7 @@
"image_list, csv_paths = sp.tl.tm_viewer(\n",
" adata,\n",
" images_pickle_path= output_dir + 'seg_output_tonsil2.pickle',\n",
" directory = \"/home/tim/Dokumente/SPACEc_Apr_2024/cache\",\n",
"directory = \"/home/user/path/SPACEc/cache\", # insert your own path where you want to cache your images for TM visualization (you can delete this once you are done with TM)\n",
" region_column = \"unique_region\",\n",
" region = \"reg002\",\n",
" xSelector = \"y\",\n",
Expand Down
68 changes: 64 additions & 4 deletions src/spacec/_shared/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,23 @@


def create_multichannel_tiff(input_dir, output_dir, output_filename):
"""
Create a multi-channel TIFF image from individual TIFF files.
Parameters
----------
input_dir : str
Directory containing the input TIFF files.
output_dir : str
Directory to save the output TIFF file.
output_filename : str
Name of the output TIFF file.
Returns
-------
list of str
List of channel names.
"""
# Get a list of all TIFF files in the input directory
tiff_files = [f for f in os.listdir(input_dir) if f.endswith((".tiff", ".tif"))]

Expand Down Expand Up @@ -35,18 +52,36 @@ def create_multichannel_tiff(input_dir, output_dir, output_filename):

# combine multiple channels in one image and add as new image to image_dict with the name segmentation_channel
def combine_channels(image_dict, channel_list, new_channel_name):
"""
Combine multiple channels into a single channel.
Parameters
----------
image_dict : dict
Dictionary with channel names as keys and images as values.
channel_list : list of str
List of channel names to combine.
new_channel_name : str
Name of the new channel.
Returns
-------
dict
Updated dictionary with the new channel added.
"""
# Determine bit depth of input images
bit_depth = image_dict[channel_list[0]].dtype

# Create empty image
new_image = np.zeros(
(image_dict[channel_list[0]].shape[0], image_dict[channel_list[0]].shape[1])
(image_dict[channel_list[0]].shape[0], image_dict[channel_list[0]].shape[1]),
dtype=bit_depth,
)

# Add channels to image as maximum projection
for channel in channel_list:
new_image = np.maximum(new_image, image_dict[channel])

# generate greyscale image
new_image = np.uint8(new_image)

# Add image to image_dict
image_dict[new_channel_name] = new_image

Expand All @@ -62,6 +97,31 @@ def format_CODEX(
stack=True,
input_format="Multichannel",
):
"""
Format images based on the input format.
Parameters
----------
image : ndarray or str
Input image or directory containing images.
channel_names : list of str, optional
List of channel names.
number_cycles : int, optional
Number of cycles in the CODEX format.
images_per_cycle : int, optional
Number of images per cycle in the CODEX format.
stack : bool, default=True
If True, stack the images in the list.
input_format : str, default="Multichannel"
Format of the input images. Options are "CODEX", "Multichannel", and "Channels".
Returns
-------
dict
Dictionary with channel names as keys and images as values. If `stack` is True and `input_format` is "CODEX",
also returns a stacked image as a numpy array.
"""

if input_format == "CODEX":
total_images = number_cycles * images_per_cycle
image_list = [None] * total_images # pre-allocated list
Expand Down
13 changes: 13 additions & 0 deletions src/spacec/helperfunctions/_general.py
Original file line number Diff line number Diff line change
Expand Up @@ -1053,6 +1053,18 @@ def is_dark(color):


def check_for_gpu():
"""
Check if a GPU is available for use by TensorFlow and PyTorch.
This function checks if a GPU is available for use by TensorFlow and PyTorch.
It prints a message indicating whether a GPU is available for each library,
and returns a boolean indicating whether a GPU is available for PyTorch.
Returns
-------
bool
True if a GPU is available for PyTorch, False otherwise.
"""
if tf.config.list_physical_devices("GPU"):
print("GPU is available to Tensorflow")
else:
Expand All @@ -1061,3 +1073,4 @@ def check_for_gpu():
use_GPU = use_gpu()
yn = ["GPU is not available to Pytorch", "GPU is available to Pytorch"]
print(f"{yn[use_GPU]}")
return use_GPU
71 changes: 70 additions & 1 deletion src/spacec/plotting/_general.py
Original file line number Diff line number Diff line change
Expand Up @@ -3816,6 +3816,34 @@ def cn_map(
output_dir="./",
rand_seed=1,
):
"""
Generates a CNMap plot using the provided data and parameters.
Parameters
----------
adata : anndata.AnnData
Annotated data matrix.
cnmap_dict : dict
Dictionary containing graph, tops, e0, e1, and simp_freqs.
cn_col : str
Column name in adata to be used for color coding.
palette : dict, optional
Color palette to use for the plot. If None, a random color palette is generated.
figsize : tuple, optional
Size of the figure. Defaults to (40, 20).
savefig : bool, optional
Whether to save the figure or not. Defaults to False.
output_fname : str, optional
The filename for the saved figure. Required if savefig is True. Defaults to "".
output_dir : str, optional
The directory where the figure will be saved. Defaults to "./".
rand_seed : int, optional
Seed for random number generator. Defaults to 1.
Returns
-------
None
"""
graph = cnmap_dict["g"]
tops = cnmap_dict["tops"]
e0 = cnmap_dict["e0"]
Expand Down Expand Up @@ -3854,6 +3882,46 @@ def cn_map(
c=col,
zorder=-1,
)
# Dummy scatter plots for legend
freqs = simp_freqs * 10000
max_size = max(freqs)
sizes = [
round(max_size) / 4,
round(max_size) / 2,
round(max_size),
] # Replace with the sizes you want in the legend
labels = [
str(round(max_size / 100) / 4) + "%",
str(round(max_size / 100) / 2) + "%",
str(round(max_size / 100)) + "%",
] # Replace with the labels you want in the legend

# Add legend
legend_elements = [
plt.Line2D(
[0],
[0],
marker="o",
color="w",
label=label,
markerfacecolor="black",
markersize=size**0.5,
)
for size, label in zip(sizes, labels)
]

# Add first legend
legend1 = plt.legend(
handles=legend_elements,
loc="lower right",
title="Total frequency",
title_fontsize=30,
fontsize=30,
handlelength=6,
handletextpad=1,
bbox_to_anchor=(0.0, -0.15, 1.0, 0.102),
)

if n in tops:
plt.text(
pos[n][0],
Expand Down Expand Up @@ -3903,10 +3971,11 @@ def cn_map(
]

# Add legend to bottom of plot
plt.gca().add_artist(legend1)
plt.legend(
handles=legend_patches,
bbox_to_anchor=(0.0, -0.15, 1.0, 0.102),
loc="lower center",
loc="lower left",
ncol=3,
borderaxespad=0.0,
fontsize=35,
Expand Down
17 changes: 15 additions & 2 deletions src/spacec/plotting/_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,9 @@
def segmentation_ch(
file_name, # image for segmentation
channel_file, # all channels used for staining
output_dir, #
output_dir,
savefig=False, # new
output_fname="", # new
extra_seg_ch_list=None, # channels used for membrane segmentation
nuclei_channel="DAPI",
input_format="Multichannel", # CODEX or Phenocycler --> This depends on the machine you are using and the resulting file format (see documentation above)
Expand Down Expand Up @@ -68,7 +70,18 @@ def segmentation_ch(
ax[1].imshow(image_dict["segmentation_channel"])
ax[0].set_title("nuclei")
ax[1].set_title("membrane")
plt.show()

# save or plot figure
if savefig:
plt.savefig(
output_dir + output_fname + ".pdf",
format="pdf",
dpi=300,
transparent=True,
bbox_inches="tight",
)
else:
plt.show()


def show_masks(
Expand Down
Loading

0 comments on commit c6836b4

Please sign in to comment.