-
Notifications
You must be signed in to change notification settings - Fork 5
203 misc blech post process tweaks and issues #205
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 5 commits
5fac19c
3613383
2cf30a5
a28569a
a447407
b1ed2b2
694ada5
55c90a0
bb490af
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -34,6 +34,8 @@ def __init__(self, sort_file_path): | |
sort_table.sort_values( | ||
['len_cluster','Split'], | ||
ascending=False, inplace=True) | ||
if 'level_0' in sort_table.columns: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
The issue is that it then permanently writes that column to the output csv file. If you're using the csv as the input for cell sorting, you create it manually, yes, but then on the first run through post_process, it writes in that column. If you run the same CSV through post-process AGAIN, it tries to write `level_0` again and raises an error. So this is a little bit of a niche problem; you need to be running post_process with a csv input, and then re-run the same csv, to create the error. I mainly ran into the issue as a result of testing my code as I familiarize myself with the pipeline. That said, I could imagine scenarios where you accidentally added the wrong cell to the spreadsheet, or you're not happy with a split/merge outcome, or something similar, and want to run post_process again — and if you're using the csv input (which I like a lot, being very pro-automation), then this saves you from needing to manually go in and delete `level_0`. |
||
sort_table.drop(columns=['level_0'], inplace=True) | ||
sort_table.reset_index(inplace=True) | ||
sort_table['unit_saved'] = False | ||
self.sort_table = sort_table | ||
|
@@ -160,7 +162,7 @@ def gen_select_cluster_plot(electrode_num, num_clusters, clusters): | |
ax[cluster_num,0].axis('off') | ||
ax[cluster_num, 1].imshow(waveform_plot,aspect='auto'); | ||
ax[cluster_num,1].axis('off') | ||
fig.suptitle('Are these the neurons you want to select?') | ||
fig.suptitle('Are these the neurons you want to select? Press q to exit plot') | ||
fig.tight_layout() | ||
plt.show() | ||
|
||
|
@@ -216,12 +218,18 @@ def generate_cluster_plots( | |
plt.tight_layout() | ||
plt.show() | ||
|
||
def get_clustering_params(): | ||
def get_clustering_params(this_sort_file_handler): | ||
""" | ||
Ask user for clustering parameters | ||
""" | ||
# Get clustering parameters from user | ||
n_clusters = int(input('Number of clusters (default=5): ') or "5") | ||
if (this_sort_file_handler.sort_table is not None): | ||
dat_row = this_sort_file_handler.current_row | ||
split_val = int(re.findall('[0-9]+', str(dat_row.Split))[0]) | ||
n_clusters = int(input(f'Number of clusters (sort file={split_val})') or split_val) | ||
else: | ||
n_clusters = int(input('Number of clusters (default=5): ') or "5") | ||
|
||
fields = [ | ||
'Max iterations', | ||
'Convergence criterion', | ||
|
@@ -982,15 +990,15 @@ def ask_split(self): | |
check_func = lambda x: x in ['y','n'], | ||
fail_response = 'Please enter (y/n)') | ||
if continue_bool: | ||
if msg == 'y': | ||
if msg == 'y': | ||
self.split = True | ||
elif msg == 'n': | ||
self.split = False | ||
|
||
def check_split_sort_file(self): | ||
abuzarmahmood marked this conversation as resolved.
Show resolved
Hide resolved
|
||
if self.this_sort_file_handler.sort_table is not None: | ||
dat_row = self.this_sort_file_handler.current_row | ||
if len(dat_row.Split) > 0: | ||
if not (dat_row.Split == ''): | ||
self.split=True | ||
else: | ||
self.split=False | ||
|
Uh oh!
There was an error while loading. Please reload this page.