I have followed the "Atomic14" tutorial step by step, but I am running into an error in the "Generate Training Data" step.
I need a solution for this error.
CODE:
def process_files(file_names, label, repeat=1):
    file_names = tf.repeat(file_names, repeat).numpy()
    return [(process_file(file_name), label) for file_name in tqdm(file_names, desc=f"{word} ({label})", leave=False)]

def process_word(word, repeat=1):
    # the index of the word we are processing
    label = words.index(word)
    # get a list of file names for the word
    file_names = [file_name for file_name in tqdm(get_files(word), desc="Checking", leave=False) if is_valid_file(file_name)]
    # randomly shuffle the filenames
    np.random.shuffle(file_names)
    # split the files into train, validate and test buckets
    train_size = int(TRAIN_SIZE * len(file_names))
    validation_size = int(VALIDATION_SIZE * len(file_names))
    test_size = int(TEST_SIZE * len(file_names))
    # get the training samples
    train.extend(
        process_files(
            file_names[:train_size],
            label,
            repeat=repeat
        )
    )
    # and the validation samples
    validate.extend(
        process_files(
            file_names[train_size:train_size + validation_size],
            label,
            repeat=repeat
        )
    )
    # and the test samples
    test.extend(
        process_files(
            file_names[train_size + validation_size:],
            label,
            repeat=repeat
        )
    )

for word in tqdm(words, desc="Processing words"):
    if '_' not in word:
        # add more examples of marvin to balance our training set
        repeat = 70 if word == 'marvin' else 1
        process_word(word, repeat=repeat)

print(len(train), len(test), len(validate))
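For context, this cell depends on names defined in earlier cells of the notebook (words, get_files, is_valid_file, process_file, the split constants, and the output lists). A minimal placeholder setup is sketched below; the split fractions are illustrative values only, not taken from the tutorial:

import numpy as np
import tensorflow as tf
from tqdm import tqdm

# placeholder split fractions (illustrative, not the tutorial's values)
TRAIN_SIZE = 0.8
VALIDATION_SIZE = 0.1
TEST_SIZE = 0.1

# buckets that process_word() extends with (processed sample, label) pairs
train = []
validate = []
test = []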
Error:
AttributeError Traceback (most recent call last)
Cell In[11], line 47
44 if '_' not in word:
45 # add more examples of marvin to balance our training set
46 repeat = 70 if word == 'marvin' else 1
---> 47 process_word(word, repeat=repeat)
49 print(len(train), len(test), len(validate))
Cell In[11], line 10, in process_word(word, repeat)
8 label = words.index(word)
9 # get a list of files names for the word
---> 10 file_names = [file_name for file_name in tqdm(get_files(word), desc="Checking", leave=False) if is_valid_file(file_name)]
11 # randomly shuffle the filenames
12 np.random.shuffle(file_names)
Cell In[11], line 10, in <listcomp>(.0)
8 label = words.index(word)
9 # get a list of files names for the word
---> 10 file_names = [file_name for file_name in tqdm(get_files(word), desc="Checking", leave=False) if is_valid_file(file_name)]
11 # randomly shuffle the filenames
12 np.random.shuffle(file_names)
Cell In[9], line 29, in is_valid_file(file_name)
27 def is_valid_file(file_name):
28 #load the audio file
---> 29 audio_tensor = tfio.audio.AudiofIOTensor(file_name)
30 #cehck the file is long enough
31 if not is_correct_length(audio_tensor, EXPECTED_SAMPLES):
AttributeError: module 'tensorflow_io.python.api.audio' has no attribute 'AudiofIOTensor'
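From the traceback, the failing line calls tfio.audio.AudiofIOTensor, while the class that tensorflow_io actually exposes is spelled AudioIOTensor (no stray "f"). A minimal sketch of is_valid_file with the corrected name is below, assuming is_correct_length and EXPECTED_SAMPLES are the helper and constant from the earlier notebook cell shown in the traceback; any remaining checks from the tutorial would follow the same pattern:

import tensorflow_io as tfio

def is_valid_file(file_name):
    # load the audio file (note the spelling: AudioIOTensor)
    audio_tensor = tfio.audio.AudioIOTensor(file_name)
    # check the file is long enough; helper and constant come from the earlier cell
    if not is_correct_length(audio_tensor, EXPECTED_SAMPLES):
        return False
    return True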