import gzip
import pickle
import numpy as np

def loadData(self, mnist_url):
    # Load the pickled MNIST splits (training, validation, test) from a gzip file.
    f = gzip.open(str(mnist_url), 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding="latin1")
    f.close()
    # Reshape each flat 784-pixel image into a 784x1 column vector: (50000, 784, 1)
    training_inputs = [np.reshape(x, (784, 1)) for x in training_data[0]]
    # One-hot encode each training label into a 10x1 vector: (50000, 10, 1)
    training_results = [self.vectorized_result(y) for y in training_data[1]]
    training_data = zip(training_inputs, training_results)
    # validation_inputs = [np.reshape(x, (784, 1)) for x in validation_data[0]]
    # validation_data = zip(validation_inputs, validation_data[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in test_data[0]]
    test_data = zip(test_inputs, test_data[1])
    return (training_inputs, training_results, test_data)
This code defines a function named loadData whose parameter mnist_url is the path to a pickled, gzipped MNIST dataset. The function opens the file with the gzip module, unpickles it with pickle, and obtains three splits: training_data, validation_data, and test_data. Each training image is reshaped from a flat 784-element array into a 784x1 column vector and collected in training_inputs (50,000 entries of shape (784, 1)); each training label is one-hot encoded by self.vectorized_result into a 10x1 vector with a 1 at the label's index and 0 elsewhere, collected in training_results. The test images are reshaped the same way and zipped with their labels into test_data. The commented-out lines would do the same for the validation split, which this function does not use. Finally, the function returns the tuple (training_inputs, training_results, test_data).
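The helper self.vectorized_result is referenced but not shown in the snippet. A minimal sketch consistent with the one-hot encoding described above (an assumption about its body, not the author's exact code) might be:

def vectorized_result(self, y):
    # Return a 10x1 column vector with 1.0 at index y and zeros elsewhere.
    e = np.zeros((10, 1))
    e[y] = 1.0
    return e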
Related questions
import gzip
import os
import pickle
import numpy as np

def load_mnist(path, kind='train'):
    # Build paths to the gzipped IDX label and image files for the given split.
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
    with gzip.open(labels_path, 'rb') as lbpath:
        labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
    with gzip.open(images_path, 'rb') as imgpath:
        images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)
    return images, labels

def get_mnist_data(num_training=5000, num_validation=500, num_test=500):
    mnist_dir = r'D:\daima\mnist'  # change to the directory containing the MNIST files
    X_train, y_train = load_mnist(mnist_dir, kind='train')
    X_test, y_test = load_mnist(mnist_dir, kind='t10k')
    print(X_train.shape)
    # Carve a validation split out of the training set.
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Scale pixel values to [0, 1].
    X_train = X_train.astype('float32') / 255
    X_val = X_val.astype('float32') / 255
    X_test = X_test.astype('float32') / 255
    return X_train, y_train, X_val, y_val, X_test, y_test

This code raises: TypeError: tuple indices must be integers or slices, not str
This error means that a tuple is being indexed with a string somewhere, as if it were a dictionary. load_mnist returns a plain tuple (images, labels), and the unpacking shown above, X_train, y_train = load_mnist(mnist_dir, kind='train'), is correct, so the string indexing most likely happens in code that calls get_mnist_data and treats its tuple return value like a dict (e.g. result['X_train']). Changing kind to an integer would not help: kind is interpolated into the file names ('%s-labels-idx1-ubyte.gz' % kind), so it must remain 'train' or 't10k'. The fix is to access the returned values by position or by unpacking, for example X_train, y_train, X_val, y_val, X_test, y_test = get_mnist_data().
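As an illustration of how this exception typically arises (the access pattern below is hypothetical, not taken from the question):

data = load_mnist(r'D:\daima\mnist', kind='train')   # returns the tuple (images, labels)
# images = data['images']    # TypeError: tuple indices must be integers or slices, not str
images, labels = data        # correct: unpack the tuple
images = data[0]             # also correct: positional indexing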
import os
import os.path
import gzip
import pickle
import urllib.request  # needed for urllib.request.urlretrieve
import numpy as np

url_base = 'http://yann.lecun.com/exdb/mnist/'
key_file = {
    'train_img': 'train-images-idx3-ubyte.gz',
    'train_label': 'train-labels-idx1-ubyte.gz',
    'test_img': 't10k-images-idx3-ubyte.gz',
    'test_label': 't10k-labels-idx1-ubyte.gz'
}

dataset_dir = os.path.dirname(os.path.abspath(__file__))  # original used the literal string "_file_"
save_file = dataset_dir + "/mnist.pkl"

train_num = 60000
test_num = 10000
img_dim = (1, 28, 28)
img_size = 784

def _download(file_name):
    file_path = dataset_dir + "/" + file_name
    if os.path.exists(file_path):
        return
    print("Downloading " + file_name + " ... ")
    urllib.request.urlretrieve(url_base + file_name, file_path)
    print("Done")

def download_mnist():
    for v in key_file.values():
        _download(v)

def _load_label(file_name):
    file_path = dataset_dir + "/" + file_name
    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as f:
        labels = np.frombuffer(f.read(), np.uint8, offset=8)
    print("Done")
    return labels

def _load_img(file_name):
    file_path = dataset_dir + "/" + file_name
    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
        data = data.reshape(-1, img_size)
    print("Done")
    return data

def _convert_numpy():
    dataset = {}
    dataset['train_img'] = _load_img(key_file['train_img'])
    dataset['train_label'] = _load_label(key_file['train_label'])
    dataset['test_img'] = _load_img(key_file['test_img'])
    dataset['test_label'] = _load_label(key_file['test_label'])
    return dataset

def init_mnist():
    download_mnist()
    dataset = _convert_numpy()
    print("Creating pickle file ...")
    with open(save_file, 'wb') as f:
        pickle.dump(dataset, f, -1)
    print("Done")

if __name__ == '__main__':
    init_mnist()
This script downloads the MNIST dataset and converts it into NumPy arrays. MNIST is a handwritten-digit dataset containing 60,000 training images and 10,000 test images. The script first defines the download URL and the names of the four gzipped files, then defines helpers for downloading and conversion: _download fetches one file and skips it if it already exists locally, download_mnist downloads all four files, and _load_label and _load_img decompress the gzip files and convert labels and images into NumPy arrays (the offsets 8 and 16 skip the IDX file headers). _convert_numpy assembles the four arrays into a single dict, and init_mnist downloads the files, runs the conversion, and pickles the resulting dict into mnist.pkl. Note that running the script always rebuilds the pickle; only the raw .gz downloads are skipped when they are already present on disk.
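For completeness, a minimal sketch of how the saved mnist.pkl could be read back; this loader is an assumption and is not part of the snippet above, but it matches the dict layout that init_mnist writes:

import pickle

def load_mnist_pickle(path="mnist.pkl"):
    # Read back the dict written by init_mnist(); keys are
    # 'train_img', 'train_label', 'test_img', 'test_label'.
    with open(path, 'rb') as f:
        dataset = pickle.load(f)
    return dataset

# Example usage (shapes assume the conversion above ran successfully):
dataset = load_mnist_pickle()
print(dataset['train_img'].shape)   # (60000, 784)
print(dataset['test_label'].shape)  # (10000,)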