Python Quantitative Trading: Stock Analysis


# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 13:21:22 2016
K-NearestNeighbor
"""
import numpy as np
import operator


class KNNClassifier():
    """This is a Nearest Neighbor classifier."""

    # set the value of k
    def __init__(self, k=3):
        self._k = k

    # compute the distances between a new sample and the labelled samples
    # and return the indices sorted from nearest to farthest
    def _calEDistance(self, inSample, dataset):
        m = dataset.shape[0]
        diffMat = np.tile(inSample, (m, 1)) - dataset
        sqDiffMat = diffMat ** 2              # square every element
        sqDistances = sqDiffMat.sum(axis=1)   # sum over the features
        distances = sqDistances ** 0.5        # take the square root
        return distances.argsort()            # indices sorted by ascending distance

    def _classify0(self, inX, dataSet, labels):
        k = self._k
        dataSetSize = dataSet.shape[0]
        diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
        sqDiffMat = diffMat ** 2
        sqDistances = sqDiffMat.sum(axis=1)
        distances = sqDistances ** 0.5
        sortedDistIndicies = distances.argsort()
        classCount = {}
        for i in range(k):
            voteIlabel = labels[sortedDistIndicies[i]]
            classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
        sortedClassCount = sorted(classCount.items(),
                                  key=operator.itemgetter(1), reverse=True)
        return sortedClassCount[0][0]

    # classify a single sample
    def _classify(self, sample, train_X, train_y):
        # type checking
        if isinstance(sample, np.ndarray) and isinstance(train_X, np.ndarray) \
                and isinstance(train_y, np.ndarray):
            pass
        else:
            try:
                sample = np.array(sample)
                train_X = np.array(train_X)
                train_y = np.array(train_y)
            except:
                raise TypeError("numpy.ndarray required for train_X and train_y")
        sortedDistances = self._calEDistance(sample, train_X)
        classCount = {}
        for i in range(self._k):
            oneVote = train_y[sortedDistances[i]]   # label of the i-th nearest point
            classCount[oneVote] = classCount.get(oneVote, 0) + 1
        sortedClassCount = sorted(classCount.items(),
                                  key=operator.itemgetter(1), reverse=True)
        # print("the sample:", sample, "is classified as", sortedClassCount[0][0])
        return sortedClassCount[0][0]

    def classify(self, test_X, train_X, train_y):
        results = []
        # type checking
        if isinstance(test_X, np.ndarray) and isinstance(train_X, np.ndarray) \
                and isinstance(train_y, np.ndarray):
            pass
        else:
            try:
                test_X = np.array(test_X)
                train_X = np.array(train_X)
                train_y = np.array(train_y)
            except:
                raise TypeError("numpy.ndarray required for train_X and train_y")
        d = len(np.shape(test_X))
        if d == 1:            # a single sample
            sample = test_X
            result = self._classify(sample, train_X, train_y)
            results.append(result)
        else:                 # a batch of samples
            for i in range(len(test_X)):
                sample = test_X[i]
                result = self._classify(sample, train_X, train_y)
                results.append(result)
        return results


if __name__ == "__main__":
    train_X = [[1, 2, 0, 1, 0],
               [0, 1, 1, 0, 1],
               [1, 0, 0, 0, 1],
               [2, 1, 1, 0, 1],
               [1, 1, 0, 1, 1]]
    train_y = [1, 1, 0, 0, 0]
    clf = KNNClassifier(k=3)
    sample = [[1, 2, 0, 1, 0], [1, 2, 0, 1, 1]]
    result = clf.classify(sample, train_X, train_y)

 


 

Python Decision Tree Algorithm (ID3 & C4.5)

The decision tree algorithm classifies samples step by step according to their attributes, so that classification becomes faster and more effective. Each new splitting attribute can be chosen by the information gain IG or by the information gain ratio IGR; the former gives the basic ID3 algorithm, the latter the improved C4.5 algorithm.
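As a reference, these are the standard definitions behind the two criteria (stated here for convenience; they match what the _calEntropy and _chooseBestSplit methods in the code below compute):

\[
H(Y) = -\sum_i p_i \log_2 p_i, \qquad
IG(Y, A) = H(Y) - \sum_{v \in \mathrm{values}(A)} \frac{|Y_{A=v}|}{|Y|} H(Y_{A=v}), \qquad
IGR(Y, A) = \frac{IG(Y, A)}{-\sum_{v} \frac{|Y_{A=v}|}{|Y|} \log_2 \frac{|Y_{A=v}|}{|Y|}}
\]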

Taking ID3 as an example, the training procedure is programmed as follows:

(1) Input X and y (X holds the samples, y the labels); rows are samples, columns are features.

(2) Compute the information gain IG of each feature and select the feature that maximizes IG.

(3) Build the sample matrix obtained after removing the best splitting feature.

(4) Group the updated samples by the values of the best splitting feature:

attribute value 1 (x1, y1)    attribute value 2 (x2, y2)    attribute value 3 (x3, y3)

(5) Repeat the steps above on each of these groups until a leaf node is reached (recursive call).

A node is a leaf when:

(1) All of its label values y are identical.

(2) No features are left to split on.

The testing procedure is programmed as follows:

(1) Load the trained decision tree.

(2) Starting from the root node, recursively traverse the tree until a leaf node is reached.

The full code is given below. The trained decision tree is stored as a recursively nested dictionary whose keys are built from feature indices and feature values and whose leaves are labels; an illustrative example follows.
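For illustration only (this example is not taken from the original post), a fitted tree might look like the nested dict below; a key such as 'x0' refers to feature index 0, the inner keys are that feature's possible values, and any non-dict value is a leaf label:

# Illustrative only: the shape of the nested-dict tree that _createTree() builds.
example_tree = {'x0': {0: 1,                      # feature 0 == 0 -> label 1
                       1: {'x2': {0: 0, 1: 1}},   # feature 0 == 1 -> split again on feature 2
                       2: 0}}                     # feature 0 == 2 -> label 0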

 

# -*- coding: utf-8 -*-
"""
Created on Mon Nov 07 09:06:37 2016
@author: yehx

Created on Sun Feb 21 12:17:10 2016
Decision Tree Source Code
@author: liudiwei
"""
import os
import numpy as np


class DecitionTree():
    """This is a decision tree classifier."""

    def __init__(self, criteria='ID3'):
        self._tree = None
        if criteria == 'ID3' or criteria == 'C4.5':
            self._criteria = criteria
        else:
            raise Exception("criterion should be ID3 or C4.5")

    def _calEntropy(self, y):
        '''
        Purpose: compute the Shannon entropy e = -sum(pi * log pi)
        Input:   y, an array of labels
        Output:  the entropy
        '''
        n = y.shape[0]
        labelCounts = {}
        for label in y:
            if label not in labelCounts.keys():
                labelCounts[label] = 1
            else:
                labelCounts[label] += 1
        entropy = 0.0
        for key in labelCounts:
            prob = float(labelCounts[key]) / n
            entropy -= prob * np.log2(prob)
        return entropy

    def _splitData(self, X, y, axis, cutoff):
        """
        Input:  X (features), y (labels), axis (index of a feature),
                cutoff (a value of that feature)
        Output: the subset of the data whose feature `axis` equals `cutoff`,
                with that feature column removed from X
        """
        ret = []
        featVec = X[:, axis]
        n = X.shape[1]                    # number of features
        # sample matrix with column `axis` removed
        X = X[:, [i for i in range(n) if i != axis]]
        for i in range(len(featVec)):
            if featVec[i] == cutoff:
                ret.append(i)
        return X[ret, :], y[ret]

    def _chooseBestSplit(self, X, y):
        """ID3 & C4.5
        Input:  X (features), y (labels)
        Purpose: pick the best splitting feature by information gain (ID3)
                 or information gain ratio (C4.5)
        Output: the index of the best splitting feature
        """
        numFeat = X.shape[1]
        baseEntropy = self._calEntropy(y)
        bestSplit = 0.0
        best_idx = -1
        for i in range(numFeat):
            featlist = X[:, i]            # the i-th feature column
            uniqueVals = set(featlist)
            curEntropy = 0.0
            splitInfo = 0.0
            for value in uniqueVals:
                sub_x, sub_y = self._splitData(X, y, i, value)
                prob = len(sub_y) / float(len(y))             # probability of this feature value
                curEntropy += prob * self._calEntropy(sub_y)  # accumulate the conditional entropy
                splitInfo -= prob * np.log2(prob)             # split information, used by the gain ratio
            IG = baseEntropy - curEntropy
            if self._criteria == "ID3":
                if IG > bestSplit:
                    bestSplit = IG
                    best_idx = i
            if self._criteria == "C4.5":
                if splitInfo == 0.0:      # avoid dividing by zero
                    continue
                IGR = IG / splitInfo
                if IGR > bestSplit:
                    bestSplit = IGR
                    best_idx = i
        return best_idx

    def _majorityCnt(self, labellist):
        """
        Input:  labellist, a list of class labels
        Output: the label that occurs most often in labellist
        """
        labelCount = {}
        for vote in labellist:
            if vote not in labelCount.keys():
                labelCount[vote] = 0
            labelCount[vote] += 1
        sortedClassCount = sorted(labelCount.items(), key=lambda x: x[1],
                                  reverse=True)
        return sortedClassCount[0][0]

    def _createTree(self, X, y, featureIndex):
        """
        Input:  X (features), y (labels), featureIndex (a tuple recording the
                index of each column of X in the original data)
        Output: a complete tree built from the current featureIndex
        """
        labelList = list(y)
        # if all labels are identical (leaf node), return the label directly
        if labelList.count(labelList[0]) == len(labelList):
            return labelList[0]
        # if no feature is left to split on, return the majority label
        if len(featureIndex) == 0:
            return self._majorityCnt(labelList)
        # index of the best splitting feature in the current matrix
        bestFeatIndex = self._chooseBestSplit(X, y)
        # index of the best splitting feature in the original data
        bestFeatAxis = featureIndex[bestFeatIndex]
        featureIndex = list(featureIndex)
        featureIndex.remove(bestFeatAxis)   # drop the chosen feature from the index list
        featureIndex = tuple(featureIndex)
        myTree = {bestFeatAxis: {}}
        featValues = X[:, bestFeatIndex]
        uniqueVals = set(featValues)
        for value in uniqueVals:
            # recursively build a subtree for each value of the chosen feature
            sub_X, sub_y = self._splitData(X, y, bestFeatIndex, value)
            myTree[bestFeatAxis][value] = self._createTree(sub_X, sub_y,
                                                           featureIndex)
        return myTree

    def fit(self, X, y):
        """
        Input:  X (features), y (class labels)
        Note:   X and y are checked and converted to numpy arrays
        Output: self, which allows chaining clf.fit().predict()
        """
        if isinstance(X, np.ndarray) and isinstance(y, np.ndarray):
            pass
        else:
            try:
                X = np.array(X)
                y = np.array(y)
            except:
                raise TypeError("numpy.ndarray required for X, y")
        featureIndex = tuple(['x' + str(i) for i in range(X.shape[1])])
        self._tree = self._createTree(X, y, featureIndex)
        return self

    def _classify(self, tree, sample):
        """
        Classify one sample with the trained model.
        Note: building the tree is recursive, and so is classification;
        _classify() handles a single sample at a time.
        """
        featIndex = list(tree.keys())[0]   # key of the root node, e.g. 'x0'
        secondDict = tree[featIndex]       # subtree split on that feature
        axis = featIndex[1:]               # index of the feature in the original data
        key = sample[int(axis)]            # the sample's value for that feature
        valueOfKey = secondDict[key]       # the branch matching this value
        if type(valueOfKey).__name__ == 'dict':
            # still an internal node: keep recursing
            return self._classify(valueOfKey, sample)
        else:
            return valueOfKey

    def predict(self, X):
        if self._tree is None:
            raise NotImplementedError("Estimator not fitted, call `fit` first")
        # make sure X is an array
        if isinstance(X, np.ndarray):
            pass
        else:
            try:
                X = np.array(X)
            except:
                raise TypeError("numpy.ndarray required for X")
        if len(X.shape) == 1:
            return self._classify(self._tree, X)
        else:
            result = []
            for i in range(X.shape[0]):
                value = self._classify(self._tree, X[i])
                print(str(i + 1) + "-th sample is classified as:", value)
                result.append(value)
            return np.array(result)

    def show(self, outpdf):
        if self._tree is None:
            pass
        # plot the tree using matplotlib
        import treePlotter
        treePlotter.createPlot(self._tree, outpdf)


if __name__ == "__main__":
    trainfile = r"data\train.txt"
    testfile = r"data\test.txt"
    import sys
    sys.path.append(r"F:\CSU\Github\MachineLearning\lib")
    import dataload as dload
    train_x, train_y = dload.loadData(trainfile)
    test_x, test_y = dload.loadData(testfile)
    clf = DecitionTree(criteria="C4.5")
    clf.fit(train_x, train_y)
    result = clf.predict(test_x)
    outpdf = r"tree.pdf"
    clf.show(outpdf)

 

 

 

Python K-Means Clustering

 

K-means clustering is an unsupervised machine learning algorithm that groups samples into clusters automatically.

The algorithm proceeds as follows:

(1) Randomly generate K cluster centers, usually called centroids.

(2) Assign every sample to the cluster of its nearest centroid. (The distance can be the Euclidean distance, the Manhattan distance, cosine similarity, etc.; the formulas are given after this list.)

(3) Recompute the centroid of each resulting cluster; the new centroid can be taken as the mean of all samples in that cluster.

(4) Repeat steps (2) and (3) until one of the following holds:

    1) The cluster assignments no longer change.

    2) The minimum error (e.g. the sum of squared errors) reaches the required range.

    3) The total number of iterations reaches the configured maximum.
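For reference (standard definitions, not from the original post), the three distances mentioned in step (2) are, for two samples a and b:

\[
d_E(a, b) = \sqrt{\sum_j (a_j - b_j)^2}, \qquad
d_M(a, b) = \sum_j \lvert a_j - b_j \rvert, \qquad
\cos(a, b) = \frac{a \cdot b}{\lVert a \rVert \, \lVert b \rVert}.
\]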

A common variant is bisecting (2-way) K-means clustering, whose steps are as follows (a code sketch follows the list):

(1) Start with all samples in a single cluster.

(2) Split that cluster into two using the standard K-means procedure.

(3) Try splitting each of the two resulting clusters into two again, compute the error of each candidate split, and keep only the split with the smaller error; that is, of the two clusters produced in step (2), one is kept as it is and the other is split again.

(4) Keep bisecting the existing clusters in the same way, each time keeping the split with the smallest error, until the required number of clusters is reached.
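Below is a minimal sketch of bisecting K-means, assuming scikit-learn's KMeans is available for the 2-way splits. The helper name bisecting_kmeans is this sketch's own, and it picks the cluster whose bisection reduces the total squared error the most, which is one common way to realize the "keep the split with the smallest error" rule above:

# A minimal sketch of bisecting K-means (illustrative; not the post's own code).
import numpy as np
from sklearn.cluster import KMeans

def bisecting_kmeans(X, k):
    clusters = [np.arange(X.shape[0])]            # start with all samples in one cluster
    while len(clusters) < k:
        best_gain, best_i, best_split = None, None, None
        for i, idx in enumerate(clusters):
            if len(idx) < 2:                      # nothing left to split here
                continue
            km = KMeans(n_clusters=2, n_init=10).fit(X[idx])
            sse_before = ((X[idx] - X[idx].mean(axis=0)) ** 2).sum()  # SSE before the split
            gain = sse_before - km.inertia_       # error reduction achieved by the split
            if best_gain is None or gain > best_gain:
                best_gain, best_i = gain, i
                best_split = (idx[km.labels_ == 0], idx[km.labels_ == 1])
        if best_i is None:                        # no cluster can be split any further
            break
        clusters.pop(best_i)
        clusters.extend(best_split)               # keep the lower-error split
    labels = np.empty(X.shape[0], dtype=int)
    for c, idx in enumerate(clusters):
        labels[idx] = c
    return labels

# usage example on random data
if __name__ == "__main__":
    X = np.random.rand(100, 2)
    print(bisecting_kmeans(X, 4))

The KMeansClassifier defined below could stand in for sklearn's KMeans here with minor changes.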

The Python code for the standard K-means classifier is as follows:

 

# -*- coding: utf-8 -*-
"""
Created on Tue Nov 08 14:01:44 2016
K-means cluster
"""
import numpy as np


class KMeansClassifier():
    """This is a k-means classifier."""

    def __init__(self, k=3, initCent='random', max_iter=500):
        self._k = k
        self._initCent = initCent
        self._max_iter = max_iter
        self._clusterAssment = None
        self._labels = None
        self._sse = None

    def _calEDist(self, arrA, arrB):
        """
        Purpose: Euclidean distance
        Input:   two 1-D arrays
        """
        return np.sqrt(np.sum(np.power(arrA - arrB, 2)))

    def _calMDist(self, arrA, arrB):
        """
        Purpose: Manhattan distance
        Input:   two 1-D arrays
        """
        return sum(np.abs(arrA - arrB))

    def _randCent(self, data_X, k):
        """
        Purpose: pick k random centroids
        Output:  centroids, a k*n matrix of centroids
        """
        n = data_X.shape[1]              # number of features
        centroids = np.empty((k, n))     # a k*n matrix holding the centroids
        for j in range(n):
            minJ = min(data_X[:, j])
            rangeJ = float(max(data_X[:, j]) - minJ)
            # flatten() turns the nested array into a 1-D array
            centroids[:, j] = (minJ + rangeJ * np.random.rand(k, 1)).flatten()
        return centroids

    def fit(self, data_X):
        """
        Input: an m*n matrix of samples
        """
        if not isinstance(data_X, np.ndarray) or \
                isinstance(data_X, np.matrixlib.defmatrix.matrix):
            try:
                data_X = np.asarray(data_X)
            except:
                raise TypeError("numpy.ndarray required for data_X")
        m = data_X.shape[0]              # number of samples
        # an m*2 matrix: column 0 stores the index of the cluster each sample
        # belongs to, column 1 the squared error to that cluster's centroid
        self._clusterAssment = np.zeros((m, 2))
        if self._initCent == 'random':
            self._centroids = self._randCent(data_X, self._k)
        clusterChanged = True
        for _ in range(self._max_iter):  # "_" because the loop index is not used
            clusterChanged = False
            for i in range(m):           # assign each sample to its nearest centroid
                minDist = np.inf         # start with an infinitely large distance
                minIndex = -1            # and an invalid centroid index
                for j in range(self._k):  # search for the nearest centroid
                    arrA = self._centroids[j, :]
                    arrB = data_X[i, :]
                    distJI = self._calEDist(arrA, arrB)
                    if distJI < minDist:
                        minDist = distJI
                        minIndex = j
                if self._clusterAssment[i, 0] != minIndex:
                    clusterChanged = True
                self._clusterAssment[i, :] = minIndex, minDist ** 2
            if not clusterChanged:       # assignments are stable: stop iterating
                break
            for i in range(self._k):     # recompute each centroid as the cluster mean
                ptsInClust = data_X[np.nonzero(self._clusterAssment[:, 0] == i)[0]]
                if len(ptsInClust) > 0:
                    self._centroids[i, :] = np.mean(ptsInClust, axis=0)
        self._labels = self._clusterAssment[:, 0]
        self._sse = sum(self._clusterAssment[:, 1])
        return self

 

 

Python Logistic Regression Classification

Logistic regression can be seen as an extension of linear regression: it is trained on samples with two known classes in order to predict the class of new samples. Suppose we have an M x N sample matrix X with known classes, where M is the number of samples and N the feature dimension, and the corresponding known labels form an M x 1 matrix Y. Logistic regression is then implemented as follows:

(1) Apply a linear transformation to the features of X with a weight vector W (N x 1), obtaining transformed samples X' (M x 1); the goal is that samples belonging to different classes are separated in X' by a clear one-dimensional boundary.

(2) Apply a further function transformation to X', so that values on the two sides of that one-dimensional boundary are mapped into the corresponding ranges.

(3) Training consists of adjusting W so that, as far as possible, the transformed values fall on the correct sides of the boundary, consistent with the known classes.

(4) For logistic regression, the boundary of the original samples is mapped to the boundary x = 0.

Typical logistic regression code is given below.
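For reference (standard formulas, stated here for convenience), the _sigmoid and _gradDescent methods in the code implement the sigmoid transform and the iterative weight update

\[
\sigma(z) = \frac{1}{1 + e^{-z}}, \qquad h = \sigma(XW), \qquad W \leftarrow W + \alpha X^{T}(Y - h),
\]

where α is the learning rate; despite the method's name, this update performs gradient ascent on the log-likelihood (equivalently, gradient descent on the negative log-likelihood).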

# -*- coding: utf-8 -*-
"""
Created on Wed Nov 09 15:21:48 2016
Logistic regression classification
"""
import numpy as np


class LogisticRegressionClassifier():

    def __init__(self):
        self._alpha = None

    # the sigmoid function
    def _sigmoid(self, fx):
        return 1.0 / (1 + np.exp(-fx))

    # alpha is the step size (learning rate); maxCycles is the maximum number of iterations
    def _gradDescent(self, featData, labelData, alpha, maxCycles):
        dataMat = np.mat(featData)                  # size: m*n
        labelMat = np.mat(labelData).transpose()    # size: m*1
        m, n = np.shape(dataMat)
        weigh = np.ones((n, 1))
        for i in range(maxCycles):
            hx = self._sigmoid(dataMat * weigh)
            error = labelMat - hx                   # size: m*1
            weigh = weigh + alpha * dataMat.transpose() * error  # update the weights from the error
        return weigh

    # train the model with gradient descent; swap in another optimizer here if needed
    def fit(self, train_x, train_y, alpha=0.01, maxCycles=100):
        return self._gradDescent(train_x, train_y, alpha, maxCycles)

    # classify with the learned weights
    def predict(self, test_X, test_y, weigh):
        dataMat = np.mat(test_X)
        labelMat = np.mat(test_y).transpose()       # transpose() to get a column vector
        hx = self._sigmoid(dataMat * weigh)         # size: m*1
        m = len(hx)
        error = 0.0
        for i in range(m):
            if hx[i] > 0.5:
                print(str(i + 1) + '-th sample ', int(labelMat[i]), 'is classified as: 1')
                if int(labelMat[i]) != 1:
                    error += 1.0
                    print("classify error.")
            else:
                print(str(i + 1) + '-th sample ', int(labelMat[i]), 'is classified as: 0')
                if int(labelMat[i]) != 0:
                    error += 1.0
                    print("classify error.")
        error_rate = error / m
        print("error rate is:", "%.4f" % error_rate)
        return error_rate

 

 

Python Naive Bayes Classification

The core of Naive Bayes classification is the conditional probability P(y|x), where y is a class and x is a feature vector: it measures how likely a sample with features x is to belong to class y. By computing this probability for every class, the sample is assigned to the class with the largest probability.

By Bayes' rule,

P(y|x) = P(y) * P(x|y) / P(x).

Since the denominator on the right-hand side is the same for every class, only the numerators need to be compared. Moreover, if the individual features are independently distributed, P(x|y) equals the product of the P(xi|y).
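Putting these two observations together, the classifier therefore picks

\[
\hat{y} = \arg\max_{y} \; P(y) \prod_i P(x_i \mid y).
\]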

The use of Naive Bayes is illustrated below with text classification. The approach is as follows:

(1) Build a vocabulary, i.e. a list of unique words.

(2) Compute the probability P(y) of each class label.

(3) Compute the probability P(xi|y) of each word under each class label.

(4) For each class, multiply together the probabilities P(xi|y) of the features present in the sample to be classified, and then multiply by the corresponding P(y).

(5) Compare the results of (4) across the classes and assign the sample to the class with the largest value.
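As a toy illustration of steps (1) to (3) (the snippet and its data are invented for demonstration and are not part of the original post):

# Toy illustration: build a vocabulary, turn documents into count vectors,
# and compute the class prior. The documents and labels are made up.
docs = [['buy', 'cheap', 'stocks', 'now'],   # label 1
        ['meeting', 'at', 'noon'],           # label 0
        ['cheap', 'cheap', 'deal']]          # label 1
labels = [1, 0, 1]

vocab = sorted(set(word for doc in docs for word in doc))        # step (1): unique word list
vectors = [[doc.count(word) for word in vocab] for doc in docs]  # bag-of-words count vectors

p_y1 = sum(labels) / len(labels)                                 # step (2): prior P(y = 1)
counts_y1 = [sum(vec[j] for vec, y in zip(vectors, labels) if y == 1)
             for j in range(len(vocab))]                         # step (3): word counts in class 1
print(vocab)
print(vectors)
print("P(y=1) =", p_y1, " class-1 word counts:", counts_y1)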

The Python code for Naive Bayes text classification is given below. To keep the computation manageable (and to avoid underflow), the program uses the logarithm to turn the products into sums.
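Concretely, with the add-one (Laplace) smoothing used in _trainNB, the quantity compared for each class y is

\[
\log P(y) + \sum_i n_i \log P(x_i \mid y), \qquad
P(x_i \mid y) = \frac{N_{i,y} + 1}{N_y + 2},
\]

where n_i is the count of word i in the sample being classified, N_{i,y} is the total count of word i in the training documents of class y, and N_y is the total word count in class y (the +2 matches the initialization of p0InAll and p1InAll to 2.0 in the code).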

# -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 11:15:47 2016
Naive Bayes Classification
"""
import numpy as np


class NaiveBayes:

    def __init__(self):
        self._creteria = "NB"

    def _createVocabList(self, dataList):
        """
        Build the vocabulary (the list of unique words).
        """
        vocabSet = set([])
        for line in dataList:
            print(set(line))
            vocabSet = vocabSet | set(line)
        return list(vocabSet)

    # set-of-words model
    def _setOfWords2Vec(self, vocabList, inputSet):
        """
        Purpose: map a list of words onto the vocabulary; a word that appears
                 is marked 1, otherwise 0.
        """
        outputVec = [0] * len(vocabList)
        for word in inputSet:
            if word in vocabList:
                outputVec[vocabList.index(word)] = 1
            else:
                print("the word: %s is not in my vocabulary!" % word)
        return outputVec

    # bag-of-words model (a variant of _setOfWords2Vec)
    def _bagOfWords2VecMN(self, vocabList, inputSet):
        """
        Purpose: count how many times each word occurs and map the counts
                 onto the vocabulary.
        Output:  an n-dimensional vector, n being the vocabulary size,
                 whose entries are word counts.
        """
        returnVec = [0] * len(vocabList)
        for word in inputSet:
            if word in vocabList:
                returnVec[vocabList.index(word)] += 1   # count occurrences
        return returnVec

    def _trainNB(self, trainMatrix, trainLabel):
        """
        Input:   the training matrix and class labels, as numpy arrays
        Purpose: compute the conditional probabilities and the class prior
        """
        numTrainDocs = len(trainMatrix)   # number of samples
        numWords = len(trainMatrix[0])    # number of features (the vocabulary size)
        pNeg = sum(trainLabel) / float(numTrainDocs)  # prior probability of class 1
        p0Num = np.ones(numWords)   # start the counts at 1 so no conditional probability is 0
        p1Num = np.ones(numWords)   # (Laplace smoothing)
        p0InAll = 2.0               # two classes in total, so initialize the denominators to 2
        p1InAll = 2.0
        # accumulate the word counts per class over all documents
        for i in range(numTrainDocs):
            if trainLabel[i] == 1:
                p1Num += trainMatrix[i]
                p1InAll += sum(trainMatrix[i])
            else:
                p0Num += trainMatrix[i]
                p0InAll += sum(trainMatrix[i])
        print(p1InAll)
        # probability of each vocabulary word given the class, taken as a log
        # to avoid underflow when the conditional probabilities are multiplied
        p0Vect = np.log(p0Num / p0InAll)   # conditional probabilities for class 0
        p1Vect = np.log(p1Num / p1InAll)   # np.log is the natural logarithm
        return p0Vect, p1Vect, pNeg

    def _classifyNB(self, vecSample, p0Vec, p1Vec, pNeg):
        """
        Classify one sample with naive Bayes; the result is 0 or 1.
        """
        prob_y0 = sum(vecSample * p0Vec) + np.log(1 - pNeg)
        prob_y1 = sum(vecSample * p1Vec) + np.log(pNeg)   # natural logarithm
        if prob_y1 > prob_y0:
            return 1
        else:
            return 0
