```python
def fun(num):
    """Convert an octal digit string to its decimal value."""
    length = len(num)
    total = 0
    for i in range(length):
        # ********** FOUND **********
        # weight the i-th digit from the right by its place value 8**i
        total += 8 ** i * int(num[length - 1 - i])
    return total


def main():
    num = input("Enter an octal number and press Enter to continue: ")
    # ********** FOUND **********
    # reject inputs longer than 5 octal digits
    if len(num) > 5:
        print("Error: no more than 5 octal digit characters!\n")
        exit(0)
    for i in range(len(num)):
        # ********** FOUND **********
        # a valid octal digit lies between '0' and '7'
        if num[i] < '0' or num[i] > '7':
            print("Error: %s is not an octal digit\n" % num[i])
            exit(0)
    print("Converted to decimal:", fun(num))


if __name__ == '__main__':
    main()
```
This is a Python function named `fun`; the parameter in parentheses is `num`, a string of octal digits. The body of the function works as follows:
First, the `len()` function computes the length of `num` and assigns it to the variable `length`. A running total, `total`, is initialized to 0.
Next, a `for` loop visits every digit of `num`. The `range()` function generates the integer sequence from 0 to `length - 1`, and the variable `i` takes each value in the sequence in turn.
In the loop body, `num[length - 1 - i]` (the digit i positions from the right) is converted to an integer with `int()` and multiplied by its place value `8 ** i`; the product is added to `total`. When the loop finishes, `total` holds the decimal value of the octal string.
Finally, the function returns `total`.
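To make the positional weighting concrete, here is a quick sanity check of `fun` (a minimal sketch; the expected values follow directly from the octal place values):
```python
# "17" in octal is 1 * 8**1 + 7 * 8**0 = 8 + 7
print(fun("17"))    # 15

# "100" in octal is 1 * 8**2 = 64
print(fun("100"))   # 64
```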
Related question
Implement a recurrent neural network from scratch. Requirements: 1. load the time machine dataset timemachine.txt and implement the RNN training procedure; 2. plot the perplexity curve of the training process.
Sure. Below is a simple from-scratch implementation of a recurrent neural network (RNN) using Python and NumPy. It loads the time machine dataset (timemachine.txt), runs the training loop, and plots the training perplexity.
First, make sure the required libraries are installed:
```bash
pip install numpy matplotlib
```
Then, run the following code:
```python
import numpy as np
import matplotlib.pyplot as plt
import random
# Load the time machine dataset
with open('timemachine.txt', 'r', encoding='utf-8') as f:
    data = f.read()

# Build the character <-> index mappings
chars = list(set(data))
char_to_idx = {ch: idx for idx, ch in enumerate(chars)}
idx_to_char = {idx: ch for idx, ch in enumerate(chars)}
vocab_size = len(chars)

# Hyperparameters
hidden_size = 100
seq_length = 25
learning_rate = 1e-1
epochs = 10  # full passes over the text (pure-NumPy training is slow)

# Parameter initialization
Wxh = np.random.randn(hidden_size, vocab_size) * 0.01   # input -> hidden
Whh = np.random.randn(hidden_size, hidden_size) * 0.01  # hidden -> hidden
Why = np.random.randn(vocab_size, hidden_size) * 0.01   # hidden -> output
bh = np.zeros((hidden_size, 1))                         # hidden bias
by = np.zeros((vocab_size, 1))                          # output bias

# Loss function: forward pass, backward pass (BPTT), gradient clipping
def lossFun(inputs, targets, hprev):
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0
    # Forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # one-hot encoding of the input char
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)
        ys[t] = np.dot(Why, hs[t]) + by
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))  # softmax
        loss += -np.log(ps[t][targets[t], 0])          # cross-entropy
    # Backward pass
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    delta = np.zeros_like(hs[0])  # gradient flowing in from the next time step
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1       # gradient of softmax + cross-entropy
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        delta = np.dot(Why.T, dy) + delta
        dh = (1 - hs[t] ** 2) * delta  # backprop through the tanh nonlinearity
        dbh += dh
        dWxh += np.dot(dh, xs[t].T)
        dWhh += np.dot(dh, hs[t-1].T)
        delta = np.dot(Whh.T, dh)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam)  # clip to avoid exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]

# Training loop
def train():
    n, p, epoch = 0, 0, 0
    smooth_loss = -np.log(1.0 / vocab_size) * seq_length
    hprev = np.zeros((hidden_size, 1))
    perplexities = []  # smoothed training perplexity, recorded for the plot
    while epoch < epochs:
        # at the start (or when the position wraps around), reset the state
        if p + seq_length + 1 >= len(data) or n == 0:
            hprev = np.zeros((hidden_size, 1))
            p = 0
            if n > 0:
                epoch += 1  # one full pass over the text completed
        inputs = [char_to_idx[ch] for ch in data[p:p+seq_length]]
        targets = [char_to_idx[ch] for ch in data[p+1:p+seq_length+1]]
        loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
        smooth_loss = smooth_loss * 0.999 + loss * 0.001
        # perplexity = exp(average cross-entropy per character)
        perplexities.append(np.exp(smooth_loss / seq_length))
        if n % 100 == 0:
            print('iteration:', n, 'loss:', smooth_loss)
        # plain SGD update (the in-place -= also updates the globals)
        for param, dparam in zip([Wxh, Whh, Why, bh, by],
                                 [dWxh, dWhh, dWhy, dbh, dby]):
            param -= learning_rate * dparam
        p += seq_length
        n += 1
    return perplexities

# Generate text by sampling one character at a time
def generate(Wxh, Whh, Why, bh, by, num_chars):
    hprev = np.zeros((hidden_size, 1))
    input_char = [char_to_idx[random.choice(chars)]]  # random seed character
    output = []
    for _ in range(num_chars):
        x = np.zeros((vocab_size, 1))
        x[input_char[-1]] = 1
        hprev = np.tanh(np.dot(Wxh, x) + np.dot(Whh, hprev) + bh)  # carry state forward
        y = np.dot(Why, hprev) + by
        p = np.exp(y) / np.sum(np.exp(y))  # softmax over the next character
        idx = np.random.choice(range(vocab_size), p=p.ravel())
        input_char.append(idx)
        output.append(idx_to_char[idx])
    return ''.join(output)

# Train, then plot the training perplexity curve
perplexities = train()
plt.plot(perplexities)
plt.xlabel('iteration')
plt.ylabel('perplexity')
plt.title('Training perplexity')
plt.show()

# Finally, sample 200 characters from the trained model
print(generate(Wxh, Whh, Why, bh, by, 200))
```
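Running the script trains the model for `epochs` passes over the text, then displays the perplexity curve and prints a 200-character sample. Since perplexity is the exponential of the average per-character cross-entropy, the curve starts near the vocabulary size and should fall as training progresses. This is a minimal from-scratch sketch: a fuller implementation would add a train/validation split, mini-batching, and a learning-rate schedule.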