# 张量

1,572次阅读

TensorFlow 自己定义的一套数据结构（张量，Tensor），核心属性包括：

name

shape

dtype

>>> a=tf.constant(4)
>>> a
<tf.Tensor 'Const:0' shape=() dtype=int32>
>>> a=tf.constant(10)
>>> a
<tf.Tensor 'Const_1:0' shape=() dtype=int32>


## shape

rank_4_tensor = tf.zeros([3, 2, 4, 5])


• 索引从 0 开始编制
• 负索引表示按倒序编制索引
• 冒号 : 用于切片 start:stop:step

### reshape

tf.Tensor(
[[[ 0  1  2  3  4]
[ 5  6  7  8  9]]

[[10 11 12 13 14]
[15 16 17 18 19]]

[[20 21 22 23 24]
[25 26 27 28 29]]], shape=(3, 2, 5), dtype=int32)


# Reshape the (3, 2, 5) tensor: first to (6, 5), then to (3, 10).
# -1 tells reshape to infer that dimension from the total element count.
# NOTE: "\\n" was an escaping artifact from the paste — it printed a literal
# backslash-n instead of a blank separator line; "\n" is intended.
print(tf.reshape(rank_3_tensor, [3 * 2, 5]), "\n")
print(tf.reshape(rank_3_tensor, [3, -1]))

tf.Tensor(
[[ 0  1  2  3  4]
[ 5  6  7  8  9]
[10 11 12 13 14]
[15 16 17 18 19]
[20 21 22 23 24]
[25 26 27 28 29]], shape=(6, 5), dtype=int32)

tf.Tensor(
[[ 0  1  2  3  4  5  6  7  8  9]
[10 11 12 13 14 15 16 17 18 19]
[20 21 22 23 24 25 26 27 28 29]], shape=(3, 10), dtype=int32)


# Sparse tensors store values by index in a memory-efficient manner:
# only the non-zero entries and their coordinates are kept.
sparse_tensor = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                                       values=[1, 2],
                                       dense_shape=[3, 4])
# "\n" (not the pasted "\\n") so a blank line separates the two printouts.
print(sparse_tensor, "\n")

# We can convert sparse tensors to dense; unset positions become 0.
print(tf.sparse.to_dense(sparse_tensor))


SparseTensor(indices=tf.Tensor(
[[0 0]
[1 2]], shape=(2, 2), dtype=int64), values=tf.Tensor([1 2], shape=(2,), dtype=int32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))

tf.Tensor(
[[1 0 0 0]
[0 0 2 0]
[0 0 0 0]], shape=(3, 4), dtype=int32)



### 关于张量的操作

tf.ones_like
tf.string_to_number
.....


## 变量的定义

### Variable 与 get_variable 之间的区别

import tensorflow as tf

# Demonstrates how get_variable interacts with variable_scope, versus
# tf.Variable which always creates a brand-new variable.
# (Indentation restored — the pasted version had the with-bodies at column 0.)
with tf.variable_scope("one"):
    a = tf.get_variable("v", [1])  # a.name == "one/v:0"
    print(a)
# with tf.variable_scope("one"):
#     b = tf.get_variable("v", [1])  # ValueError: Variable one/v already exists
with tf.variable_scope("one", reuse=True):
    c = tf.get_variable("v", [1])  # c.name == "one/v:0"
    print(c)
with tf.variable_scope("two"):
    d = tf.get_variable("v", [1])  # d.name == "two/v:0"
    print(d)
    e = tf.Variable(1, name="v", expected_shape=[1])  # e.name == "two/v_1:0"
    print(e)

assert a is c  # True: reuse=True returns the same underlying object.
assert a is d  # AssertionError (intentional): different scopes -> different variables.
assert d is e  # AssertionError: tf.Variable never reuses an existing variable.


<tf.Variable 'one/v:0' shape=(1,) dtype=float32_ref>
<tf.Variable 'one/v:0' shape=(1,) dtype=float32_ref>
<tf.Variable 'two/v:0' shape=(1,) dtype=float32_ref>
<tf.Variable 'two/v_1:0' shape=() dtype=int32_ref>
Traceback (most recent call last):
File "f:/vivocode/tfwork/1_1.py", line 17, in <module>
assert(a is d)  #AssertionError: they are different objects
AssertionError


### name_scope 与 variable_scope

name_scope 只影响 op 以及 tf.Variable 的命名，对 get_variable 创建的变量名不起作用（见下例：var1:0 没有前缀）

# name_scope prefixes op names (and tf.Variable names) but is ignored by
# get_variable — see the printed names below.
with tf.name_scope("my_scope"):
    v1 = tf.get_variable("var1", [1], dtype=tf.float32)
    v2 = tf.Variable(1, name="var2", dtype=tf.float32)

print(v1.name)  # var1:0          <- name_scope did NOT prefix get_variable's name
print(v2.name)  # my_scope/var2:0 <- tf.Variable IS prefixed



# variable_scope prefixes BOTH get_variable and tf.Variable names.
with tf.variable_scope("my_scope"):
    v1 = tf.get_variable("var1", [1], dtype=tf.float32)
    v2 = tf.Variable(1, name="var2", dtype=tf.float32)

print(v1.name)  # my_scope/var1:0
print(v2.name)  # my_scope/var2:0



# A variable_scope nested inside a name_scope: the surrounding name_scope
# does not appear in the variable's name, so the same variable can be
# reused from a different name_scope.
with tf.name_scope("foo"):
    with tf.variable_scope("var_scope"):
        v = tf.get_variable("var", [1])
with tf.name_scope("bar"):
    with tf.variable_scope("var_scope", reuse=True):
        v1 = tf.get_variable("var", [1])
assert v1 == v  # same variable, retrieved via reuse
print(v.name)   # var_scope/var:0
print(v1.name)  # var_scope/var:0


### 关于参数共享

# AUTO_REUSE creates a variable on first use and reuses it on subsequent
# calls, provided the variable names inside the scope are consistent.
with tf.compat.v1.variable_scope('sparse_autoreuse', reuse=tf.compat.v1.AUTO_REUSE):
    # Define the shared parameters here; their names must be consistent
    # across calls for reuse to work.
    pass


• 不需要重复创建参数
• 节约存储空间
• 特征之间的约束？