
Artificial Intelligence | VGG-16 Transfer Learning Model

Published: 2025-10-21 12:17

            imgs[k].append(resized_img)     # [1, height, width, depth] * n
            if len(imgs[k]) == 400:         # only use 400 imgs to reduce my memory load
                break

    # fake length data for tiger and cat
    tigers_y = np.maximum(20, np.random.randn(len(imgs['tiger']), 1) * 30 + 100)
    cat_y = np.maximum(10, np.random.randn(len(imgs['kittycat']), 1) * 8 + 40)
    return imgs['tiger'], imgs['kittycat'], tigers_y, cat_y
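The tiger and cat photos only supply the inputs; the labels are synthetic "body length" values drawn from two Gaussians, so the example can demonstrate fine-tuning VGG-16 for a regression target. A purely illustrative smoke test (not part of the original script, and assuming the images have already been downloaded into ./for_transfer_learning/data/) might look like this:

# illustrative check of load_data(), not in the original tutorial
tigers_x, cats_x, tigers_y, cats_y = load_data()
print(len(tigers_x), tigers_x[0].shape)   # number of tiger images, each (1, 224, 224, 3)
print(tigers_y.mean(), cats_y.mean())     # fake lengths centred near 100 cm and 40 cm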

class Vgg16:
    vgg_mean = [103.939, 116.779, 123.68]

    def __init__(self, vgg16_npy_path=None, restore_from=None):
        # pre-trained parameters
        try:
            self.data_dict = np.load(vgg16_npy_path, allow_pickle=True, encoding='latin1').item()
        except FileNotFoundError:
            print('Please download vgg16.npy first')

        self.tfx = tf.placeholder(tf.float32, [None, 224, 224, 3])
        self.tfy = tf.placeholder(tf.float32, [None, 1])

        # Convert RGB to BGR and subtract the per-channel means,
        # matching the preprocessing the VGG16 weights were trained with
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=self.tfx * 255.0)
        bgr = tf.concat(axis=3, values=[
            blue - self.vgg_mean[0],
            green - self.vgg_mean[1],
            red - self.vgg_mean[2],
        ])

        # pre-trained VGG layers are kept fixed during fine-tuning
        conv1_1 = self.conv_layer(bgr, "conv1_1")
        conv1_2 = self.conv_layer(conv1_1, "conv1_2")
        pool1 = self.max_pool(conv1_2, 'pool1')

        conv2_1 = self.conv_layer(pool1, "conv2_1")
        conv2_2 = self.conv_layer(conv2_1, "conv2_2")
        pool2 = self.max_pool(conv2_2, 'pool2')

        conv3_1 = self.conv_layer(pool2, "conv3_1")
        conv3_2 = self.conv_layer(conv3_1, "conv3_2")
        conv3_3 = self.conv_layer(conv3_2, "conv3_3")
        pool3 = self.max_pool(conv3_3, 'pool3')

        conv4_1 = self.conv_layer(pool3, "conv4_1")
        conv4_2 = self.conv_layer(conv4_1, "conv4_2")
        conv4_3 = self.conv_layer(conv4_2, "conv4_3")
        pool4 = self.max_pool(conv4_3, 'pool4')

        conv5_1 = self.conv_layer(pool4, "conv5_1")
        conv5_2 = self.conv_layer(conv5_1, "conv5_2")
        conv5_3 = self.conv_layer(conv5_2, "conv5_3")
        pool5 = self.max_pool(conv5_3, 'pool5')

        # drop the original VGG fc layers and
        # build new fc layers that serve this regression task
        self.flatten = tf.reshape(pool5, [-1, 7 * 7 * 512])
        self.fc6 = tf.layers.dense(self.flatten, 256, tf.nn.relu, name='fc6')
        self.out = tf.layers.dense(self.fc6, 1, name='out')

        self.sess = tf.Session()
        if restore_from:
            saver = tf.train.Saver()
            saver.restore(self.sess, restore_from)
        else:   # training graph
            self.loss = tf.losses.mean_squared_error(labels=self.tfy, predictions=self.out)
            self.train_op = tf.train.RMSPropOptimizer(0.001).minimize(self.loss)
            self.sess.run(tf.global_variables_initializer())

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        with tf.variable_scope(name):   # the CNN filters are constants loaded from vgg16.npy, NOT trainable Variables
            conv = tf.nn.conv2d(bottom, self.data_dict[name][0], [1, 1, 1, 1], padding='SAME')
            lout = tf.nn.relu(tf.nn.bias_add(conv, self.data_dict[name][1]))
            return lout

    def train(self, x, y):
        loss, _ = self.sess.run([self.loss, self.train_op], {self.tfx: x, self.tfy: y})
        return loss

    def predict(self, paths):
        fig, axs = plt.subplots(1, 2)
        for i, path in enumerate(paths):
            x = load_img(path)
            length = self.sess.run(self.out, {self.tfx: x})
            axs[i].imshow(x[0])
            axs[i].set_title('Len: %.1f cm' % length)
            axs[i].set_xticks(())
            axs[i].set_yticks(())
        plt.show()

    def save(self, path='./for_transfer_learning/model/transfer_learn'):
        saver = tf.train.Saver()
        saver.save(self.sess, path, write_meta_graph=False)
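Because the convolutional weights are fed into conv_layer as NumPy constants rather than tf.Variable objects, only the two new dense layers (fc6 and out) end up in the trainable set, so the optimizer fine-tunes just the regression head. A small, purely illustrative check (not part of the original script, and assuming vgg16.npy is available) is:

# illustrative: list what the optimizer is allowed to update
vgg = Vgg16(vgg16_npy_path='./for_transfer_learning/vgg16.npy')
for v in tf.trainable_variables():
    print(v.name, v.shape)   # expect only fc6/... and out/... kernels and biases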

def train():
    tigers_x, cats_x, tigers_y, cats_y = load_data()

    # plot fake length distribution
    plt.hist(tigers_y, bins=20, label='Tigers')
    plt.hist(cats_y, bins=10, label='Cats')
    plt.legend()
    plt.xlabel('length')
    plt.show()

    xs = np.concatenate(tigers_x + cats_x, axis=0)
    ys = np.concatenate((tigers_y, cats_y), axis=0)

    vgg = Vgg16(vgg16_npy_path='./for_transfer_learning/vgg16.npy')
    print('Net built')
    for i in range(100):
        b_idx = np.random.randint(0, len(xs), 6)    # random mini-batch of 6 images
        train_loss = vgg.train(xs[b_idx], ys[b_idx])
        print(i, 'train loss: ', train_loss)

    vgg.save('./for_transfer_learning/model/transfer_learn')    # save learned fc layers

def eval():
    vgg = Vgg16(vgg16_npy_path='./for_transfer_learning/vgg16.npy',
                restore_from='./for_transfer_learning/model/transfer_learn')
    vgg.predict(
        ['./for_transfer_learning/data/kittycat/23066047.d6694f.jpg',
         './for_transfer_learning/data/tiger/37425296_58a9896259.jpg'])


if __name__ == '__main__':
    # download()
    # train()
    eval()
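The script above uses the TensorFlow 1.x graph API and the standalone vgg16.npy weight file. For readers on TensorFlow 2, the same idea (freeze the VGG-16 convolutional base, attach a small trainable regression head, train with RMSProp on MSE) can be sketched with tf.keras. This is an illustrative equivalent under those assumptions, not part of the original tutorial:

import tensorflow as tf

# frozen ImageNet-pretrained VGG-16 convolutional base (Keras downloads its own weights)
base = tf.keras.applications.VGG16(include_top=False, weights='imagenet',
                                   input_shape=(224, 224, 3))
base.trainable = False

# new regression head, analogous to fc6 (256, relu) and out (1) above
model = tf.keras.Sequential([
    base,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(1e-3), loss='mse')
# model.fit(xs, ys, batch_size=6, ...) on images run through
# tf.keras.applications.vgg16.preprocess_input, which applies the same
# RGB-to-BGR conversion and mean subtraction done by hand in the graph code.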
