
Road extraction model development and training with PaddleRS (develop branch) on Baidu AI Studio

Reference: https://aistudio.baidu.com/projectdetail/8271882

Based on the python35 + paddle120 env environment.
Prediction visualization:
[prediction visualization image]

(1) Install the environment:
First upload the locally downloaded source package PaddleRS-develop.zip,
unzip PaddleRS-develop.zip into the PaddleRS directory,
and then run the following !pip install commands one by one:

!unzip -q /home/aistudio/data/data191076/PaddleRS-develop.zip && mv PaddleRS-develop PaddleRS
!pip install matplotlib==3.4 scikit-image pycocotools -t /home/aistudio/external-libraries
!pip install  opencv-contrib-python -t /home/aistudio/external-libraries
!pip install -r PaddleRS/requirements.txt  -t /home/aistudio/external-libraries
!pip install -e PaddleRS/  -t /home/aistudio/external-libraries
!pip install paddleslim==2.6.0  -t /home/aistudio/external-libraries

Add the installed components to the Python path:

# `sys.path` may not be updated in time, so update it manually here
import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')
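
A quick sanity check after appending the paths is to import the core packages and confirm where they are loaded from. This is a minimal sketch (paddle.utils.run_check() is PaddlePaddle's own installation check; the printed paths are only for inspection):

# Verify that paddle and paddlers can be imported from the appended paths
import paddle
import paddlers

print(paddle.__version__)     # PaddlePaddle version provided by the AI Studio environment
print(paddlers.__file__)      # should point into /home/aistudio/PaddleRS
paddle.utils.run_check()      # PaddlePaddle's built-in installation check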

(2) Data preprocessing: tran_dataPre.py

%run tran_dataPre.py

(3) Start model training

%run trans.py

(4) Contents of tran_dataPre.py:

# Unzip the dataset first
# !unzip -oq -d /home/aistudio/massroad /home/aistudio/data/data56961/mass_road.zip

# Split the data into training/validation/test sets and generate file-name lists
import random
import os.path as osp
from os import listdir

import cv2

# Random number generator seed
RNG_SEED = 56961
# Adjust this parameter to control the proportion of training data
TRAIN_RATIO = 0.9
# Dataset path
DATA_DIR = '/home/aistudio/massroad'

# Segmentation classes
CLASSES = (
    'background',
    'road',
)

def write_rel_paths(phase, names, out_dir, prefix):
    """Write relative file paths to a txt file"""
    with open(osp.join(out_dir, phase+'.txt'), 'w') as f:
        for name in names:
            f.write(' '.join([
                osp.join(prefix, 'input', name),
                osp.join(prefix, 'output', name)
            ]))
            f.write('\n')

random.seed(RNG_SEED)

train_prefix = osp.join('road_segmentation_ideal', 'training')
test_prefix = osp.join('road_segmentation_ideal', 'testing')
train_names = listdir(osp.join(DATA_DIR, train_prefix, 'output'))
train_names = list(filter(lambda n: n.endswith('.png'), train_names))
test_names = listdir(osp.join(DATA_DIR, test_prefix, 'output'))
test_names = list(filter(lambda n: n.endswith('.png'), test_names))
# Sort the file names so that repeated runs give consistent results
train_names.sort()
test_names.sort()
random.shuffle(train_names)
len_train = int(len(train_names)*TRAIN_RATIO)
write_rel_paths('train', train_names[:len_train], DATA_DIR, train_prefix)
write_rel_paths('val', train_names[len_train:], DATA_DIR, train_prefix)
write_rel_paths('test', test_names, DATA_DIR, test_prefix)

# Write the class information
with open(osp.join(DATA_DIR, 'labels.txt'), 'w') as f:
    for cls in CLASSES:
        f.write(cls+'\n')

print("Dataset split finished.")

# Rewrite 255 in the ground truth as 1 to make training easier
import os.path as osp
from glob import glob

import cv2
from tqdm import tqdm

# Dataset path
# DATA_DIR = '/home/aistudio/massroad'

train_prefix = osp.join('road_segmentation_ideal', 'training')
test_prefix = osp.join('road_segmentation_ideal', 'testing')

train_paths = glob(osp.join(DATA_DIR, train_prefix, 'output', '*.png'))
test_paths = glob(osp.join(DATA_DIR, test_prefix, 'output', '*.png'))
for path in tqdm(train_paths+test_paths):
    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    im[im>0] = 1
    # Overwrite the file in place
    cv2.imwrite(path, im)
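
Before moving on to training, it can be worth confirming that the generated file lists actually resolve to existing images. The snippet below is a minimal check; it reuses the DATA_DIR path from the script above and relies on the "input output" pair-per-line format written by write_rel_paths:

# Check that every path written to train/val/test.txt exists under DATA_DIR
import os.path as osp

DATA_DIR = '/home/aistudio/massroad'

for phase in ('train', 'val', 'test'):
    with open(osp.join(DATA_DIR, phase + '.txt')) as f:
        pairs = [line.split() for line in f if line.strip()]
    missing = [p for pair in pairs for p in pair if not osp.exists(osp.join(DATA_DIR, p))]
    print(f"{phase}: {len(pairs)} samples, {len(missing)} missing files")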

(5) Contents of trans.py:

# Import the required libraries
import random
import os.path as osp

import cv2
import numpy as np
import paddle
import paddlers as pdrs
from paddlers import transforms as T
from matplotlib import pyplot as plt
from PIL import Image

import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')

# Define global variables
# Random seed
SEED = 56961
# Dataset directory
DATA_DIR = '/home/aistudio/massroad/'
# Path of the training set `file_list`
TRAIN_FILE_LIST_PATH = '/home/aistudio/massroad/train.txt'
# Path of the validation set `file_list`
VAL_FILE_LIST_PATH = '/home/aistudio/massroad/val.txt'
# Path of the test set `file_list`
TEST_FILE_LIST_PATH = '/home/aistudio/massroad/test.txt'
# Path of the dataset class-information file
LABEL_LIST_PATH = '/home/aistudio/massroad/labels.txt'
# Experiment directory that stores the output model weights and results
EXP_DIR = '/home/aistudio/exp/'

# Fix the random seeds to make the experiment as reproducible as possible
random.seed(SEED)
np.random.seed(SEED)
paddle.seed(SEED)

# Build the datasets
# Define the data transforms (augmentation, preprocessing, etc.) used for training and validation
train_transforms = T.Compose([
    T.DecodeImg(),
    # Random crop
    T.RandomCrop(crop_size=512),
    # Random horizontal flip with probability 0.5
    T.RandomHorizontalFlip(prob=0.5),
    # Random vertical flip with probability 0.5
    T.RandomVerticalFlip(prob=0.5),
    # Normalize the data to [-1, 1]
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeSegmenter('train')
])

eval_transforms = T.Compose([
    T.DecodeImg(),
    T.Resize(target_size=1500),
    # Validation must use the same normalization as training
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    T.ArrangeSegmenter('eval')
])

# Build the training and validation datasets
train_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TRAIN_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=train_transforms,
    num_workers=4,
    shuffle=True
)

val_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=VAL_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False
)

# Build a DeepLab V3+ model with a ResNet-50 backbone
model = pdrs.tasks.seg.DeepLabV3P(
    in_channels=3,
    num_classes=len(train_dataset.labels),
    backbone='ResNet50_vd'
)
model.initialize_net(
    pretrain_weights='CITYSCAPES',
    save_dir=osp.join(EXP_DIR, 'pretrain'),
    resume_checkpoint=None,
    is_backbone_weights=False
)

# Build the optimizer
optimizer = paddle.optimizer.Adam(
    learning_rate=0.001,
    parameters=model.net.parameters()
)

# Run model training
model.train(
    num_epochs=100,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=val_dataset,
    optimizer=optimizer,
    save_interval_epochs=10,
    # Log once every this many iterations
    log_interval_steps=30,
    save_dir=EXP_DIR,
    # Whether to use early stopping (terminate training when accuracy stops improving)
    early_stop=False,
    # Whether to enable VisualDL logging
    use_vdl=True,
    # Checkpoint to resume training from
    resume_checkpoint=None
)
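
For reference, the Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) transform above maps 8-bit pixel values into [-1, 1]. A tiny illustration of the arithmetic, assuming the usual convention of first scaling pixels to [0, 1] and then applying (x - mean) / std:

import numpy as np

# How Normalize(mean=0.5, std=0.5) maps 8-bit pixels to [-1, 1]
pixels = np.array([0, 128, 255], dtype=np.float32)
scaled = pixels / 255.0                # -> [0.0, 0.502, 1.0]
normalized = (scaled - 0.5) / 0.5      # -> [-1.0, 0.004, 1.0]
print(normalized)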

(6) Training log output

2024-09-05 14:16:51 [INFO]	Loading pretrained model from /home/aistudio/exp/pretrain/model.pdparams
2024-09-05 14:16:53 [WARNING]	[SKIP] Shape of parameters head.decoder.conv.weight do not match. (pretrained: [19, 256, 1, 1] vs actual: [2, 256, 1, 1])
2024-09-05 14:16:53 [WARNING]	[SKIP] Shape of parameters head.decoder.conv.bias do not match. (pretrained: [19] vs actual: [2])
2024-09-05 14:16:53 [INFO]	There are 358/360 variables loaded into DeepLabV3P.
2024-09-05 14:17:46 [INFO]	[TRAIN] Epoch=1/100, Step=30/90, loss=0.133503, lr=0.001000, time_each_step=1.77s, eta=4:24:32
2024-09-05 14:18:25 [INFO]	[TRAIN] Epoch=1/100, Step=60/90, loss=0.181917, lr=0.001000, time_each_step=1.31s, eta=3:14:53
2024-09-05 14:19:02 [INFO]	[TRAIN] Epoch=1/100, Step=90/90, loss=0.112567, lr=0.001000, time_each_step=1.22s, eta=3:2:6
2024-09-05 14:19:03 [INFO]	[TRAIN] Epoch 1 finished, loss=0.15933047160506247 .
2024-09-05 14:19:44 [INFO]	[TRAIN] Epoch=2/100, Step=30/90, loss=0.141528, lr=0.001000, time_each_step=1.36s, eta=3:22:2
2024-09-05 14:20:20 [INFO]	[TRAIN] Epoch=2/100, Step=60/90, loss=0.165187, lr=0.001000, time_each_step=1.22s, eta=3:0:42
2024-09-05 14:20:57 [INFO]	[TRAIN] Epoch=2/100, Step=90/90, loss=0.145009, lr=0.001000, time_each_step=1.22s, eta=2:59:1
2024-09-05 14:20:58 [INFO]	[TRAIN] Epoch 2 finished, loss=0.1168842613697052 .
2024-09-05 14:21:39 [INFO]	[TRAIN] Epoch=3/100, Step=30/90, loss=0.126603, lr=0.001000, time_each_step=1.38s, eta=3:22:13
2024-09-05 14:22:16 [INFO]	[TRAIN] Epoch=3/100, Step=60/90, loss=0.117296, lr=0.001000, time_each_step=1.22s, eta=2:58:14
2024-09-05 14:22:53 [INFO]	[TRAIN] Epoch=3/100, Step=90/90, loss=0.072859, lr=0.001000, time_each_step=1.23s, eta=2:58:46
2024-09-05 14:22:53 [INFO]	[TRAIN] Epoch 3 finished, loss=0.10787189056475957 .
2024-09-05 14:23:34 [INFO]	[TRAIN] Epoch=4/100, Step=30/90, loss=0.081685, lr=0.001000, time_each_step=1.37s, eta=3:18:39
2024-09-05 14:24:11 [INFO]	[TRAIN] Epoch=4/100, Step=60/90, loss=0.087735, lr=0.001000, time_each_step=1.23s, eta=2:57:28
2024-09-05 14:24:48 [INFO]	[TRAIN] Epoch=4/100, Step=90/90, loss=0.084795, lr=0.001000, time_each_step=1.22s, eta=2:55:44
2024-09-05 14:24:49 [INFO]	[TRAIN] Epoch 4 finished, loss=0.10476481277081702 .
2024-09-05 14:25:30 [INFO]	[TRAIN] Epoch=5/100, Step=30/90, loss=0.098625, lr=0.001000, time_each_step=1.37s, eta=3:16:59
2024-09-05 14:26:07 [INFO]	[TRAIN] Epoch=5/100, Step=60/90, loss=0.078188, lr=0.001000, time_each_step=1.24s, eta=2:57:12
2024-09-05 14:26:43 [INFO]	[TRAIN] Epoch=5/100, Step=90/90, loss=0.098015, lr=0.001000, time_each_step=1.21s, eta=2:52:11
2024-09-05 14:26:44 [INFO]	[TRAIN] Epoch 5 finished, loss=0.10311256903741095 .
2024-09-05 14:27:25 [INFO]	[TRAIN] Epoch=6/100, Step=30/90, loss=0.109136, lr=0.001000, time_each_step=1.38s, eta=3:16:8
...
2024-09-05 15:39:38 [INFO]	Start to evaluate (total_samples=81, total_steps=81)...
2024-09-05 15:40:14 [INFO]	[EVAL] Finished, Epoch=40, miou=0.716638, category_iou=[0.96831487 0.46496069], oacc=0.969164, category_acc=[0.97447995 0.81316509], kappa=0.619485, category_F1-score=[0.98390241 0.63477565] .
2024-09-05 15:40:14 [INFO]	Current evaluated best model on eval_dataset is epoch_10, miou=0.7255623401044613
2024-09-05 15:40:18 [INFO]	Model saved in /home/aistudio/exp/epoch_40.
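
The per-epoch losses in this log can be extracted with a small parser and plotted, which is handy when VisualDL is not running. A minimal sketch, assuming the console output was saved to a text file (the file name train_log.txt is hypothetical):

import re
from matplotlib import pyplot as plt

# Pull "Epoch N finished, loss=X" entries out of a saved copy of the training log
pattern = re.compile(r"Epoch (\d+) finished, loss=([\d.]+)")
epochs, losses = [], []
with open('train_log.txt') as f:       # hypothetical path to the saved log
    for line in f:
        m = pattern.search(line)
        if m:
            epochs.append(int(m.group(1)))
            losses.append(float(m.group(2)))

plt.plot(epochs, losses, marker='o')
plt.xlabel('epoch')
plt.ylabel('train loss')
plt.show()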

(7) Evaluation on the test set:

# Build the test set
test_dataset = pdrs.datasets.SegDataset(
    data_dir=DATA_DIR,
    file_list=TEST_FILE_LIST_PATH,
    label_list=LABEL_LIST_PATH,
    transforms=eval_transforms,
    num_workers=0,
    shuffle=False
)

# Load the historical best weights into the model
state_dict = paddle.load(osp.join(EXP_DIR, 'best_model/model.pdparams'))
model.net.set_state_dict(state_dict)

# Run the evaluation
test_result = model.evaluate(test_dataset)
print("Test set metrics: IoU={:.2f}, Acc={:.2f}, Kappa={:.2f}, F1={:.2f}".format(
    test_result['category_iou'][1],
    test_result['category_acc'][1],
    test_result['kappa'],
    test_result['category_F1-score'][1])
)
2024-09-05 20:07:40 [INFO]	13 samples in file /home/aistudio/massroad/test.txt
2024-09-05 20:07:41 [INFO]	Start to evaluate (total_samples=13, total_steps=13)...
Test set metrics: IoU=0.47, Acc=0.82, Kappa=0.62, F1=0.64
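
For context, the per-class IoU and F1 reported by evaluate() are derived from the pixel-level confusion matrix. A minimal numpy sketch with two tiny hypothetical masks shows the relationship:

import numpy as np

# Tiny hypothetical ground truth and prediction (1 = road, 0 = background)
gt   = np.array([[0, 1, 1], [0, 0, 1]])
pred = np.array([[0, 1, 0], [0, 1, 1]])

tp = np.sum((pred == 1) & (gt == 1))   # 2 road pixels predicted correctly
fp = np.sum((pred == 1) & (gt == 0))   # 1 false alarm
fn = np.sum((pred == 0) & (gt == 1))   # 1 missed road pixel

iou = tp / (tp + fp + fn)              # 2 / 4 = 0.50
f1  = 2 * tp / (2 * tp + fp + fn)      # 4 / 6 ≈ 0.67
print(iou, f1)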

(8) Visualization of the prediction results:

# Visualize the prediction results
# Re-run this cell to see different samples

def read_image(path):
    im = cv2.imread(path)
    return im[...,::-1]

def show_images_in_row(ims, fig, title='', quantize=False):
    n = len(ims)
    fig.suptitle(title)
    axs = fig.subplots(nrows=1, ncols=n)
    for idx, (im, ax) in enumerate(zip(ims, axs)):
        # Remove tick marks and borders
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        if isinstance(im, str):
            im = read_image(im)
        if quantize:
            im = (im*255).astype('uint8')
        if im.ndim == 2:
            im = np.tile(im[...,np.newaxis], [1,1,3])
        ax.imshow(im)

# Number of samples to display
num_imgs_to_show = 4
# Randomly sample from the test set
chosen_indices = random.choices(range(len(test_dataset)), k=num_imgs_to_show)

# See https://stackoverflow.com/a/68209152
fig = plt.figure(constrained_layout=True)
fig.suptitle("Test Results")

subfigs = fig.subfigures(nrows=3, ncols=1)

# Read and display the input images
im_paths = [test_dataset.file_list[idx]['image'] for idx in chosen_indices]
show_images_in_row(im_paths, subfigs[0], title='Image')

# Get the model predictions
with paddle.no_grad():
    model.net.eval()
    preds = []
    for idx in chosen_indices:
        input, mask = test_dataset[idx]
        input = paddle.to_tensor(input["image"]).unsqueeze(0)
        logits, *_ = model.net(input)
        pred = paddle.argmax(logits[0], axis=0)
        preds.append(pred.numpy())
show_images_in_row(preds, subfigs[1], title='Pred', quantize=True)

# Read and display the ground-truth labels
im_paths = [test_dataset.file_list[idx]['mask'] for idx in chosen_indices]
show_images_in_row(im_paths, subfigs[2], title='GT', quantize=True)

# Render the figure
fig.canvas.draw()
Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())

[visualization of test predictions: input image, prediction, and ground truth rows]
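
An alternative presentation that is often easier to inspect is to blend the predicted road mask over the input image. The sketch below is one way to do it with OpenCV, assuming the prediction and the image have the same spatial size (true here, since the test tiles are 1500×1500); preds and chosen_indices come from the cell above, and the output file name is arbitrary:

import cv2
import numpy as np

# Overlay a binary prediction mask on the corresponding input image in red
def overlay_mask(image_bgr, pred, alpha=0.5):
    color = np.zeros_like(image_bgr)
    color[pred > 0] = (0, 0, 255)      # red in BGR order
    return cv2.addWeighted(image_bgr, 1.0, color, alpha, 0)

img = cv2.imread(test_dataset.file_list[chosen_indices[0]]['image'])
blend = overlay_mask(img, preds[0])
cv2.imwrite('overlay.png', blend)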
(9) Export a static inference model
The model saved after training is a dynamic-graph model, while the model released for deployment must be a static-graph model, so an export step is required.

import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import paddle
import paddlers as pdrs
from PIL import Image
import os
from paddlers.tasks import load_model

model_path = './exp/best_model'

img_14 = "i:/cwgis_ai/cup/mass_road/road_segmentation_ideal/testing/input/img-14.png"
img_10 = "i:/cwgis_ai/cup/mass_road/road_segmentation_ideal/testing/input/img-10.png"

# save_dir = "./models/road_infer_model_100"
save_dir = "./models/road_infer_model_100_custom"

# Set environment variables for the export stage
os.environ['PADDLEX_EXPORT_STAGE'] = 'True'
os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'

# Load the model from the directory
model = load_model(model_path)

# fixed_input_shape = None
# fixed_input_shape = [1500, 1500]
fixed_input_shape = [17761, 25006]      # [w, h]

# Do the dynamic-to-static conversion and export the inference model
model.export_inference_model(save_dir, fixed_input_shape)
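
After the export, the save directory should hold the static-graph program, the weights, and the PaddleRS model configuration (typically files such as model.pdmodel, model.pdiparams, and model.yml; treat the exact file set as an assumption of this sketch). A quick listing confirms that the export produced output:

import os

# List the exported inference-model directory and the size of each file
for name in sorted(os.listdir(save_dir)):
    path = os.path.join(save_dir, name)
    print(name, os.path.getsize(path), "bytes")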

(10) Code for predicting a single image

import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import paddle
import paddlers as pdrs
from PIL import Image
import os
from paddlers.tasks import load_model

# `sys.path` may not be updated in time, so update it manually here
import sys
sys.path.append('/home/aistudio/external-libraries')
sys.path.append('/home/aistudio/PaddleRS')

img_14 = "./massroad/road_segmentation_ideal/testing/input/img-14.png"
img_10 = "./massroad/road_segmentation_ideal/testing/input/img-10.png"
img_5 = "./massroad/road_segmentation_ideal/testing/input/img-5.png"
customImg = "./customImage/DeepLearning_Image.png"    # file converted from tif to png

# model_dir = "./models/road_infer_model_100"
# model_dir = "./models/road_infer_model_100_None"
model_dir = "./models/road_infer_model_100_custom"

# model = pdrs.deploy.Predictor(model_dir)
model = pdrs.deploy.Predictor(model_dir, use_gpu=True)

# Read the input images
im_paths = [customImg]
im_lis = []
for name in im_paths:
    print(name)
    img = cv2.imread(name)
    print(img.shape)
    # img = paddle.to_tensor(img)  # .unsqueeze(0)  (tensor input)
    im_lis.append(img)

# Get the model predictions
img_file = img_10
preds = []
results = model.predict(im_lis)
# print(results)

label_map = results[0]["label_map"]
# print(label_map)
label_map[label_map > 0] = 255
cv2.imwrite('./outImage/label_map_custom.png', label_map)

score_map = results[0]["score_map"]
# cv2.imwrite('./outImage/score_map.png', score_map[0])
print(score_map)
print("Prediction finished")
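
The custom image in this example was converted from TIFF to PNG beforehand (the "file converted from tif to png" note above). A minimal conversion sketch with OpenCV, assuming an ordinary 8-bit, 3-band TIFF (a georeferenced multi-band image would need GDAL or rasterio instead); both file names are hypothetical:

import cv2

# Convert an 8-bit RGB TIFF to PNG so it can be passed to the Predictor above
src = "./customImage/DeepLearning_Image.tif"   # hypothetical input path
dst = "./customImage/DeepLearning_Image.png"

img = cv2.imread(src, cv2.IMREAD_COLOR)
assert img is not None, f"failed to read {src}"
cv2.imwrite(dst, img)
print(img.shape, "->", dst)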

Original blog: https://blog.csdn.net/hsg77


