Use 5 Lines of Python to Batch-Generate Pencil Sketches of Portrait Photos

What I'm bringing you is 5 lines of code that generate a pencil sketch, so you too can be a sketch "master". Enough talk; let's look at the result first.

The right-hand side of the image above shows the result. So what are the concrete steps?

1. Workflow Analysis

The workflow is very simple: read the image, convert it to grayscale, apply a Gaussian blur, then divide the grayscale by the blurred version and save the result. Next, let's look at the concrete implementation.

2. Implementation

Install the required library:

pip install opencv-python

Import the required library:

import cv2

Writing the main code is also very simple:

import cv2
SRC = 'images/image_1.jpg'

image_rgb = cv2.imread(SRC)
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
image_blend = cv2.divide(image_gray, image_blur, scale=255)
cv2.imwrite('result.jpg', image_blend)

The code above really isn't difficult. To help everyone understand it better, I wrote the following annotated version:

"""
project = 'Code', file_name = 'study.py', author = 'AI悅創'
time = '2020/5/19 8:35', product_name = PyCharm, 公眾號:AI悅創
code is far away from bugs with the god animal protecting
    I love animals. They taste delicious.
"""
import cv2

# 原圖路徑
SRC = 'images/image_1.jpg'

# 讀取圖片
image_rgb = cv2.imread(SRC)
# cv2.imshow('rgb', image_rgb) # 原圖
# cv2.waitKey(0)
# exit()
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', image_gray) # 灰度圖
# cv2.waitKey(0)
# exit()
image_bulr = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
cv2.imshow('image_blur', image_bulr) # 高斯虛化
cv2.waitKey(0)
exit()

# divide: 提取兩張差別較大的線條和內容
image_blend = cv2.divide(image_gray, image_bulr, scale=255)
# cv2.imshow('image_blend', image_blend) # 素描
cv2.waitKey(0)
# cv2.imwrite('result1.jpg', image_blend)

Compared with the original, the code above just adds some (commented-out) live-preview calls; uncomment any cv2.imshow/cv2.waitKey pair to inspect that intermediate stage.
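If cv2.divide looks mysterious, it is essentially a "color dodge" blend: dividing each grayscale pixel by its blurred counterpart pushes flat regions (where gray ≈ blur) toward pure white, while pixels on edges, which are noticeably darker than their blurred surroundings, stay dark, and that is precisely the pencil-line look. The snippet below is not part of the original code, just a rough numpy equivalent for illustration:

import numpy as np

# Rough numpy equivalent of cv2.divide(image_gray, image_blur, scale=255).
# OpenCV computes saturate(src1 * scale / src2) per pixel; the small epsilon
# exists only to dodge division by zero in this illustrative sketch.
def dodge_blend(gray, blur, scale=255.0):
    out = gray.astype(np.float32) * scale / (blur.astype(np.float32) + 1e-6)
    return np.clip(out, 0, 255).astype(np.uint8)

Roughly speaking, a larger ksize in the Gaussian blur yields bolder, softer strokes, while a smaller kernel keeps only thin, hard lines, so the kernel size is worth experimenting with.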
Some of you may ask: can't I just generate a sketch directly with image-editing software?
So what is the advantage of a program?
The advantage is that when you have a lot of images, batch-generating the sketches with a program is far more convenient and efficient; see the example loop below.
With that, we've finished turning the young lady's photo into a sketch, skr~.
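As a small taste of that, here is a minimal batch loop. It is only a sketch under assumed paths: a hypothetical photos/ input folder and a sketches/ output folder.

import glob
import os

import cv2

os.makedirs('sketches', exist_ok=True)  # create the output folder if needed
for index, path in enumerate(glob.glob('photos/*.jpg')):
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    cv2.imwrite(f'sketches/{index}.jpg', cv2.divide(gray, blur, scale=255))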

3. Baidu Image Crawler + Sketch Generation

But that is still a long way from "massive". To live up to that word, I wrote a Baidu image crawler. This article isn't a tutorial on writing crawlers, though, so I'll just present the crawler code directly, organized along software-engineering lines (in the URL template below, pn is the paging offset and rn=30 requests 30 thumbnails per page):

# Crawler.Spider.py
import re
import os
import time
import collections
from collections import namedtuple

import requests
from concurrent import futures
from tqdm import tqdm
from enum import Enum

BASE_URL = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={page}&rn=30&gsm=&1568638554041='

HEADERS = {
 'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fr=&sf=1&fmq=1567133149621_R&pv=&ic=0&nc=1&z=0&hd=0&latest=0&copyright=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%E5%A3%81%E7%BA%B8',
 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
 'X-Requested-With': 'XMLHttpRequest', }


class BaiDuSpider:
 def __init__(self, max_works, images_type):
  self.max_works = max_works
  self.HTTPStatus = Enum('Status', ['ok', 'not_found', 'error'])
  self.result = namedtuple('Result', 'status data')
  self.session = requests.session()
  self.img_type = images_type
  self.img_num = None
  self.headers = HEADERS
  self.index = 1
 
 def get_img(self, img_url):
  res = self.session.get(img_url)
  if res.status_code != 200:
   res.raise_for_status()
  
  return res.content
 
 def download_one(self, img_url, verbose):
  try:
   image = self.get_img(img_url)
  except requests.exceptions.HTTPError as e:
   res = e.response
   if res.status_code == 404:
    status = self.HTTPStatus.not_found
    msg = 'not_found'
   else:
    raise
  else:
   self.save_img(self.img_type, image)
   status = self.HTTPStatus.ok
   msg = 'ok'
  
  if verbose:
   print(img_url, msg)
  
  return self.result(status, msg)
 
 def get_img_url(self):
  urls = [BASE_URL.format(keyword=self.img_type, page=page) for page in self.img_num]
  for url in urls:
   res = self.session.get(url, headers=self.headers)
   if res.status_code == 200:
    img_list = re.findall(r'"thumbURL":"(.*?)"', res.text)
    # yield the set of image URLs for the other methods to consume
    yield {img_url for img_url in img_list}
   elif res.status_code == 404:
    print('----- Request failed: resource not found -----')
    yield None
   elif res.status_code == 403:
    print('***** Request failed: server denied access *****')
    yield None
   else:
    print('>>> Network connection failed <<<')
    yield None
 
 def download_many(self, img_url_set, verbose=False):
  if img_url_set:
   counter = collections.Counter()
   with futures.ThreadPoolExecutor(self.max_works) as executor:
    to_do_map = {}
    for img in img_url_set:
     future = executor.submit(self.download_one, img, verbose)
     to_do_map[future] = img
    done_iter = futures.as_completed(to_do_map)
   
   if not verbose:
    done_iter = tqdm(done_iter, total=len(img_url_set))
   for future in done_iter:
    try:
     res = future.result()
    except requests.exceptions.HTTPError as e:
     error_msg = 'HTTP error {res.status_code} - {res.reason}'
     error_msg = error_msg.format(res=e.response)
    except requests.exceptions.ConnectionError:
     error_msg = 'ConnectionError error'
    else:
     error_msg = ''
     status = res.status
    
    if error_msg:
     status = self.HTTPStatus.error
    
    counter[status] += 1
    
    if verbose and error_msg:
     img = to_do_map[future]
     print('***Error for {} : {}'.format(img, error_msg))
   return counter
  else:
   pass
 
 def save_img(self, img_type, image):
  with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:
   f.write(image)
  self.index += 1
 
 def what_want2download(self):
  # self.img_type = input('Enter the type of image you want to download, anything works~ >>> ')
  try:
   os.mkdir(self.img_type)
  except FileExistsError:
   pass
  img_num = input('Enter how many to download (each unit is 30 images, e.g. 1 means 30 images, 2 means 60): >>> ')
  while True:
   if img_num.isdigit():
    img_num = int(img_num) * 30
    self.img_num = range(30, img_num + 1, 30)
    break
   else:
    img_num = input('Invalid input, please re-enter how many to download >>> ')
 
 def main(self):
  # Get the image type and the number of images to download
  total_counter = {}
  self.what_want2download()
  for img_url_set in self.get_img_url():
   if img_url_set:
    counter = self.download_many(img_url_set, False)
    for key in counter:
     if key in total_counter:
      total_counter[key] += counter[key]
     else:
      total_counter[key] = counter[key]
   
   else:
     # error reporting could be added here
    pass
  
  time.sleep(.5)
  return total_counter

if __name__ == '__main__':
 max_works = 20
 img_type = input('Enter the type of image you want to download, anything works~ >>> ')
 bd_spider = BaiDuSpider(max_works, img_type)
 print(bd_spider.main())

# Sketch_the_generated_code.py
import cv2
def drawing(src, id=None):
 image_rgb = cv2.imread(src)
 image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
 image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
 image_blend = cv2.divide(image_gray, image_blur, scale=255)
 cv2.imwrite(f'Drawing_images/result-{id}.jpg', image_blend)
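One caveat: cv2.imwrite does not create missing directories, so Drawing_images/ must already exist (it does in the directory tree shown later); if it is absent the write fails silently, returning False. Below is a guarded variant, purely an illustrative sketch (the name drawing_safe and the out_dir parameter are my own additions):

import os

import cv2

def drawing_safe(src, id=None, out_dir='Drawing_images'):
    os.makedirs(out_dir, exist_ok=True)  # make sure the output folder exists
    image_gray = cv2.cvtColor(cv2.imread(src), cv2.COLOR_BGR2GRAY)
    image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    image_blend = cv2.divide(image_gray, image_blur, scale=255)
    cv2.imwrite(f'{out_dir}/result-{id}.jpg', image_blend)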

# image_list.image_list_path.py
import os
from natsort import natsorted

IMAGES_LIST = []

def image_list(path):
 global IMAGES_LIST
 for root, dirs, files in os.walk(path):
  # sort by filename (natural order)
  # files.sort()
  files = natsorted(files)
  # iterate over all files
  for file in files:
   # if the extension is .jpg
   if os.path.splitext(file)[1] == '.jpg':
    # build the full path
    # print(file)
    filePath = os.path.join(root, file)
    print(filePath)
    # append it to the list
    IMAGES_LIST.append(filePath)
 return IMAGES_LIST
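A quick, hypothetical sanity check of this helper, assuming the crawler has already saved .jpg files into an ai/ folder:

from image_list.image_list_path import image_list

paths = image_list('ai')
print(len(paths))  # number of .jpg files found, in natural sort order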

# main.py
import time

from Sketch_the_generated_code import drawing
from Crawler.Spider import BaiDuSpider
from image_list.image_list_path import image_list
import os

MAX_WORKS = 20

if __name__ == '__main__':
 # now_path = os.getcwd()
 # img_type = 'ai'
 img_type = input('Enter the type of image you want to download, anything works~ >>> ')
 bd_spider = BaiDuSpider(MAX_WORKS, img_type)
 print(bd_spider.main())
 time.sleep(10)  # give the downloads time to finish writing to disk; removing this or making the sleep too short caused read errors
 for index, path in enumerate(image_list(img_type)):
  drawing(src=path, id=index)

So the final directory structure looks like this:

C:.
│  main.py
│  Sketch_the_generated_code.py
│
├─Crawler
│  │  Spider.py
│  │
│  └─__pycache__
│          Spider.cpython-37.pyc
│
├─drawing
│  │  result.jpg
│  │  result1.jpg
│  │  Sketch_the_generated_code.py
│  │  study.py
│  │
│  ├─images
│  │      image_1.jpg
│  │
│  └─__pycache__
│          Sketch_the_generated_code.cpython-37.pyc
│
├─Drawing_images
├─image_list
│  │  image_list_path.py
│  │
│  └─__pycache__
│          image_list_path.cpython-37.pyc
│
└─__pycache__
        Sketch_the_generated_code.cpython-37.pyc

At this point, all the code is complete.
