@article{iasc.2020.012144,
  author   = {Qiu, Shiyin and Lodder, David and Du, Feifan},
  title    = {{HGG-CNN}: The Generation of the Optimal Robotic Grasp Pose Based on Vision},
  journal  = {Intelligent Automation \& Soft Computing},
  volume   = {26},
  number   = {6},
  pages    = {1517--1529},
  year     = {2020},
  issn     = {2326-005X},
  doi      = {10.32604/iasc.2020.012144},
  url      = {http://www.techscience.com/iasc/v26n6/41017},
  abstract = {Robotic grasping is an important issue in the field of robot control. In order to solve the problem of optimal grasping pose of the robotic arm, based on the Generative Grasping Convolutional Neural Network (GG-CNN), a new convolutional neural network called Hybrid Generative Grasping Convolutional Neural Network (HGG-CNN) is proposed by combining three small network structures called Inception Block, Dense Block and SELayer. This new type of convolutional neural network structure can improve the accuracy rate of grasping pose based on the GG-CNN network, thereby improving the success rate of grasping. In addition, the HGG-CNN convolutional neural network structure can also overcome the problem that the original GG-CNN network structure has in yielding a recognition rate of less than 70\% for complex artificial irregular objects. After experimental tests, the HGG-CNN convolutional neural network can improve the average grasping pose accuracy of the original GG-CNN network from 83.83\% to 92.48\%. For irregular objects with complex man-made shapes such as spoons, the recognition rate of grasping pose can also be increased from 21.38\% to 55.33\%.},
}