-
Notifications
You must be signed in to change notification settings - Fork 1
/
render_blender_uniform.py
211 lines (180 loc) · 7.92 KB
/
render_blender_uniform.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
# A simple script that uses blender to render views of a single object by rotating the camera around it.
# Also produces a depth map at the same time.
#
# Example:
# blender --background --python render_blender_uniform.py -- --ntheta 12 --nphi 6 --output_dir "" /path/to/my.obj
#
import argparse, sys, os

# Command-line interface. When launched through Blender, everything after the
# "--" separator belongs to this script; Blender consumes the rest.
parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.')
parser.add_argument('--ntheta', type=int, default=12,
                    help='number of sampling points along the longitude')
parser.add_argument('--nphi', type=int, default=6,
                    help='number of sampling points along the latitude')
parser.add_argument('--npoints', type=int, default=5000,
                    help='number of sampling points for the initial point cloud')
parser.add_argument('obj', type=str,
                    help='Path to the obj file to be rendered.')
parser.add_argument('--scale', type=float, default=1,
                    help='Scaling factor applied to model. Depends on size of mesh.')
# BUG FIX: argparse's type=bool is broken for flags — bool("False") is True, so
# ANY value supplied on the command line used to enable these options.
# store_true gives a genuine on/off switch with the same default (False).
parser.add_argument('--remove_doubles', action='store_true', default=False,
                    help='Remove double vertices to improve mesh quality.')
parser.add_argument('--edge_split', action='store_true', default=False,
                    help='Adds edge split filter.')
parser.add_argument('--depth_scale', type=float, default=1.4,
                    help='Scaling that is applied to depth. Depends on size of mesh. Try out various values until you get a good result. Ignored if format is OPEN_EXR.')
parser.add_argument('--color_depth', type=str, default='8',
                    help='Number of bit per channel used for output. Either 8 or 16.')
parser.add_argument('--format', type=str, default='PNG',
                    help='Format of files generated. Either PNG or OPEN_EXR')
parser.add_argument('--output_dir', type=str)

# Robustness: sys.argv.index("--") raised ValueError when the script was run
# outside Blender (no "--" separator); fall back to the plain argv tail.
argv = sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else sys.argv[1:]
args = parser.parse_args(argv)
import numpy as np
import bpy
import cv2
import trimesh
from plyfile import PlyData, PlyElement
# --- Scene preparation -------------------------------------------------------
# Remove every pre-existing mesh (e.g. Blender's default cube) so the imported
# object is the only geometry in the scene.
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.select_by_type(type='MESH')
bpy.ops.object.delete()

# Load the target OBJ; the importer leaves what it loaded selected, and we
# assume a single main mesh object.
bpy.ops.import_scene.obj(filepath=args.obj)
target = bpy.context.selected_objects[0]

# Center the object: move it to the world origin and put its pivot at the
# center of its bounding box.
target.location = (0, 0, 0)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')

# Optional mesh clean-up requested on the command line.
if args.scale != 1:
    target.scale = (args.scale,) * 3
if args.remove_doubles:
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.remove_doubles()
    bpy.ops.object.mode_set(mode='OBJECT')
if args.edge_split:
    # Hard edge split above ~76 degrees (1.32645 rad) for sharper shading.
    edge_split_mod = target.modifiers.new(name='EdgeSplit', type='EDGE_SPLIT')
    edge_split_mod.split_angle = 1.32645
    bpy.ops.object.modifier_apply(modifier="EdgeSplit")

# --- Lighting ----------------------------------------------------------------
light_data = bpy.data.lights.new(name="Light", type='POINT')
light_data.energy = 1
light_object = bpy.data.objects.new(name="Light", object_data=light_data)
bpy.context.collection.objects.link(light_object)
light_object.location = (3, 3, 5)
# Render settings
def parent_obj_to_camera(b_camera):
    """Parent *b_camera* to a new empty placed at the world origin.

    The empty is linked into the active scene collection and made the active
    object; it is returned so the caller can use it (e.g. as a TRACK_TO
    target), letting the camera orbit the origin via its parent.
    """
    b_empty = bpy.data.objects.new("Empty", None)
    b_empty.location = (0, 0, 0)
    b_camera.parent = b_empty  # camera now follows the empty
    bpy.context.scene.collection.objects.link(b_empty)
    bpy.context.view_layer.objects.active = b_empty
    return b_empty
def camera_info(param):
    """Convert a spherical camera spec to a Blender (Z-up) world position.

    param: [theta, phi, rho, x, y, z, f] with theta/phi in degrees.
    Only theta (param[0]), phi (param[1]), the radius param[3] and the scale
    param[6] are actually used; rho, y and z are ignored by this code.

    Returns (x, y, z): the camera position converted from the Y-up spherical
    frame to Blender's Z-up convention via (x, y, z) -> (x, -z, y).

    Cleaned up: the axisX/axisY/axisZ computation from the original was dead
    code (never used, never returned) and has been removed.
    """
    theta = np.deg2rad(param[0])
    phi = np.deg2rad(param[1])
    radius = param[3] * param[6]
    camY = radius * np.sin(phi)   # height in the Y-up source frame
    horiz = radius * np.cos(phi)  # distance from the vertical axis
    camX = horiz * np.cos(theta)
    camZ = horiz * np.sin(theta)
    print("cam axis", camX, camY, camZ)
    return camX, -camZ, camY
def storePly(path, xyz, rgb, normals):
    """Write a point cloud to a PLY file at *path*.

    xyz, normals: (N, 3) float arrays; rgb: (N, 3) color values stored as
    unsigned bytes. Columns are laid out as x/y/z, nx/ny/nz, red/green/blue.
    """
    vertex_dtype = [
        ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
        ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
        ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'),
    ]
    stacked = np.concatenate((xyz, normals, rgb), axis=1)
    vertices = np.empty(xyz.shape[0], dtype=vertex_dtype)
    vertices[:] = [tuple(row) for row in stacked]
    PlyData([PlyElement.describe(vertices, 'vertex')]).write(path)
scene = bpy.context.scene

# Output resolution; square renders keep the intrinsics simple.
scene.render.resolution_x = 400
scene.render.resolution_y = 400
scene.render.resolution_percentage = 100

# Transparent background, no dithering.
bpy.context.scene.render.dither_intensity = 0.0
bpy.context.scene.render.film_transparent = True

# Make the default camera always track an empty at the origin, so placing
# the camera anywhere on the sphere automatically aims it at the object.
cam = scene.objects['Camera']
cam_constraint = cam.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
b_empty = parent_obj_to_camera(cam)
cam_constraint.target = b_empty

# FIX: honor the --format flag (was hard-coded to 'PNG', silently ignoring
# the declared option). Default is still 'PNG', so behavior is unchanged
# unless the user asks for something else. Also removed the unused
# rotation_mode variable.
scene.render.image_settings.file_format = args.format

# Angular spacing between camera viewpoints, in degrees.
theta_stepsize = 360 / args.ntheta
phi_stepsize = 180 / args.nphi

output_dir = args.output_dir
output_image_dir = os.path.join(output_dir, "images")
output_camera_dir = os.path.join(output_dir, "cameras")
os.makedirs(output_image_dir, exist_ok=True)
os.makedirs(output_camera_dir, exist_ok=True)

r = 1.5  # camera distance from the object center
# Camera viewpoints: a latitude/longitude grid on a sphere of radius r,
# with a single sample at each pole (where all longitudes coincide).
cam_coordinates = []
for lat in np.arange(-90, 91, phi_stepsize):
    if lat == -90 or lat == 90:
        cam_coordinates.append((0, 0, r * np.sin(np.radians(lat))))
        continue
    lat_rad = np.radians(lat)
    ring_radius = r * np.cos(lat_rad)  # radius of this latitude ring
    height = r * np.sin(lat_rad)       # z of this latitude ring
    for lon in np.arange(0, 360, theta_stepsize):
        lon_rad = np.radians(lon)
        cam_coordinates.append((ring_radius * np.cos(lon_rad),
                                ring_radius * np.sin(lon_rad),
                                height))
# --- Camera intrinsics -------------------------------------------------------
cam_intrinsics_path = os.path.join(output_camera_dir, "intrinsics.txt")
width = scene.render.resolution_x   # pixels
height = scene.render.resolution_y  # pixels
FovX = cam.data.angle_x
# Vertical FOV derived from the horizontal FOV and the aspect ratio.
FovY = 2 * np.arctan(height * np.tan(FovX / 2) / width)
with open(cam_intrinsics_path, 'w') as f:
    f.write('Width {}\nHeight {}\nFovX {}\nFovY {}'.format(
        width, height, FovX, FovY))

# One 4x4 camera-to-world matrix per viewpoint, saved as a .npy array.
cam_extrinsics = np.zeros((len(cam_coordinates), 4, 4))
cam_extrinsics_path = os.path.join(output_camera_dir, "extrinsics.npy")
# --- Render one image per viewpoint and record each camera pose --------------
for i, coords in enumerate(cam_coordinates):
    camX, camY, camZ = coords
    cam.location = (camX, camY, camZ)
    # FIX: build the path with os.path.join instead of '/'-concatenation
    # (portable across platforms; same result on POSIX).
    scene.render.filepath = os.path.join(output_image_dir, '{0:02d}'.format(i))
    bpy.ops.render.render(write_still=True)  # render still image
    # matrix_world is the 4x4 camera-to-world transform after the TRACK_TO
    # constraint has been evaluated by the render.
    cam_extrinsics[i, ...] = np.array(cam.matrix_world)
np.save(cam_extrinsics_path, cam_extrinsics)
print("Images and camera parameters saved!")
# --- Initial point cloud sampled from the mesh surface -----------------------
mesh = trimesh.load(args.obj, force='mesh')
try:
    # FIX: try body narrowed to the one call that can legitimately fail
    # (color sampling raises when the mesh carries no color information),
    # and the bare `except:` — which also swallowed KeyboardInterrupt and
    # SystemExit — is narrowed to Exception.
    points, face_index, colors = trimesh.sample.sample_surface(
        mesh, count=args.npoints, sample_color=True)
    sampled_with_color = True
except Exception:
    print("Mesh color is NoneType! Set color to gray")
    points, face_index = trimesh.sample.sample_surface(mesh, count=args.npoints)
    colors = np.ones((args.npoints, 3)) * 128  # fallback: mid gray
    sampled_with_color = False

# Shared tail (was duplicated in both branches): per-point normals come from
# the face each sample landed on.
normals = mesh.face_normals[face_index]
output_obj_path = os.path.join(output_dir, 'pointcloud.ply')
storePly(output_obj_path, points, colors[:, :3], normals)
if sampled_with_color:
    print(".ply saved!")  # original only announced success on the color path