	attempt 3d reconstruction
- README.md +2 -2
- app.py +35 -6
- packages.txt +1 -0
- requirements.txt +4 -1
README.md CHANGED

@@ -1,10 +1,10 @@
 ---
-title: Dpt Depth Estimation
+title: Dpt Depth Estimation + 3D
 emoji: ⚡
 colorFrom: blue
 colorTo: red
 sdk: gradio
-sdk_version: 2.
+sdk_version: 2.9.1
 app_file: app.py
 pinned: false
 ---
    	
app.py CHANGED

@@ -3,6 +3,7 @@ from transformers import DPTFeatureExtractor, DPTForDepthEstimation
 import torch
 import numpy as np
 from PIL import Image
+import open3d as o3d

 torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')

@@ -26,21 +27,49 @@ def process_image(image):
                        align_corners=False,
                 ).squeeze()
     output = prediction.cpu().numpy()
-
-
-
+    depth_image = (output * 255 / np.max(output)).astype('uint8')
+    # create_obj(formatted, "test.obj")
+    create_obj_2(np.array(image), depth_image)
+    # img = Image.fromarray(formatted)
+    return "output.gltf"

-    return result
+    # return result
+
+    # gradio.inputs.Image3D(self, label=None, optional=False)
+
+def create_obj_2(rgb_image, depth_image):
+    depth_o3d = o3d.geometry.Image(depth_image)
+    image_o3d = o3d.geometry.Image(rgb_image)
+    rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(image_o3d, depth_o3d)
+    w = int(depth_image.shape[0])
+    h = int(depth_image.shape[1])
+
+    FOV = np.pi/4
+    camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()
+    camera_intrinsic.set_intrinsics(w, h, w*0.5, h*0.5, w*0.5, h*0.5)
+
+    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, camera_intrinsic)
+    print('normals')
+    pcd.normals = o3d.utility.Vector3dVector(np.zeros((1, 3)))  # invalidate existing normals
+    pcd.estimate_normals()
+    # pcd.orient_normals_consistent_tangent_plane(100)
+    print('run Poisson surface reconstruction')
+    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
+        mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)
+    print(mesh)
+    o3d.io.write_triangle_mesh("output.gltf", mesh, write_triangle_uvs=True)
+    return "output.gltf"

-title = "Interactive demo: DPT"
+title = "Interactive demo: DPT + 3D"
 description = "Demo for Intel's DPT, a Dense Prediction Transformer for state-of-the-art dense prediction tasks such as semantic segmentation and depth estimation."
 examples =[['cats.jpg']]

 iface = gr.Interface(fn=process_image,
                      inputs=gr.inputs.Image(type="pil"),
-                     outputs=gr.outputs.
+                     outputs=gr.outputs.Image3D(label="predicted depth", clear_color=[1.0,1.0,1.0,1.0]),
                      title=title,
                      description=description,
                      examples=examples,
+                     allow_flagging="never",
                      enable_queue=True)
 iface.launch(debug=True)
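For reference, the new end-to-end flow can be smoke-tested outside Gradio by calling process_image directly; this is a minimal sketch, assuming app.py has already been imported so the DPT model is loaded and cats.jpg has been downloaded:

    from PIL import Image

    # process_image resizes the DPT prediction (the interpolate call above), rescales it
    # to uint8, and hands the RGB/depth pair to create_obj_2, which writes the
    # Poisson-reconstructed mesh to output.gltf in the working directory.
    result_path = process_image(Image.open("cats.jpg"))
    print(result_path)  # "output.gltf" -- open it in any glTF viewer to inspect the mesh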
    	
packages.txt ADDED

@@ -0,0 +1 @@
+libgl1-mesa-glx
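packages.txt is how a Space declares apt-level dependencies; libgl1-mesa-glx supplies the libGL.so.1 shared library that Open3D loads at import time and that a headless container image does not ship by default. A quick sanity check once the Space has rebuilt (a sketch, nothing authoritative):

    # Without libgl1-mesa-glx this import typically fails with
    # "ImportError: libGL.so.1: cannot open shared object file: No such file or directory".
    import open3d as o3d
    print(o3d.__version__)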
    	
requirements.txt CHANGED

@@ -1,4 +1,7 @@
 torch
 git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers
 numpy
-Pillow
+Pillow
+gradio>=2.9.1
+jinja2
+open3d
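The gradio>=2.9.1 pin mirrors the sdk_version bump in README.md and, together with jinja2 and open3d, covers the new 3D output path. A small check that the installed release exposes the component app.py now relies on (again a sketch, assuming only requirements.txt has been installed):

    import gradio as gr

    print(gr.__version__)                  # expected to be >= 2.9.1
    print(hasattr(gr.outputs, "Image3D"))  # True on releases that ship the 3D output component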