//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2019 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#include "foundation/PxMemory.h"
#include "EdgeList.h"
#include "Adjacencies.h"
#include "MeshCleaner.h"
#include "CmRadixSortBuffered.h"
#include "CookingUtils.h"
#include "PsArray.h"
#include "PsFoundation.h"
#include "ConvexPolygonsBuilder.h"

using namespace physx;

#define USE_PRECOMPUTED_HULL_PROJECTION

static PX_INLINE void Flip(HullTriangleData& data)
{
    PxU32 tmp = data.mRef[2];
    data.mRef[2] = data.mRef[1];
    data.mRef[1] = tmp;
}

//////////////////////////////////////////////////////////////////////////
//! A generic couple structure
class Pair : public Ps::UserAllocated
{
public:
    PX_FORCE_INLINE Pair()                                      {}
    PX_FORCE_INLINE Pair(PxU32 i0, PxU32 i1) : id0(i0), id1(i1) {}
    PX_FORCE_INLINE ~Pair()                                     {}

    //! Operator for "if(Pair==Pair)"
    PX_FORCE_INLINE bool operator==(const Pair& p) const { return (id0==p.id0) && (id1==p.id1); }
Operator for "if(Pair!=Pair)" PX_FORCE_INLINE bool operator!=(const Pair& p) const { return (id0!=p.id0) || (id1!=p.id1); } PxU32 id0; //!< First index of the pair PxU32 id1; //!< Second index of the pair }; PX_COMPILE_TIME_ASSERT(sizeof(Pair)==8); ////////////////////////////////////////////////////////////////////////// // construct a plane template PX_INLINE PxPlane PlaneEquation(const T& t, const PxVec3* verts) { const PxVec3& p0 = verts[t.v[0]]; const PxVec3& p1 = verts[t.v[1]]; const PxVec3& p2 = verts[t.v[2]]; return PxPlane(p0, p1, p2); } ////////////////////////////////////////////////////////////////////////// // negate plane static PX_FORCE_INLINE void negatePlane(Gu::HullPolygonData& data) { data.mPlane.n = -data.mPlane.n; data.mPlane.d = -data.mPlane.d; } ////////////////////////////////////////////////////////////////////////// // Inverse a buffer in-place static bool inverseBuffer(PxU32 nbEntries, PxU8* entries) { if(!nbEntries || !entries) return false; for(PxU32 i=0; i < (nbEntries>>1); i++) Ps::swap(entries[i], entries[nbEntries-1-i]); return true; } ////////////////////////////////////////////////////////////////////////// // Extracts a line-strip from a list of non-sorted line-segments (slow) static bool findLineStrip(Ps::Array& lineStrip, const Ps::Array& lineSegments) { // Ex: // // 4-2 // 0-1 // 2-3 // 4-0 // 7-3 // 7-1 // // => 0-1-7-3-2-4-0 // 0-0-1-1-2-2-3-3-4-4-7-7 // 0-1 // 0-4 // 1-7 // 2-3 // 2-4 // 3-7 // Naive implementation below Ps::Array Copy(lineSegments); RunAgain: { PxU32 nbSegments = Copy.size(); for(PxU32 j=0;j remove both PX_ASSERT(Copy.size()>=2); Copy.remove(i); Copy.remove(j); goto RunAgain; } } } // Goes through when everything's fine } PxU32 ref0 = 0xffffffff; PxU32 ref1 = 0xffffffff; if(Copy.size()>=1) { Pair* Segments = Copy.begin(); if(Segments) { ref0 = Segments->id0; ref1 = Segments->id1; lineStrip.pushBack(ref0); lineStrip.pushBack(ref1); PX_ASSERT(Copy.size()>=1); Copy.remove(0); } } Wrap: // Look for same vertex ref in remaining segments PxU32 nb = Copy.size(); if(!nb) { // ### check the line is actually closed? return true; } for(PxU32 i=0;i r1 - x lineStrip.pushBack(newRef0); // Output the other reference ref0 = newRef1; ref1 = newRef0; Copy.remove(i); goto Wrap; } } return false; } ////////////////////////////////////////////////////////////////////////// // Test for duplicate triangles PX_COMPILE_TIME_ASSERT(sizeof(Gu::TriangleT)==sizeof(PxVec3)); // ... 
static bool TestDuplicateTriangles(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
{
    if(!nbFaces || !faces)
        return true;

    Gu::TriangleT<PxU32>* indices32 = reinterpret_cast<Gu::TriangleT<PxU32>*>(PxAlloca(nbFaces*sizeof(Gu::TriangleT<PxU32>)));
    for(PxU32 i=0;i<nbFaces;i++)
    {
        indices32[i].v[0] = faces[i].v[0];
        indices32[i].v[1] = faces[i].v[1];
        indices32[i].v[2] = faces[i].v[2];
    }

    // Reduce the index triples as if they were a vertex cloud (see note above)
    ReducedVertexCloud reducer(reinterpret_cast<const PxVec3*>(indices32), nbFaces);
    REDUCEDCLOUD rc;
    reducer.Reduce(&rc);

    if(rc.NbRVerts<nbFaces)
    {
        // Duplicate triangles found
        if(repair)
        {
            nbFaces = rc.NbRVerts;
            for(PxU32 i=0;i<nbFaces;i++)
            {
                const Gu::TriangleT<PxU32>* curTri = reinterpret_cast<const Gu::TriangleT<PxU32>*>(&rc.RVerts[i]);
                faces[i].v[0] = curTri->v[0];
                faces[i].v[1] = curTri->v[1];
                faces[i].v[2] = curTri->v[2];
            }
        }
        return false;   // Test failed
    }
    return true;    // Test succeeded
}

//////////////////////////////////////////////////////////////////////////
// plane culling test
static PX_FORCE_INLINE bool testCulling(const Gu::TriangleT<PxU32>& triangle, const PxVec3* verts, const PxVec3& center)
{
    const PxPlane plane(verts[triangle.v[0]], verts[triangle.v[1]], verts[triangle.v[2]]);
    return plane.distance(center)>0.0f;
}

//////////////////////////////////////////////////////////////////////////
// face normals test
static bool TestUnifiedNormals(PxU32 nbVerts, const PxVec3* verts, PxU32 nbFaces, Gu::TriangleT<PxU32>* faces, bool repair)
{
    if(!nbVerts || !verts || !nbFaces || !faces)
        return false;

    // Unify normals so that all hull faces are well oriented

    // Compute geometric center - we need a vertex inside the hull
    const float coeff = 1.0f / float(nbVerts);
    PxVec3 geomCenter(0.0f, 0.0f, 0.0f);
    for(PxU32 i=0;i<nbVerts;i++)
        geomCenter += verts[i] * coeff;

    // The geometric center must be invisible from every face, else the winding is wrong
    for(PxU32 i=0;i<nbFaces;i++)
    {
        if(testCulling(faces[i], verts, geomCenter))
        {
            if(!repair)
                return false;   // Test failed

            // Repair mode: flip the badly oriented face
            Ps::swap(faces[i].v[1], faces[i].v[2]);
        }
    }
    return true;    // Test succeeded (or repaired)
}

//////////////////////////////////////////////////////////////////////////
// clean the mesh
static bool CleanFaces(PxU32& nbFaces, Gu::TriangleT<PxU32>* faces, PxU32& nbVerts, PxVec3* verts)
{
    // Brute force mesh cleaning.
    // PT: I added this back on Feb-18-05 because it fixes bugs with hulls from QHull.
    MeshCleaner cleaner(nbVerts, verts, nbFaces, faces->v, 0.0f);
    if (!cleaner.mNbTris)
        return false;

    nbVerts = cleaner.mNbVerts;
    nbFaces = cleaner.mNbTris;

    PxMemCopy(verts, cleaner.mVerts, cleaner.mNbVerts*sizeof(PxVec3));
    for (PxU32 i = 0; i < cleaner.mNbTris; i++)
    {
        faces[i].v[0] = cleaner.mIndices[i * 3 + 0];
        faces[i].v[1] = cleaner.mIndices[i * 3 + 1];
        faces[i].v[2] = cleaner.mIndices[i * 3 + 2];
    }

    // Get rid of duplicates
    TestDuplicateTriangles(nbFaces, faces, true);

    // Unify normals
    TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);

    // Remove zero-area triangles
    // TestZeroAreaTriangles(nbFaces, faces, verts, true);

    // Unify normals again
    TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);

    // Get rid of duplicates again
    TestDuplicateTriangles(nbFaces, faces, true);

    return true;
}
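// CleanFaces() above and CheckFaces() below are used as a pair by computeHullPolygons(): the input triangles
// are first repaired in place, then validated again with repair disabled. A minimal sketch of that calling
// pattern (buffer set-up elided, variable names are illustrative only):
//
//  PxU32 nbTris = ..., nbV = ...;
//  Gu::TriangleT<PxU32>* tris = ...;
//  PxVec3* verts = ...;
//  if(CleanFaces(nbTris, tris, nbV, verts) && CheckFaces(nbTris, tris, nbV, verts))
//  {
//      // safe to build polygon data from the triangles
//  }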
//////////////////////////////////////////////////////////////////////////
// check the newly constructed faces
static bool CheckFaces(PxU32 nbFaces, const Gu::TriangleT<PxU32>* faces, PxU32 nbVerts, const PxVec3* verts)
{
    // Remove const since we use functions that can do both testing & repairing. But we won't change the data.
    Gu::TriangleT<PxU32>* f = const_cast<Gu::TriangleT<PxU32>*>(faces);

    // Test duplicate faces
    if(!TestDuplicateTriangles(nbFaces, f, false))
        return false;

    // Test unified normals
    if(!TestUnifiedNormals(nbVerts, verts, nbFaces, f, false))
        return false;

    return true;
}

//////////////////////////////////////////////////////////////////////////
// compute the newell plane from the face verts
static bool computeNewellPlane(PxPlane& plane, PxU32 nbVerts, const PxU8* indices, const PxVec3* verts)
{
    if(!nbVerts || !indices || !verts)
        return false;

    PxVec3 centroid(0,0,0), normal(0,0,0);
    for(PxU32 i=nbVerts-1, j=0; j<nbVerts; i=j, j++)
    {
        normal.x += (verts[indices[i]].y - verts[indices[j]].y) * (verts[indices[i]].z + verts[indices[j]].z);
        normal.y += (verts[indices[i]].z - verts[indices[j]].z) * (verts[indices[i]].x + verts[indices[j]].x);
        normal.z += (verts[indices[i]].x - verts[indices[j]].x) * (verts[indices[i]].y + verts[indices[j]].y);
        centroid += verts[indices[j]];
    }
    plane.n = normal.getNormalized();
    plane.d = -(centroid.dot(plane.n)) / float(nbVerts);

    return true;
}

//////////////////////////////////////////////////////////////////////////
// check the redundant vertices found inside the polygons - remove them where possible, split the owning polygons otherwise
static void checkRedundantVertices(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull,
                                   Ps::Array<PxU32>& triangle_data, Ps::Array<PxU32>& redundantVertices)
{
    const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());

    bool needToSplitPolygons = false;
    bool* polygonMarkers = reinterpret_cast<bool*>(PxAlloca(nb_polygons*sizeof(bool)));
    PxMemZero(polygonMarkers, nb_polygons*sizeof(bool));

    bool* redundancyMarkers = reinterpret_cast<bool*>(PxAlloca(redundantVertices.size()*sizeof(bool)));
    PxMemZero(redundancyMarkers, redundantVertices.size()*sizeof(bool));

    // parse through the redundant vertices and if we cannot remove them split just the actual polygon if possible
    Ps::Array<PxU32> polygonsContainer;
    PxU32 numEntries = 0;

    for (PxU32 i = redundantVertices.size(); i--;)
    {
        numEntries = 0;
        polygonsContainer.clear();

        // go through the polygons - if a polygon does have only 3 verts we cannot remove any vertex from it, try to decompose the second one
        PxU32* Data = polygon_data.begin();
        for(PxU32 t=0;t<nb_polygons;t++)
        {
            const PxU32 nbVerts = Data[0];
            PX_ASSERT(nbVerts>=3);          // Else something very wrong happened...

            for(PxU32 j=0;j<nbVerts;j++)
            {
                // Collect the polygons that reference the redundant vertex
                if(Data[j+1] == redundantVertices[i])
                {
                    polygonsContainer.pushBack(t);
                    numEntries++;
                }
            }
            Data += Data[0]+1;
        }

        // ... (decide whether the vertex can really be removed; if not, mark the owning polygons through
        //      polygonMarkers/redundancyMarkers and raise needToSplitPolygons)
    }

    // ...

    // Rebuild the polygon and triangle data, splitting the marked polygons
    Ps::Array<PxU32> newPolygon_data;
    Ps::Array<PxU32> newTriangle_data;
    PxU32 newNb_polygons = 0;

    PxU32* data = polygon_data.begin();
    PxU32* triData = triangle_data.begin();
    for(PxU32 i=0;i<nb_polygons;i++)
    {
        // ... (copy unmarked polygons as-is, split the marked ones)
    }

    nb_polygons = newNb_polygons;
    polygon_data = newPolygon_data;
    triangle_data = newTriangle_data;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 *  Extracts the polygons of a (triangulated) convex hull.
 *  extractHullPolygons(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexHull& hull)
 *  \param      nb_polygons         [out] number of extracted polygons
 *  \param      polygon_data        [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
 *  \param      hull                [in] convex hull
 *  \param      triangle_data       [out] triangle data
 *  \param      rendundantVertices  [out] redundant vertices found inside the polygons - we want to remove them because of PCM
 *  \return     true if success
 */
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool extractHullPolygons(PxU32& nb_polygons, Ps::Array<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, Ps::Array<PxU32>* triangle_data, Ps::Array<PxU32>& rendundantVertices)
{
    PxU32 nbFaces = hull.getNbFaces();
    const PxVec3* hullVerts = hull.mHullDataHullVertices;
    const PxU32 nbVertices = hull.mHull->mNbHullVertices;

    const PxU16* wFaces = NULL;
    const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
    PX_ASSERT(wFaces || dFaces);

    ADJACENCIESCREATE create;
    create.NbFaces  = nbFaces;
    create.DFaces   = dFaces;
    create.WFaces   = wFaces;
    create.Verts    = hullVerts;
    //Create.Epsilon = 0.01f;   // PT: trying to fix Rob Elam bug. Also fixes TTP 2467
    //Create.Epsilon = 0.001f;  // PT: for "Bruno's bug"
    create.Epsilon = 0.005f;    // PT: middle-ground seems to fix both. Expose this param?

    AdjacenciesBuilder adj;
    if(!adj.Init(create))
        return false;

    PxU32 nbBoundaryEdges = adj.ComputeNbBoundaryEdges();
    if(nbBoundaryEdges)
        return false;   // A valid hull shouldn't have open edges!!
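    // From here on we work on the adjacency information built above. Each triangle edge is either "active"
    // (the triangles on both sides are not coplanar, so the edge is a real polygon border) or "inactive"
    // (coplanar neighbours, i.e. the edge lies inside a hull polygon). Polygons are recovered by
    // flood-filling across inactive edges only, and their boundaries are then stitched from the remaining
    // active edges with findLineStrip().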
    bool* markers = reinterpret_cast<bool*>(PxAlloca(nbFaces*sizeof(bool)));
    PxMemZero(markers, nbFaces*sizeof(bool));

    PxU8* vertexMarkers = reinterpret_cast<PxU8*>(PxAlloca(nbVertices*sizeof(PxU8)));
    PxMemZero(vertexMarkers, nbVertices*sizeof(PxU8));

    PxU32 currentFace = 0;  // Start with first triangle
    nb_polygons = 0;
    do
    {
        // Find the first triangle that has not been assigned to a polygon yet
        currentFace = 0;
        while(currentFace<nbFaces && markers[currentFace])
            currentFace++;

        // Local helpers used to grow a polygon by flood-filling the adjacency structure
        struct Local
        {
            static void FloodFill(Ps::Array<PxU32>& indices, const AdjTriangle* faces, PxU32 current, bool* inMarkers)
            {
                if(inMarkers[current])
                    return;
                inMarkers[current] = true;

                indices.pushBack(current);

                const AdjTriangle& AT = faces[current];

                // We can floodfill through inactive edges since the mesh is convex (inactive==planar)
                if(!AT.HasActiveEdge01())   FloodFill(indices, faces, AT.GetAdjTri(EDGE01), inMarkers);
                if(!AT.HasActiveEdge20())   FloodFill(indices, faces, AT.GetAdjTri(EDGE02), inMarkers);
                if(!AT.HasActiveEdge12())   FloodFill(indices, faces, AT.GetAdjTri(EDGE12), inMarkers);
            }

            static bool GetNeighborFace(PxU32 index, PxU32 triangleIndex, const AdjTriangle* faces, const PxU32* dfaces, PxU32& neighbor, PxU32& current)
            {
                PxU32 currentIndex = index;
                PxU32 previousIndex = index;
                bool firstFace = true;
                bool next = true;

                while (next)
                {
                    const AdjTriangle& currentAT = faces[currentIndex];

                    PxU32 refTr0 = dfaces[currentIndex*3 + 0];
                    PxU32 refTr1 = dfaces[currentIndex*3 + 1];

                    PxU32 edge[2];
                    edge[0] = 1;
                    edge[1] = 2;
                    if(triangleIndex == refTr0)
                    {
                        edge[0] = 0;
                        edge[1] = 1;
                    }
                    else if(triangleIndex == refTr1)
                    {
                        edge[0] = 0;
                        edge[1] = 2;
                    }

                    if(currentAT.HasActiveEdge(edge[0]) && currentAT.HasActiveEdge(edge[1]))
                    {
                        return false;
                    }

                    if(!currentAT.HasActiveEdge(edge[0]) && !currentAT.HasActiveEdge(edge[1]))
                    {
                        // not interested in testing transition vertices
                        if(currentIndex == index)
                        {
                            return false;
                        }

                        // transition one
                        for (PxU32 i = 0; i < 2; i++)
                        {
                            PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[i]));
                            // exit if we circle around the vertex back to beginning
                            if(testIndex == index && previousIndex != index)
                            {
                                return false;
                            }

                            if(testIndex != previousIndex)
                            {
                                // move to next
                                previousIndex = currentIndex;
                                currentIndex = testIndex;
                                break;
                            }
                        }
                    }
                    else
                    {
                        if(!currentAT.HasActiveEdge(edge[0]))
                        {
                            PxU32 t = edge[0];
                            edge[0] = edge[1];
                            edge[1] = t;
                        }

                        if(currentAT.HasActiveEdge(edge[0]))
                        {
                            PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[0]));
                            if(firstFace)
                            {
                                firstFace = false;
                            }
                            else
                            {
                                neighbor = testIndex;
                                current = currentIndex;
                                return true;
                            }
                        }

                        if(!currentAT.HasActiveEdge(edge[1]))
                        {
                            PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[1]));
                            if(testIndex != index)
                            {
                                previousIndex = currentIndex;
                                currentIndex = testIndex;
                            }
                        }
                    }
                }
                return false;
            }

            static bool CheckFloodFillFace(PxU32 index, const AdjTriangle* faces, const PxU32* dfaces)
            {
                if(!dfaces)
                    return true;

                const AdjTriangle& checkedAT = faces[index];

                PxU32 refTr0 = dfaces[index*3 + 0];
                PxU32 refTr1 = dfaces[index*3 + 1];
                PxU32 refTr2 = dfaces[index*3 + 2];

                for (PxU32 i = 0; i < 3; i++)
                {
                    if(!checkedAT.HasActiveEdge(i))
                    {
                        PxU32 testTr0 = refTr1;
                        PxU32 testTr1 = refTr2;
                        PxU32 testIndex0 = 0;
                        PxU32 testIndex1 = 1;
                        if(i == 0)
                        {
                            testTr0 = refTr0;
                            testTr1 = refTr1;
                            testIndex0 = 1;
                            testIndex1 = 2;
                        }
                        else if(i == 1)
                        {
                            testTr0 = refTr0;
                            testTr1 = refTr2;
                            testIndex0 = 0;
                            testIndex1 = 2;
                        }

                        PxU32 adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex0));

                        PxU32 neighborIndex00;
                        PxU32 neighborIndex01;
                        bool found0 = GetNeighborFace(index, testTr0, faces, dfaces, neighborIndex00, neighborIndex01);

                        PxU32 neighborIndex10;
                        PxU32 neighborIndex11;
                        bool found1 = GetNeighborFace(adjFaceTested, testTr0, faces, dfaces, neighborIndex10, neighborIndex11);
                        if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
                        {
                            return false;
                        }

                        adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex1));
                        found0 = GetNeighborFace(index, testTr1, faces, dfaces, neighborIndex00, neighborIndex01);
                        found1 = GetNeighborFace(adjFaceTested, testTr1, faces, dfaces, neighborIndex10, neighborIndex11);

                        if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
                        {
                            return false;
                        }
                    }
                }
                return true;
            }

            static bool CheckFloodFill(Ps::Array<PxU32>& indices, AdjTriangle* faces, bool* inMarkers, const PxU32* dfaces)
            {
                bool valid = true;

                for(PxU32 i=0;i<indices.size();i++)
                {
                    // ... (faces for which CheckFloodFillFace() fails are taken out of the fill again)
                }

                // Report whether the fill has to be re-run
                return !valid;
            }
        };

        if(currentFace!=nbFaces)
        {
            // Build a polygon from the current triangle by flood-filling through inactive edges
            Ps::Array<PxU32> indices;   // Indices of triangles forming hull polygon
            bool doFill = true;
            while (doFill)
            {
                Local::FloodFill(indices, adj.mFaces, currentFace, markers);
                doFill = Local::CheckFloodFill(indices, adj.mFaces, markers, dFaces);
            }

            // Now it would be nice to recreate a closed linestrip, similar to silhouette extraction. The line is composed of active edges, this time.
            Ps::Array<Pair> activeSegments;
            //Container ActiveSegments;
            // Loop through triangles composing the polygon
            for(PxU32 i=0;i<indices.size();i++)
            {
                const PxU32 triIndex = indices[i];
                const AdjTriangle& AT = adj.mFaces[triIndex];

                // Collect active edges as segments of vertex references
                if(AT.HasActiveEdge01())    activeSegments.pushBack(Pair(dFaces[triIndex*3+0], dFaces[triIndex*3+1]));
                if(AT.HasActiveEdge20())    activeSegments.pushBack(Pair(dFaces[triIndex*3+0], dFaces[triIndex*3+2]));
                if(AT.HasActiveEdge12())    activeSegments.pushBack(Pair(dFaces[triIndex*3+1], dFaces[triIndex*3+2]));
            }

            // Rebuild the closed polygon boundary from the active segments
            Ps::Array<PxU32> lineStrip;
            if(findLineStrip(lineStrip, activeSegments))
            {
                PxU32 nb = lineStrip.size();
                if(nb)
                {
                    const PxU32* entries = lineStrip.begin();
                    PX_ASSERT(entries[0] == entries[nb-1]); // findLineStrip() is designed that way. Might not be what we want!

                    // We get rid of the last (duplicated) index
                    polygon_data.pushBack(nb-1);
                    for (PxU32 i = 0; i < nb-1; i++)
                    {
                        vertexMarkers[entries[i]]++;
                        polygon_data.pushBack(entries[i]);
                    }
                    nb_polygons++;

                    // Loop through vertices composing the line strip polygon and mark the redundant vertices inside the polygon
                    for(PxU32 i=0;i<indices.size();i++)
                    {
                        const PxU32 triIndex = indices[i];
                        for(PxU32 k=0;k<3;k++)
                        {
                            const PxU32 vRef = dFaces[triIndex*3+k];
                            if(lineStrip.find(vRef) == lineStrip.end() && rendundantVertices.find(vRef) == rendundantVertices.end())
                                rendundantVertices.pushBack(vRef);
                        }
                    }

                    if(triangle_data)
                    {
                        triangle_data->pushBack(indices.size());
                        for (PxU32 j = 0; j < indices.size(); j++)
                            triangle_data->pushBack(indices[j]);
                    }
                }
            }
            else
            {
                Ps::getFoundation().error(PxErrorCode::eINVALID_OPERATION, __FILE__, __LINE__, "Meshmerizer::extractHullPolygons: line strip extraction failed");
                return false;
            }
        }
    }
    while(currentFace!=nbFaces);

    // A vertex referenced by less than 3 polygons is redundant
    for (PxU32 i = 0; i < nbVertices; i++)
    {
        if(vertexMarkers[i] < 3)
        {
            if(rendundantVertices.find(i) == rendundantVertices.end())
                rendundantVertices.pushBack(i);
        }
    }

    if(rendundantVertices.size() > 0 && triangle_data)
        checkRedundantVertices(nb_polygons, polygon_data, hull, *triangle_data, rendundantVertices);

    return true;
}
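// The polygon_data buffer built above is a packed stream of variable-sized records: each polygon is stored
// as (vertex count, index 0, ..., index N-1), immediately followed by the next polygon. A minimal sketch of
// how such a stream is walked (the helper name is illustrative only, not part of the builder):
//
//  static void walkPolygonData(PxU32 nbPolygons, const PxU32* polygonData)
//  {
//      const PxU32* data = polygonData;
//      for(PxU32 i=0;i<nbPolygons;i++)
//      {
//          const PxU32 nbVerts = data[0];      // vertex count of polygon i
//          const PxU32* indices = data + 1;    // nbVerts vertex indices
//          PX_UNUSED(indices);
//          data += nbVerts + 1;                // jump to the next record
//      }
//  }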
//////////////////////////////////////////////////////////////////////////

ConvexPolygonsBuilder::ConvexPolygonsBuilder(Gu::ConvexHullData* hull, const bool buildGRBData)
    : ConvexHullBuilder(hull, buildGRBData), mNbHullFaces(0), mFaces(NULL)
{
}

//////////////////////////////////////////////////////////////////////////

ConvexPolygonsBuilder::~ConvexPolygonsBuilder()
{
    PX_DELETE_POD(mFaces);
}

//////////////////////////////////////////////////////////////////////////
// compute hull polygons from given hull triangles
bool ConvexPolygonsBuilder::computeHullPolygons(const PxU32& nbVerts, const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles)
{
    PX_ASSERT(triangles);
    PX_ASSERT(verts);

    mHullDataHullVertices       = NULL;
    mHullDataPolygons           = NULL;
    mHullDataVertexData8        = NULL;
    mHullDataFacesByEdges8      = NULL;
    mHullDataFacesByVertices8   = NULL;

    mNbHullFaces = nbTriangles;
    mHull->mNbHullVertices = Ps::to8(nbVerts);

    // allocate additional vec3 for V4 safe load in VolumeIntegration
    mHullDataHullVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3) * (mHull->mNbHullVertices + 1), "PxVec3"));
    PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));

    mFaces = PX_NEW(HullTriangleData)[mNbHullFaces];
    for(PxU32 i=0;i<mNbHullFaces;i++)
    {
        mFaces[i].mRef[0] = triangles[i*3+0];
        mFaces[i].mRef[1] = triangles[i*3+1];
        mFaces[i].mRef[2] = triangles[i*3+2];
    }

    Gu::TriangleT<PxU32>* hullAsIndexedTriangle = reinterpret_cast<Gu::TriangleT<PxU32>*>(mFaces);

    // We don't trust the user at all... So, clean the hull.
    PxU32 nbHullVerts = mHull->mNbHullVertices;
    CleanFaces(mNbHullFaces, hullAsIndexedTriangle, nbHullVerts, mHullDataHullVertices);

    PX_ASSERT(nbHullVerts<256);
    mHull->mNbHullVertices = Ps::to8(nbHullVerts);

    // ...and then run the full tests again.
    if(!CheckFaces(mNbHullFaces, hullAsIndexedTriangle, mHull->mNbHullVertices, mHullDataHullVertices))
        return false;

    // Transform triangles-to-polygons
    if(!createPolygonData())
        return false;

    return checkHullPolygons();
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 *  Computes polygon data.
 *  \return     true if success
 */
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool ConvexPolygonsBuilder::createPolygonData()
{
    // Cleanup
    mHull->mNbPolygons = 0;
    PX_DELETE_POD(mHullDataVertexData8);
    PX_DELETE_POD(mHullDataFacesByVertices8);
    PX_FREE_AND_RESET(mHullDataPolygons);

    // Extract polygon data from triangle data
    Ps::Array<PxU32> temp;
    Ps::Array<PxU32> temp2;
    Ps::Array<PxU32> rendundantVertices;
    PxU32 nbPolygons;
    if(!extractHullPolygons(nbPolygons, temp, *this, &temp2, rendundantVertices))
        return false;

    PxVec3* reducedHullDataHullVertices = mHullDataHullVertices;
    PxU8 numReducedHullDataVertices = mHull->mNbHullVertices;

    if(rendundantVertices.size() > 0)
    {
        // Remove the redundant vertices and remap the polygon indices accordingly
        numReducedHullDataVertices = Ps::to8(mHull->mNbHullVertices - rendundantVertices.size());
        reducedHullDataHullVertices = static_cast<PxVec3*> (PX_ALLOC_TEMP(sizeof(PxVec3)*numReducedHullDataVertices, "Reduced vertices hull data"));

        PxU8* remapTable = PX_NEW(PxU8)[mHull->mNbHullVertices];
        PxU8 currentIndex = 0;
        for (PxU8 i = 0; i < mHull->mNbHullVertices; i++)
        {
            if(rendundantVertices.find(i) == rendundantVertices.end())
            {
                PX_ASSERT(currentIndex < numReducedHullDataVertices);
                reducedHullDataHullVertices[currentIndex] = mHullDataHullVertices[i];
                remapTable[i] = currentIndex;
                currentIndex++;
            }
            else
            {
                remapTable[i] = 0xFF;
            }
        }

        PxU32* data = temp.begin();
        for(PxU32 i=0;i<nbPolygons;i++)
        {
            PxU32 nbVerts = data[0];
            data++;
            PX_ASSERT(nbVerts>=3);          // Else something very wrong happened...

            for(PxU32 j=0;j<nbVerts;j++)
            {
                PX_ASSERT(data[j] < mHull->mNbHullVertices);
                data[j] = remapTable[data[j]];
            }
            data += nbVerts;
        }
        PX_DELETE_POD(remapTable);
    }

    if(nbPolygons>255)
    {
        Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder: convex hull has more than 255 polygons!");
        return false;
    }

    // Precompute hull polygon structures
    mHull->mNbPolygons = Ps::to8(nbPolygons);
    mHullDataPolygons = reinterpret_cast<Gu::HullPolygonData*>(PX_ALLOC(sizeof(Gu::HullPolygonData)*mHull->mNbPolygons, "Gu::HullPolygonData"));
    PxMemZero(mHullDataPolygons, sizeof(Gu::HullPolygonData)*mHull->mNbPolygons);

    // The winding hasn't been preserved so we need to handle this. Basically we need to "unify normals"
    // exactly as we did at hull creation time - except this time we work on polygons
    PxVec3 geomCenter;
    computeGeomCenter(geomCenter, mNbHullFaces, mFaces);
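    // The loop below fills one Gu::HullPolygonData per polygon: it packs the polygon's vertex indices into
    // mHullDataVertexData8, derives the polygon plane with computeNewellPlane(), and flips winding/plane where
    // needed so that every polygon plane faces away from geomCenter. For reference, the Newell plane is built
    // from consecutive vertex pairs (p, c) of the polygon (p = previous vertex, c = current vertex):
    //
    //     n.x += (p.y - c.y) * (p.z + c.z)
    //     n.y += (p.z - c.z) * (p.x + c.x)
    //     n.z += (p.x - c.x) * (p.y + c.y)
    //     d    = -average(vertices) . normalize(n)
    //
    // which matches what computeNewellPlane() above evaluates from the packed 8-bit indices.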
    // Loop through polygons
    // We have N polygons => remove N entries for number of vertices
    PxU32 tmp = temp.size() - nbPolygons;
    mHullDataVertexData8 = PX_NEW(PxU8)[tmp];
    PxU8* dest = mHullDataVertexData8;
    const PxU32* data = temp.begin();
    const PxU32* triData = temp2.begin();
    for(PxU32 i=0;i<nbPolygons;i++)
    {
        mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8);   // Setup link for the user
        PxU32 nbVerts = *data++;
        PX_ASSERT(nbVerts>=3);          // Else something very wrong happened...
        mHullDataPolygons[i].mNbVerts = Ps::to8(nbVerts);

        // Convert vertex references to 8-bit indices
        PxU32 index = 0;
        for(PxU32 j=0;j<nbVerts;j++)
        {
            dest[index++] = Ps::to8(data[j]);
        }

        // Compute the polygon plane from its vertices
        computeNewellPlane(mHullDataPolygons[i].mPlane, mHullDataPolygons[i].mNbVerts, dest, mHullDataHullVertices);

        // Compare the polygon plane against the plane of its first original triangle to detect a flipped winding
        PxU32 nbTris = *triData++;
        bool flip = false;
        for(PxU32 k=0;k<nbTris;k++)
        {
            PxU32 triIndex = *triData++;
            PX_ASSERT(triIndex<mNbHullFaces);
            const Gu::TriangleT<PxU32>& T = reinterpret_cast<const Gu::TriangleT<PxU32>&>(mFaces[triIndex]);
            const PxPlane PL = PlaneEquation(T, mHullDataHullVertices);
            if(k==0 && PL.n.dot(mHullDataPolygons[i].mPlane.n) < 0.0f)
            {
                flip = true;
            }
        }

        if(flip)
        {
            negatePlane(mHullDataPolygons[i]);
            inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
        }

        // Make the plane tight against all hull vertices
        for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
        {
            float d = - (mHullDataPolygons[i].mPlane.n).dot(mHullDataHullVertices[j]);
            if(d<mHullDataPolygons[i].mPlane.d)
                mHullDataPolygons[i].mPlane.d = d;
        }

        // The geometric center must stay on the negative side of the polygon plane
        if(mHullDataPolygons[i].mPlane.distance(geomCenter)>0.0f)
        {
            inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
            negatePlane(mHullDataPolygons[i]);
            PX_ASSERT(mHullDataPolygons[i].mPlane.distance(geomCenter)<=0.0f);
        }

        // Next one
        data += nbVerts;    // Skip vertex indices
        dest += mHullDataPolygons[i].mNbVerts;
    }

    if(reducedHullDataHullVertices != mHullDataHullVertices)
    {
        PxMemCopy(mHullDataHullVertices, reducedHullDataHullVertices, sizeof(PxVec3)*numReducedHullDataVertices);
        PX_FREE(reducedHullDataHullVertices);
        mHull->mNbHullVertices = numReducedHullDataVertices;
    }

    // calculate the vertex map table
    if(!calculateVertexMapTable(nbPolygons))
        return false;

#ifdef USE_PRECOMPUTED_HULL_PROJECTION
    // Loop through polygons
    for(PxU32 j=0;j<nbPolygons;j++)
    {
        // Precompute the hull's supporting vertex along each polygon normal
        PxU32 nbVerts = mHull->mNbHullVertices;
        const PxVec3* verts = mHullDataHullVertices;
        Gu::HullPolygonData& polygon = mHullDataPolygons[j];

        PxReal min = PX_MAX_F32;
        PxU8 minIndex = 0xff;
        for (PxU8 i = 0; i < nbVerts; i++)
        {
            float dp = (*verts++).dot(polygon.mPlane.n);
            if(dp < min)
            {
                min = dp;
                minIndex = i;
            }
        }
        polygon.mMinIndex = minIndex;
    }
#endif

    // Triangulate newly created polygons to recreate a clean vertex cloud.
    return createTrianglesFromPolygons();
}

//////////////////////////////////////////////////////////////////////////
// create back triangles from polygons
bool ConvexPolygonsBuilder::createTrianglesFromPolygons()
{
    if (!mHull->mNbPolygons || !mHullDataPolygons)
        return false;

    PxU32 maxNbTriangles = 0;
    for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
    {
        if (mHullDataPolygons[i].mNbVerts < 3)
        {
            Ps::getFoundation().error(PxErrorCode::eINTERNAL_ERROR, __FILE__, __LINE__, "ConvexHullBuilder::CreateTrianglesFromPolygons: convex hull has a polygon with less than 3 vertices!");
            return false;
        }
        maxNbTriangles += mHullDataPolygons[i].mNbVerts - 2;
    }

    HullTriangleData* tmpFaces = PX_NEW(HullTriangleData)[maxNbTriangles];
    HullTriangleData* currFace = tmpFaces;
    PxU32 nbTriangles = 0;
    const PxU8* vertexData = mHullDataVertexData8;
    const PxVec3* hullVerts = mHullDataHullVertices;

    for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
    {
        const PxU8* data = vertexData + mHullDataPolygons[i].mVRef8;
        PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;

        // Triangulate the polygon such that all generated triangles have one and the same vertex
        // in common.
        //
        // Make sure to avoid creating zero area triangles. Imagine the following polygon:
        //
        // 4                   3
        // *-------------------*
        // |                   |
        // *---*----*----*-----*
        // 5   6    0    1     2
        //
        // Choosing vertex 0 as the shared vertex, the following zero area triangles will be created:
        // [0 1 2], [0 5 6]
        //
        // Check for these triangles and discard them.
        // Note: Such polygons should only occur if the user defines the convex hull, i.e., the triangles
        //       of the convex shape, himself. If the convex hull is built from the vertices only, the
        //       hull algorithm removes the useless vertices.
        //
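        // Worked example for the sketch above (7 vertices, so nbVerts-2 = 5 fan triangles from vertex 0):
        // [0 1 2], [0 2 3], [0 3 4], [0 4 5], [0 5 6] - the first and last are the degenerate ones that the
        // area test below discards, leaving 3 valid triangles for this polygon.
        //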
        for (PxU32 j = 0; j < nbVerts - 2; j++)
        {
            currFace->mRef[0] = data[0];
            currFace->mRef[1] = data[(j + 1) % nbVerts];
            currFace->mRef[2] = data[(j + 2) % nbVerts];

            const PxVec3& p0 = hullVerts[currFace->mRef[0]];
            const PxVec3& p1 = hullVerts[currFace->mRef[1]];
            const PxVec3& p2 = hullVerts[currFace->mRef[2]];

            const float area = ((p1 - p0).cross(p2 - p0)).magnitudeSquared();
            if (area != 0.0f)   // Else discard the triangle
            {
                nbTriangles++;
                currFace++;
            }
        }
    }

    PX_DELETE_POD(mFaces);

    HullTriangleData* faces;
    PX_ASSERT(nbTriangles <= maxNbTriangles);
    if (maxNbTriangles == nbTriangles)
    {
        // No zero area triangles, hence the face buffer has correct size and can be used directly.
        faces = tmpFaces;
    }
    else
    {
        // Resize face buffer because some triangles were discarded.
        faces = PX_NEW(HullTriangleData)[nbTriangles];
        if (!faces)
        {
            PX_DELETE_POD(tmpFaces);
            return false;
        }
        PxMemCopy(faces, tmpFaces, sizeof(HullTriangleData)*nbTriangles);
        PX_DELETE_POD(tmpFaces);
    }
    mFaces = faces;
    mNbHullFaces = nbTriangles;

    // TODO: at this point useless vertices should be removed from the hull. The current fix is to initialize
    // support vertices to known valid vertices, but it's not really convincing.

    // Re-unify normals
    PxVec3 geomCenter;
    computeGeomCenter(geomCenter, mNbHullFaces, mFaces);
    for (PxU32 i = 0; i < mNbHullFaces; i++)
    {
        const PxPlane P(hullVerts[mFaces[i].mRef[0]], hullVerts[mFaces[i].mRef[1]], hullVerts[mFaces[i].mRef[2]]);
        if (P.distance(geomCenter) > 0.0f)
        {
            Flip(mFaces[i]);
        }
    }

    return true;
}
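//////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of this translation unit): building polygon data for an
// already-triangulated convex hull. The Gu::ConvexHullData set-up is elided; exact ownership and
// initialisation follow the cooking code that drives ConvexPolygonsBuilder.
//
//  Gu::ConvexHullData hullData;                        // assumed to be set up by the caller
//  ConvexPolygonsBuilder builder(&hullData, false);    // false: no GRB data
//  if(builder.computeHullPolygons(nbVerts, verts, nbTris, triangleIndices))
//  {
//      // builder now holds polygon planes, per-polygon vertex lists and re-triangulated faces
//  }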