<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <title>Regarding MPI_Allgather collective calls in MPI</title>
  <link rel="alternate" href="https://conferences.xsede.org/c/message_boards/find_thread?p_l_id=&amp;threadId=416795" />
  <subtitle>Regarding MPI_Allgather collective calls in MPI</subtitle>
  <entry>
    <title>Regarding MPI_Allgather collective calls in MPI</title>
    <link rel="alternate" href="https://conferences.xsede.org/c/message_boards/find_message?p_l_id=&amp;messageId=416794" />
    <author>
      <name />
    </author>
    <id>https://conferences.xsede.org/c/message_boards/find_message?p_l_id=&amp;messageId=416794</id>
    <updated>2012-11-16T16:06:19Z</updated>
    <published>2012-11-16T16:06:19Z</published>
    <summary type="html">The implementation of MPI_Allgather uses different algorithms internally depending on the message size and the number of processes — for example, the Bruck algorithm for short messages and the ring algorithm for larger messages. Why do they use different implementations? Please explain.</summary>
    <dc:date>2012-11-16T16:06:19Z</dc:date>
  </entry>
</feed>

