From 18addf6c2c27cc6112a104a99c412c6e68dc2a93 Mon Sep 17 00:00:00 2001 From: Yves Date: Tue, 29 Dec 2009 07:06:27 -0800 Subject: [PATCH] Initial setup of the repository --- COPYING | 674 ++++++++++ COPYING.LESSER | 165 +++ Makefile | 14 + debian/README.Debian | 6 + debian/changelog | 6 + debian/compat | 1 + debian/control | 12 + debian/copyright | 35 + debian/cron.d.ex | 4 + debian/dirs | 2 + debian/emacsen-install.ex | 45 + debian/emacsen-remove.ex | 15 + debian/emacsen-startup.ex | 25 + debian/feedingit-default.ex | 10 + debian/feedingit.doc-base.EX | 22 + debian/init.d.ex | 81 ++ debian/manpage.1.ex | 59 + debian/manpage.sgml.ex | 156 +++ debian/manpage.xml.ex | 148 +++ debian/menu.ex | 2 + debian/postinst.ex | 41 + debian/postrm.ex | 39 + debian/preinst.ex | 37 + debian/prerm.ex | 40 + debian/rules | 98 ++ debian/watch.ex | 22 + setup.py | 56 + src/FeedingIt | 3 + src/FeedingIt.desktop | 7 + src/FeedingIt.py | 355 ++++++ src/feedparser.py | 2858 ++++++++++++++++++++++++++++++++++++++++++ src/feedparser.pyo | Bin 0 -> 102392 bytes src/test.py | 109 ++ 33 files changed, 5147 insertions(+) create mode 100644 COPYING create mode 100644 COPYING.LESSER create mode 100644 Makefile create mode 100644 debian/README.Debian create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/copyright create mode 100644 debian/cron.d.ex create mode 100644 debian/dirs create mode 100644 debian/docs create mode 100644 debian/emacsen-install.ex create mode 100644 debian/emacsen-remove.ex create mode 100644 debian/emacsen-startup.ex create mode 100644 debian/feedingit-default.ex create mode 100644 debian/feedingit.doc-base.EX create mode 100644 debian/init.d.ex create mode 100644 debian/manpage.1.ex create mode 100644 debian/manpage.sgml.ex create mode 100644 debian/manpage.xml.ex create mode 100644 debian/menu.ex create mode 100644 debian/postinst.ex create mode 100644 debian/postrm.ex create mode 100644 
debian/preinst.ex create mode 100644 debian/prerm.ex create mode 100755 debian/rules create mode 100644 debian/watch.ex create mode 100644 setup.py create mode 100644 src/FeedingIt create mode 100644 src/FeedingIt.desktop create mode 100644 src/FeedingIt.py create mode 100644 src/feedparser.py create mode 100644 src/feedparser.pyo create mode 100644 src/test.py diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/COPYING.LESSER b/COPYING.LESSER new file mode 100644 index 0000000..fc8a5de --- /dev/null +++ b/COPYING.LESSER @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. 
+ + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. 
(If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. 
+ + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..10ae70f --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +# This Makefile is only used when building Debian packages + +DESTDIR=/ + +all: + +install: + install -d ${DESTDIR}/usr/bin + install src/FeedingIt ${DESTDIR}/usr/bin + install -d ${DESTDIR}/opt/FeedingIt + install src/FeedingIt.py ${DESTDIR}/opt/FeedingIt + install src/feedparser.py ${DESTDIR}/opt/FeedingIt + install -d ${DESTDIR}/usr/share/applications/hildon + install src/FeedingIt.desktop ${DESTDIR}/usr/share/applications/hildon \ No newline at end of file diff --git a/debian/README.Debian b/debian/README.Debian new file mode 100644 index 0000000..5f7fc10 --- /dev/null +++ b/debian/README.Debian @@ -0,0 +1,6 @@ +feedingit for Debian +-------------------- + + + + -- unknown Mon, 28 Dec 2009 17:59:19 -0800 diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..d11bac0 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,6 @@ +feedingit (0.1-1) unstable; urgency=low + + * Initial release (Closes: #nnnn) + + -- unknown Mon, 28 Dec 2009 17:59:19 -0800 + diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..7ed6ff8 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +5 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..166fe74 --- /dev/null +++ b/debian/control @@ -0,0 +1,12 @@ +Source: feedingit +Section: unknown +Priority: extra +Maintainer: unknown +Build-Depends: debhelper (>= 5) +Standards-Version: 3.7.2 + +Package: feedingit +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends}, python-gtkhtml2, python +Description: Simple RSS Reader + Simple RSS Reader, based on 
feedparser.py diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..d527f6c --- /dev/null +++ b/debian/copyright @@ -0,0 +1,35 @@ +This package was debianized by unknown on +Mon, 28 Dec 2009 17:59:19 -0800. + +It was downloaded from + +Upstream Author: + +Copyright: + +License: + + This package is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this package; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +On Debian systems, the complete text of the GNU General +Public License can be found in `/usr/share/common-licenses/GPL'. + +The Debian packaging is (C) 2009, unknown and +is licensed under the GPL, see above. + + +# Please also look if there are files or directories which have a +# different copyright/license attached and list them here. 
diff --git a/debian/cron.d.ex b/debian/cron.d.ex new file mode 100644 index 0000000..cc0277d --- /dev/null +++ b/debian/cron.d.ex @@ -0,0 +1,4 @@ +# +# Regular cron jobs for the feedingit package +# +0 4 * * * root feedingit_maintenance diff --git a/debian/dirs b/debian/dirs new file mode 100644 index 0000000..ca882bb --- /dev/null +++ b/debian/dirs @@ -0,0 +1,2 @@ +usr/bin +usr/sbin diff --git a/debian/docs b/debian/docs new file mode 100644 index 0000000..e69de29 diff --git a/debian/emacsen-install.ex b/debian/emacsen-install.ex new file mode 100644 index 0000000..a6adfc1 --- /dev/null +++ b/debian/emacsen-install.ex @@ -0,0 +1,45 @@ +#! /bin/sh -e +# /usr/lib/emacsen-common/packages/install/feedingit + +# Written by Jim Van Zandt , borrowing heavily +# from the install scripts for gettext by Santiago Vila +# and octave by Dirk Eddelbuettel . + +FLAVOR=$1 +PACKAGE=feedingit + +if [ ${FLAVOR} = emacs ]; then exit 0; fi + +echo install/${PACKAGE}: Handling install for emacsen flavor ${FLAVOR} + +#FLAVORTEST=`echo $FLAVOR | cut -c-6` +#if [ ${FLAVORTEST} = xemacs ] ; then +# SITEFLAG="-no-site-file" +#else +# SITEFLAG="--no-site-file" +#fi +FLAGS="${SITEFLAG} -q -batch -l path.el -f batch-byte-compile" + +ELDIR=/usr/share/emacs/site-lisp/${PACKAGE} +ELCDIR=/usr/share/${FLAVOR}/site-lisp/${PACKAGE} + +# Install-info-altdir does not actually exist. +# Maybe somebody will write it. +if test -x /usr/sbin/install-info-altdir; then + echo install/${PACKAGE}: install Info links for ${FLAVOR} + install-info-altdir --quiet --section "" "" --dirname=${FLAVOR} /usr/info/${PACKAGE}.info.gz +fi + +install -m 755 -d ${ELCDIR} +cd ${ELDIR} +FILES=`echo *.el` +cp ${FILES} ${ELCDIR} +cd ${ELCDIR} + +cat << EOF > path.el +(setq load-path (cons "." 
load-path) byte-compile-warnings nil) +EOF +${FLAVOR} ${FLAGS} ${FILES} +rm -f *.el path.el + +exit 0 diff --git a/debian/emacsen-remove.ex b/debian/emacsen-remove.ex new file mode 100644 index 0000000..9d784c7 --- /dev/null +++ b/debian/emacsen-remove.ex @@ -0,0 +1,15 @@ +#!/bin/sh -e +# /usr/lib/emacsen-common/packages/remove/feedingit + +FLAVOR=$1 +PACKAGE=feedingit + +if [ ${FLAVOR} != emacs ]; then + if test -x /usr/sbin/install-info-altdir; then + echo remove/${PACKAGE}: removing Info links for ${FLAVOR} + install-info-altdir --quiet --remove --dirname=${FLAVOR} /usr/info/feedingit.info.gz + fi + + echo remove/${PACKAGE}: purging byte-compiled files for ${FLAVOR} + rm -rf /usr/share/${FLAVOR}/site-lisp/${PACKAGE} +fi diff --git a/debian/emacsen-startup.ex b/debian/emacsen-startup.ex new file mode 100644 index 0000000..4c5a503 --- /dev/null +++ b/debian/emacsen-startup.ex @@ -0,0 +1,25 @@ +;; -*-emacs-lisp-*- +;; +;; Emacs startup file, e.g. /etc/emacs/site-start.d/50feedingit.el +;; for the Debian feedingit package +;; +;; Originally contributed by Nils Naumann +;; Modified by Dirk Eddelbuettel +;; Adapted for dh-make by Jim Van Zandt + +;; The feedingit package follows the Debian/GNU Linux 'emacsen' policy and +;; byte-compiles its elisp files for each 'emacs flavor' (emacs19, +;; xemacs19, emacs20, xemacs20...). The compiled code is then +;; installed in a subdirectory of the respective site-lisp directory. +;; We have to add this to the load-path: +(let ((package-dir (concat "/usr/share/" + (symbol-name flavor) + "/site-lisp/feedingit"))) +;; If package-dir does not exist, the feedingit package must have +;; removed but not purged, and we should skip the setup. + (when (file-directory-p package-dir) + (setq load-path (cons package-dir load-path)) + (autoload 'feedingit-mode "feedingit-mode" + "Major mode for editing feedingit files." t) + (add-to-list 'auto-mode-alist '("\\.feedingit$" . 
feedingit-mode)))) + diff --git a/debian/feedingit-default.ex b/debian/feedingit-default.ex new file mode 100644 index 0000000..7b4e24d --- /dev/null +++ b/debian/feedingit-default.ex @@ -0,0 +1,10 @@ +# Defaults for feedingit initscript +# sourced by /etc/init.d/feedingit +# installed at /etc/default/feedingit by the maintainer scripts + +# +# This is a POSIX shell fragment +# + +# Additional options that are passed to the Daemon. +DAEMON_OPTS="" diff --git a/debian/feedingit.doc-base.EX b/debian/feedingit.doc-base.EX new file mode 100644 index 0000000..29ef39a --- /dev/null +++ b/debian/feedingit.doc-base.EX @@ -0,0 +1,22 @@ +Document: feedingit +Title: Debian feedingit Manual +Author: +Abstract: This manual describes what feedingit is + and how it can be used to + manage online manuals on Debian systems. +Section: unknown + +Format: debiandoc-sgml +Files: /usr/share/doc/feedingit/feedingit.sgml.gz + +Format: postscript +Files: /usr/share/doc/feedingit/feedingit.ps.gz + +Format: text +Files: /usr/share/doc/feedingit/feedingit.text.gz + +Format: HTML +Index: /usr/share/doc/feedingit/html/index.html +Files: /usr/share/doc/feedingit/html/*.html + + diff --git a/debian/init.d.ex b/debian/init.d.ex new file mode 100644 index 0000000..63004c7 --- /dev/null +++ b/debian/init.d.ex @@ -0,0 +1,81 @@ +#! /bin/sh +# +# skeleton example file to build /etc/init.d/ scripts. +# This file should be used to construct scripts for /etc/init.d. +# +# Written by Miquel van Smoorenburg . +# Modified for Debian +# by Ian Murdock . +# +# Version: @(#)skeleton 1.9 26-Feb-2001 miquels@cistron.nl +# + +PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin +DAEMON=/usr/sbin/feedingit +NAME=feedingit +DESC=feedingit + +test -x $DAEMON || exit 0 + +# Include feedingit defaults if available +if [ -f /etc/default/feedingit ] ; then + . 
/etc/default/feedingit +fi + +set -e + +case "$1" in + start) + echo -n "Starting $DESC: " + start-stop-daemon --start --quiet --pidfile /var/run/$NAME.pid \ + --exec $DAEMON -- $DAEMON_OPTS + echo "$NAME." + ;; + stop) + echo -n "Stopping $DESC: " + start-stop-daemon --stop --quiet --pidfile /var/run/$NAME.pid \ + --exec $DAEMON + echo "$NAME." + ;; + #reload) + # + # If the daemon can reload its config files on the fly + # for example by sending it SIGHUP, do it here. + # + # If the daemon responds to changes in its config file + # directly anyway, make this a do-nothing entry. + # + # echo "Reloading $DESC configuration files." + # start-stop-daemon --stop --signal 1 --quiet --pidfile \ + # /var/run/$NAME.pid --exec $DAEMON + #;; + force-reload) + # + # If the "reload" option is implemented, move the "force-reload" + # option to the "reload" entry above. If not, "force-reload" is + # just the same as "restart" except that it does nothing if the + # daemon isn't already running. + # check wether $DAEMON is running. If so, restart + start-stop-daemon --stop --test --quiet --pidfile \ + /var/run/$NAME.pid --exec $DAEMON \ + && $0 restart \ + || exit 0 + ;; + restart) + echo -n "Restarting $DESC: " + start-stop-daemon --stop --quiet --pidfile \ + /var/run/$NAME.pid --exec $DAEMON + sleep 1 + start-stop-daemon --start --quiet --pidfile \ + /var/run/$NAME.pid --exec $DAEMON -- $DAEMON_OPTS + echo "$NAME." 
+ ;; + *) + N=/etc/init.d/$NAME + # echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2 + echo "Usage: $N {start|stop|restart|force-reload}" >&2 + exit 1 + ;; +esac + +exit 0 diff --git a/debian/manpage.1.ex b/debian/manpage.1.ex new file mode 100644 index 0000000..837f0e0 --- /dev/null +++ b/debian/manpage.1.ex @@ -0,0 +1,59 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH FEEDINGIT SECTION "December 28, 2009" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +feedingit \- program to do something +.SH SYNOPSIS +.B feedingit +.RI [ options ] " files" ... +.br +.B bar +.RI [ options ] " files" ... +.SH DESCRIPTION +This manual page documents briefly the +.B feedingit +and +.B bar +commands. +.PP +.\" TeX users may be more comfortable with the \fB\fP and +.\" \fI\fP escape sequences to invode bold face and italics, +.\" respectively. +\fBfeedingit\fP is a program that... +.SH OPTIONS +These programs follow the usual GNU command line syntax, with long +options starting with two dashes (`-'). +A summary of options is included below. +For a complete description, see the Info files. +.TP +.B \-h, \-\-help +Show summary of options. +.TP +.B \-v, \-\-version +Show version of program. +.SH SEE ALSO +.BR bar (1), +.BR baz (1). +.br +The programs are documented fully by +.IR "The Rise and Fall of a Fooish Bar" , +available via the Info system. +.SH AUTHOR +feedingit was written by . 
+.PP +This manual page was written by unknown , +for the Debian project (but may be used by others). diff --git a/debian/manpage.sgml.ex b/debian/manpage.sgml.ex new file mode 100644 index 0000000..d1a98d1 --- /dev/null +++ b/debian/manpage.sgml.ex @@ -0,0 +1,156 @@ + manpage.1'. You may view + the manual page with: `docbook-to-man manpage.sgml | nroff -man | + less'. A typical entry in a Makefile or Makefile.am is: + +manpage.1: manpage.sgml + docbook-to-man $< > $@ + + + The docbook-to-man binary is found in the docbook-to-man package. + Please remember that if you create the nroff version in one of the + debian/rules file targets (such as build), you will need to include + docbook-to-man in your Build-Depends control field. + + --> + + + FIRSTNAME"> + SURNAME"> + + December 28, 2009"> + + SECTION"> + yves@marcoz.org"> + + FEEDINGIT"> + + + Debian"> + GNU"> + GPL"> +]> + + + +
+ &dhemail; +
+ + &dhfirstname; + &dhsurname; + + + 2003 + &dhusername; + + &dhdate; +
+ + &dhucpackage; + + &dhsection; + + + &dhpackage; + + program to do something + + + + &dhpackage; + + + + + + + + DESCRIPTION + + This manual page documents briefly the + &dhpackage; and bar + commands. + + This manual page was written for the &debian; distribution + because the original program does not have a manual page. + Instead, it has documentation in the &gnu; + Info format; see below. + + &dhpackage; is a program that... + + + + OPTIONS + + These programs follow the usual &gnu; command line syntax, + with long options starting with two dashes (`-'). A summary of + options is included below. For a complete description, see the + Info files. + + + + + + + + Show summary of options. + + + + + + + + Show version of program. + + + + + + SEE ALSO + + bar (1), baz (1). + + The programs are documented fully by The Rise and + Fall of a Fooish Bar available via the + Info system. + + + AUTHOR + + This manual page was written by &dhusername; &dhemail; for + the &debian; system (but may be used by others). Permission is + granted to copy, distribute and/or modify this document under + the terms of the &gnu; General Public License, Version 2 any + later version published by the Free Software Foundation. + + + On Debian systems, the complete text of the GNU General Public + License can be found in /usr/share/common-licenses/GPL. + + + +
+ + + + diff --git a/debian/manpage.xml.ex b/debian/manpage.xml.ex new file mode 100644 index 0000000..a6011aa --- /dev/null +++ b/debian/manpage.xml.ex @@ -0,0 +1,148 @@ + +.
will be generated. You may view the +manual page with: nroff -man .
| less'. A +typical entry in a Makefile or Makefile.am is: + +DB2MAN=/usr/share/sgml/docbook/stylesheet/xsl/nwalsh/\ +manpages/docbook.xsl +XP=xsltproc -''-nonet + +manpage.1: manpage.dbk + $(XP) $(DB2MAN) $< + +The xsltproc binary is found in the xsltproc package. The +XSL files are in docbook-xsl. Please remember that if you +create the nroff version in one of the debian/rules file +targets (such as build), you will need to include xsltproc +and docbook-xsl in your Build-Depends control field. + +--> + + + FIRSTNAME"> + SURNAME"> + + December 28, 2009"> + + SECTION"> + yves@marcoz.org"> + + FEEDINGIT"> + + + Debian"> + GNU"> + GPL"> +]> + + + +
+ &dhemail; +
+ + &dhfirstname; + &dhsurname; + + + 2003 + &dhusername; + + &dhdate; +
+ + &dhucpackage; + + &dhsection; + + + &dhpackage; + + program to do something + + + + &dhpackage; + + + + + + + + DESCRIPTION + + This manual page documents briefly the + &dhpackage; and bar + commands. + + This manual page was written for the &debian; distribution + because the original program does not have a manual page. + Instead, it has documentation in the &gnu; + Info format; see below. + + &dhpackage; is a program that... + + + + OPTIONS + + These programs follow the usual &gnu; command line syntax, + with long options starting with two dashes (`-'). A summary of + options is included below. For a complete description, see the + Info files. + + + + + + + + Show summary of options. + + + + + + + + Show version of program. + + + + + + SEE ALSO + + bar (1), baz (1). + + The programs are documented fully by The Rise and + Fall of a Fooish Bar available via the + Info system. + + + AUTHOR + + This manual page was written by &dhusername; &dhemail; for + the &debian; system (but may be used by others). Permission is + granted to copy, distribute and/or modify this document under + the terms of the &gnu; General Public License, Version 2 any + later version published by the Free Software Foundation. + + + On Debian systems, the complete text of the GNU General Public + License can be found in /usr/share/common-licenses/GPL. + + + +
+ diff --git a/debian/menu.ex b/debian/menu.ex new file mode 100644 index 0000000..b440db3 --- /dev/null +++ b/debian/menu.ex @@ -0,0 +1,2 @@ +?package(feedingit):needs="X11|text|vc|wm" section="Apps/see-menu-manual"\ + title="feedingit" command="/usr/bin/feedingit" diff --git a/debian/postinst.ex b/debian/postinst.ex new file mode 100644 index 0000000..ff994dd --- /dev/null +++ b/debian/postinst.ex @@ -0,0 +1,41 @@ +#!/bin/sh +# postinst script for feedingit +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff --git a/debian/postrm.ex b/debian/postrm.ex new file mode 100644 index 0000000..b962cae --- /dev/null +++ b/debian/postrm.ex @@ -0,0 +1,39 @@ +#!/bin/sh +# postrm script for feedingit +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 + + diff --git a/debian/preinst.ex b/debian/preinst.ex new file mode 100644 index 0000000..a5b99ca --- /dev/null +++ b/debian/preinst.ex @@ -0,0 +1,37 @@ +#!/bin/sh +# preinst script for feedingit +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff --git a/debian/prerm.ex b/debian/prerm.ex new file mode 100644 index 0000000..967a168 --- /dev/null +++ b/debian/prerm.ex @@ -0,0 +1,40 @@ +#!/bin/sh +# prerm script for feedingit +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `upgrade' +# * `failed-upgrade' +# * `remove' `in-favour' +# * `deconfigure' `in-favour' +# `removing' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + remove|upgrade|deconfigure) + ;; + + failed-upgrade) + ;; + + *) + echo "prerm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +exit 0 + + diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..6ca63f8 --- /dev/null +++ b/debian/rules @@ -0,0 +1,98 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# This file was originally written by Joey Hess and Craig Small. +# As a special exception, when this file is copied by dh-make into a +# dh-make output file, you may use that output file without restriction. 
+# This special exception was added by Craig Small in version 0.37 of dh-make. + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + + + + +CFLAGS = -Wall -g + +ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) + CFLAGS += -O0 +else + CFLAGS += -O2 +endif + +configure: configure-stamp +configure-stamp: + dh_testdir + # Add here commands to configure the package. + + touch configure-stamp + + +build: build-stamp + +build-stamp: configure-stamp + dh_testdir + + # Add here commands to compile the package. + $(MAKE) + #docbook-to-man debian/feedingit.sgml > feedingit.1 + + touch $@ + +clean: + dh_testdir + dh_testroot + rm -f build-stamp configure-stamp + + # Add here commands to clean up after the build process. + -$(MAKE) clean + + dh_clean + +install: build + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + + # Add here commands to install the package into debian/feedingit. + $(MAKE) DESTDIR=$(CURDIR)/debian/feedingit install + + +# Build architecture-independent files here. +binary-indep: build install +# We have nothing to do by default. + +# Build architecture-dependent files here. 
+binary-arch: build install + dh_testdir + dh_testroot + dh_installchangelogs + dh_installdocs + dh_installexamples +# dh_install +# dh_installmenu +# dh_installdebconf +# dh_installlogrotate +# dh_installemacsen +# dh_installpam +# dh_installmime +# dh_python +# dh_installinit +# dh_installcron +# dh_installinfo + dh_installman + dh_link + dh_strip + dh_compress + dh_fixperms +# dh_perl +# dh_makeshlibs + dh_installdeb + dh_shlibdeps + dh_gencontrol + dh_md5sums + dh_builddeb + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary install configure diff --git a/debian/watch.ex b/debian/watch.ex new file mode 100644 index 0000000..6c9d3a7 --- /dev/null +++ b/debian/watch.ex @@ -0,0 +1,22 @@ +# Example watch control file for uscan +# Rename this file to "watch" and then you can run the "uscan" command +# to check for upstream updates and more. +# See uscan(1) for format + +# Compulsory line, this is a version 3 file +version=3 + +# Uncomment to examine a Webpage +# +#http://www.example.com/downloads.php feedingit-(.*)\.tar\.gz + +# Uncomment to examine a Webserver directory +#http://www.example.com/pub/feedingit-(.*)\.tar\.gz + +# Uncomment to examine an FTP server +#ftp://ftp.example.com/pub/feedingit-(.*)\.tar\.gz debian uupdate + +# Uncomment to find new files on sourceforge, for debscripts >= 2.9 +# http://sf.net/feedingit/feedingit-(.*)\.tar\.gz + + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..59d7363 --- /dev/null +++ b/setup.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python2.5 + +''' + Setup script, setup.py, is used to create an installable package from a Python project. + For more information see http://docs.python.org/distutils/setupscript.html + + This script with Makefile is used to generate the Debian package. +''' + +from distutils.core import setup +import os, os.path; + + +# Source directory. +source_dir = 'src'; + +# Executables. These files will be installed into bin folder (example /usr/local/bin). 
+scripts = ['src/FeedingIt'] + +data_files = [] + +# Included packages from source directory. +packages = [''] + +package_dir = {'' : source_dir} + +def path_to_package(base_dir, path): + ''' + Convert directory path to package name. + ''' + head, tail = os.path.split(path) + + if head == '' or head == base_dir: + return tail + else: + return path_to_package(base_dir, head) + "." + tail + + +''' + Append all packages from source_dir ('src'). +''' +for dirpath, dirnames, filenames in os.walk(source_dir): + if "__init__.py" in filenames: + packages.append(path_to_package(source_dir, dirpath)) + + +setup( + name = 'feedingit', + version = '0.1', + + packages = packages, + package_dir = package_dir, + scripts = scripts, + data_files = data_files +) + diff --git a/src/FeedingIt b/src/FeedingIt new file mode 100644 index 0000000..d0856d8 --- /dev/null +++ b/src/FeedingIt @@ -0,0 +1,3 @@ +#!/bin/sh +cd /opt/FeedingIt +python2.5 FeedingIt.py \ No newline at end of file diff --git a/src/FeedingIt.desktop b/src/FeedingIt.desktop new file mode 100644 index 0000000..4ba47e6 --- /dev/null +++ b/src/FeedingIt.desktop @@ -0,0 +1,7 @@ +[Desktop Entry] +Encoding=UTF-8 +Version=0.10 +Type=Application +Name=FeedingIt +Exec=/usr/bin/FeedingIt +Icon=xournal diff --git a/src/FeedingIt.py b/src/FeedingIt.py new file mode 100644 index 0000000..eaef9e5 --- /dev/null +++ b/src/FeedingIt.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python2.5 + +# +# Copyright (c) 2007-2008 INdT. +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see . +# + +# ============================================================================ +# Name : FeedingIt.py +# Author : Yves Marcoz +# Version : 0.1 +# Description : PyGtk Example +# ============================================================================ + +import gtk +import feedparser +import pango +import hildon +import gtkhtml2 +import time +import webbrowser +import pickle +from os.path import isfile, isdir +from os import mkdir +import md5 +import sys + +CONFIGDIR="/home/user/.feedingit/" + +def getId(string): + return md5.new(string).hexdigest() + +class Feed: + # Contains all the info about a single feed (articles, ...), and expose the data + def __init__(self, name, url): + self.feed = [] + self.name = name + self.url = url + self.updateTime = "Never" + #self.feed=feedparser.parse(url) + + def updateFeed(self): + self.feed=feedparser.parse(self.url) + self.updateTime = time.asctime() + file = open(CONFIGDIR+getId(self.name), "w") + pickle.dump(self, file ) + file.close() + + def getUpdateTime(self): + return self.updateTime + + def getEntries(self): + try: + return self.feed["entries"] + except: + return [] + + def getItem(self, index): + try: + return self.feed["entries"][index] + except: + return [] + + def getArticle(self, index): + entry = self.feed["entries"][index] + text = "

" + entry["title"] + "

" + text = text + "Date: " + time.strftime("%a, %d %b %Y %H:%M:%S",entry["updated_parsed"]) + "" + text = text + "
" + text = text + entry["summary"] + return text + +class Listing: + # Lists all the feeds in a dictionary, and expose the data + + def updateFeeds(self): + for key in self.listOfFeeds.keys(): + self.feeds[key].updateFeed() + + def getFeed(self, key): + return self.feeds[key] + + def getFeedUpdateTime(self, key): + return self.feeds[key].getUpdateTime() + + def getFeedTitle(self, key): + return self.listOfFeeds[key]["title"] + + def getFeedUrl(self, key): + return self.listOfFeeds[key]["url"] + + def getListOfFeeds(self): + return self.listOfFeeds.keys() + + def addFeed(self, title, url): + self.listOfFeeds[getId(title)] = {"title":title, "url":url} + self.saveConfig() + self.feeds[getId(title)] = Feed(title, url) + + def saveConfig(self): + file = open(CONFIGDIR+"feeds.pickle", "w") + pickle.dump(self.listOfFeeds, file) + file.close() + + def __init__(self): + self.feeds = {} + if isfile(CONFIGDIR+"feeds.pickle"): + file = open(CONFIGDIR+"feeds.pickle") + self.listOfFeeds = pickle.load(file) + file.close() + else: + self.listOfFeeds = {getId("Slashdot"):{"title":"Slashdot", "url":"http://rss.slashdot.org/Slashdot/slashdot"}, } + for key in self.listOfFeeds.keys(): + if isfile(CONFIGDIR+key): + file = open(CONFIGDIR+key) + self.feeds[key] = pickle.load(file) + file.close() + else: + self.feeds[key] = Feed(self.listOfFeeds[key]["title"], self.listOfFeeds[key]["url"]) + self.saveConfig() + + +class AddWidgetWizard(hildon.WizardDialog): + + def __init__(self, parent): + # Create a Notebook + self.notebook = gtk.Notebook() + + self.nameEntry = hildon.Entry(gtk.HILDON_SIZE_AUTO) + self.nameEntry.set_placeholder("Enter Feed Name") + + self.urlEntry = hildon.Entry(gtk.HILDON_SIZE_AUTO) + + self.urlEntry.set_placeholder("Enter a URL") + + labelEnd = gtk.Label("Success") + + self.notebook.append_page(self.nameEntry, None) + self.notebook.append_page(self.urlEntry, None) + self.notebook.append_page(labelEnd, None) + + hildon.WizardDialog.__init__(self, parent, "Add Feed", 
self.notebook) + + # Set a handler for "switch-page" signal + #self.notebook.connect("switch_page", self.on_page_switch, self) + + # Set a function to decide if user can go to next page + self.set_forward_page_func(self.some_page_func) + + self.show_all() + print dir(self) + + def getData(self): + return (self.nameEntry.get_text(), self.urlEntry.get_text()) + + def on_page_switch(self, notebook, page, num, dialog): + print >>sys.stderr, "Page %d" % num + return True + + def some_page_func(self, nb, current, userdata): + # Validate data for 1st page + print current + if current == 0: + entry = nb.get_nth_page(current) + # Check the name is not null + return len(entry.get_text()) != 0 + elif current == 1: + entry = nb.get_nth_page(current) + # Check the url is not null, and starts with http + print ( (len(entry.get_text()) != 0) and (entry.get_text().startswith("http")) ) + return ( (len(entry.get_text()) != 0) and (entry.get_text().startswith("http")) ) + elif current != 2: + return False + else: + return True + +class FeedingIt: + def __init__(self): + # Init the windows + self.window = hildon.StackableWindow() + menu = hildon.AppMenu() + # Create a button and add it to the menu + button = hildon.GtkButton(gtk.HILDON_SIZE_AUTO) + button.set_label("Update All Feeds") + button.connect("clicked", self.button_update_clicked, "All") + menu.append(button) + button = hildon.GtkButton(gtk.HILDON_SIZE_AUTO) + button.set_label("Add Feed") + button.connect("clicked", self.button_add_clicked) + menu.append(button) + + self.window.set_app_menu(menu) + menu.show_all() + + self.feedWindow = hildon.StackableWindow() + self.articleWindow = hildon.StackableWindow() + + self.listing = Listing() + #self.listing.downloadFeeds() + self.displayListing() + + #self.window.show_all() + #self.displayFeed(self.listing.getFeed(0)) + + def button_add_clicked(self, button): + wizard = AddWidgetWizard(self.window) + ret = wizard.run() + if ret == 2: + (title, url) = wizard.getData() + if (not 
title == '') and (not url == ''): + self.listing.addFeed(title, url) + wizard.destroy() + self.displayListing() + + def button_update_clicked(self, button, key): + hildon.hildon_gtk_window_set_progress_indicator(self.window, 1) + if key == "All": + self.listing.updateFeeds() + else: + self.listing.getFeed(key).updateFeed() + self.displayListing() + hildon.hildon_gtk_window_set_progress_indicator(self.window, 0) + + def displayListing(self): + try: + self.window.remove(self.pannableListing) + except: + pass + self.vboxListing = gtk.VBox(False,10) + self.pannableListing = hildon.PannableArea() + self.pannableListing.add_with_viewport(self.vboxListing) + + for key in self.listing.getListOfFeeds(): + #button = gtk.Button(item) + button = hildon.Button(gtk.HILDON_SIZE_AUTO_WIDTH | gtk.HILDON_SIZE_FINGER_HEIGHT, + hildon.BUTTON_ARRANGEMENT_VERTICAL) + button.set_text(self.listing.getFeedTitle(key), self.listing.getFeedUpdateTime(key)) + button.set_alignment(0,0,1,1) + #label = button.child + #label.modify_font(pango.FontDescription("sans 10")) + button.connect("clicked", self.buttonFeedClicked, self, self.window, key) + self.vboxListing.pack_start(button, expand=False) + self.window.add(self.pannableListing) + self.window.show_all() + + def displayFeed(self, key): + # Initialize the feed panel + self.vboxFeed = gtk.VBox(False, 10) + self.pannableFeed = hildon.PannableArea() + self.pannableFeed.add_with_viewport(self.vboxFeed) + + index = 0 + for item in self.listing.getFeed(key).getEntries(): + #button = hildon.Button(gtk.HILDON_SIZE_AUTO_WIDTH | gtk.HILDON_SIZE_FINGER_HEIGHT, + # hildon.BUTTON_ARRANGEMENT_HORIZONTAL) + #button.set_text(item["title"], time.strftime("%a, %d %b %Y %H:%M:%S",item["updated_parsed"])) + #button.set_text(item["title"], time.asctime(item["updated_parsed"])) + #button.set_text(item["title"],"") + #button.set_alignment(0,0,1,1) + #button.set_markup(True) + button = gtk.Button(item["title"]) + button.set_alignment(0,0) + label = button.child + 
#label.set_markup(item["title"]) + label.modify_font(pango.FontDescription("sans 16")) + button.connect("clicked", self.button_clicked, self, self.window, key, index) + self.vboxFeed.pack_start(button, expand=False) + index=index+1 + + self.feedWindow.add(self.pannableFeed) + self.feedWindow.show_all() + + def displayArticle(self, key, index): + text = self.listing.getFeed(key).getArticle(index) + self.articleWindow = hildon.StackableWindow() + # Init the article display + self.view = gtkhtml2.View() + self.document = gtkhtml2.Document() + self.view.set_document(self.document) + self.pannable_article = hildon.PannableArea() + + #self.view.connect("on_url", self._signal_on_url) + self.document.connect("link_clicked", self._signal_link_clicked) + #self.document.connect("request-url", self._signal_request_url) + + self.document.clear() + self.document.open_stream("text/html") + self.document.write_stream(text) + self.document.close_stream() + + self.pannable_article.add_with_viewport(self.view) + self.articleWindow.add(self.pannable_article) + self.articleWindow.show_all() + +# def _signal_on_url(self, object, url): +# if url == None: url = "" +# else: url = self._complete_url(url) + #self.emit("status_changed", url) + + def _signal_link_clicked(self, object, link): + #self.emit("open_uri", self._complete_url(link)) + #os.spawnl(os.P_NOWAIT, '/usr/bin/browser', '/usr/bin/browser', '--url', link) + webbrowser.open(link) + +# def _signal_request_url(self, object, url, stream): +# stream.write(self._fetch_url(self._complete_url(url))) +# +# def _complete_url(self, url): +# import string, urlparse, urllib +# url = urllib.quote(url, safe=string.punctuation) +# if urlparse.urlparse(url)[0] == '': +# return urlparse.urljoin(self.location, url) +# else: +# return url +# +# def _open_url(self, url, headers=[]): +# import urllib2 +# opener = urllib2.build_opener() +# opener.addheaders = [('User-agent', 'Wikitin')]+headers +# return opener.open(url) +# +# def _fetch_url(self, 
url, headers=[]): +# return self._open_url(url, headers).read() + + + def button_clicked(widget, button, app, window, key, index): + app.displayArticle(key, index) + + def buttonFeedClicked(widget, button, app, window, key): + app.displayFeed(key) + + def run(self): + self.window.connect("destroy", gtk.main_quit) + #self.window.show_all() + gtk.main() + +if __name__ == "__main__": + if not isdir(CONFIGDIR): + try: + mkdir(CONFIGDIR) + except: + print "Error: Can't create configuration directory" + sys.exit(1) + app = FeedingIt() + app.run() diff --git a/src/feedparser.py b/src/feedparser.py new file mode 100644 index 0000000..bb802df --- /dev/null +++ b/src/feedparser.py @@ -0,0 +1,2858 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec +""" + +__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs" +__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks "] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. +ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" + +# List of preferred XML parsers, by SAX driver name. These will be tried first, +# but if they're not installed, Python will keep searching through its own list +# of pre-installed parsers until it finds one that supports everything we need. +PREFERRED_XML_PARSERS = ["drv_libxml2"] + +# If you want feedparser to automatically run HTML markup through HTML Tidy, set +# this to 1. Requires mxTidy +# or utidylib . +TIDY_MARKUP = 0 + +# List of Python interfaces for HTML Tidy, in order of preference. 
Only useful +# if TIDY_MARKUP = 1 +PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] + +# ---------- required modules (should come with any Python distribution) ---------- +import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 +try: + from cStringIO import StringIO as _StringIO +except: + from StringIO import StringIO as _StringIO + +# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- + +# gzip is included with most Python distributions, but may not be available if you compiled your own +try: + import gzip +except: + gzip = None +try: + import zlib +except: + zlib = None + +# If a real XML parser is available, feedparser will attempt to use it. feedparser has +# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the +# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some +# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. +try: + import xml.sax + xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers + from xml.sax.saxutils import escape as _xmlescape + _XML_AVAILABLE = 1 +except: + _XML_AVAILABLE = 0 + def _xmlescape(data): + data = data.replace('&', '&') + data = data.replace('>', '>') + data = data.replace('<', '<') + return data + +# base64 support for Atom feeds that contain embedded binary data +try: + import base64, binascii +except: + base64 = binascii = None + +# cjkcodecs and iconv_codec provide support for more character encodings. 
+# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + 
try: + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. 
for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + else: + # entity resolution graciously donated by Aaron Swartz + def name2cp(k): + import htmlentitydefs + if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3 + return htmlentitydefs.name2codepoint[k] + k = htmlentitydefs.entitydefs[k] + if k.startswith('&#') and k.endswith(';'): + return int(k[2:-1]) # not in latin-1 + return ord(k) + try: name2cp(ref) + except KeyError: text = '&%s;' % ref + else: text = unichr(name2cp(ref)).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. 
+ pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + output = ''.join(pieces) + if stripWhitespace: + output = output.strip() + if not expectingText: return output + + # decode 
base64 content + if base64 and self.contentparams.get('base64', 0): + try: + output = base64.decodestring(output) + except binascii.Error: + pass + except binascii.Incomplete: + pass + + # resolve relative URIs + if (element in self.can_be_relative_uri) and output: + output = self.resolveURI(output) + + # decode entities within embedded markup + if not self.contentparams.get('base64', 0): + output = self.decodeEntities(element, output) + + # remove temporary cruft from contentparams + try: + del self.contentparams['mode'] + except KeyError: + pass + try: + del self.contentparams['base64'] + except KeyError: + pass + + # resolve relative URIs within embedded markup + if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: + if element in self.can_contain_relative_uris: + output = _resolveRelativeURIs(output, self.baseuri, self.encoding) + + # sanitize embedded markup + if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types: + if element in self.can_contain_dangerous_markup: + output = _sanitizeHTML(output, self.encoding) + + if self.encoding and type(output) != type(u''): + try: + output = unicode(output, self.encoding) + except: + pass + + # categories/tags/keywords/whatever are handled in _end_category + if element == 'category': + return output + + # store output in appropriate place(s) + if self.inentry and not self.insource: + if element == 'content': + self.entries[-1].setdefault(element, []) + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element].append(contentparams) + elif element == 'link': + self.entries[-1][element] = output + if output: + self.entries[-1]['links'][-1]['href'] = output + else: + if element == 'description': + element = 'summary' + self.entries[-1][element] = output + if self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element + '_detail'] = 
contentparams + elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage): + context = self._getContext() + if element == 'description': + element = 'subtitle' + context[element] = output + if element == 'link': + context['links'][-1]['href'] = output + elif self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + context[element + '_detail'] = contentparams + return output + + def pushContent(self, tag, attrsD, defaultContentType, expectingText): + self.incontent += 1 + self.contentparams = FeedParserDict({ + 'type': self.mapContentType(attrsD.get('type', defaultContentType)), + 'language': self.lang, + 'base': self.baseuri}) + self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) + self.push(tag, expectingText) + + def popContent(self, tag): + value = self.pop(tag) + self.incontent -= 1 + self.contentparams.clear() + return value + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 
'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + self.inimage = 1 + self.push('image', 0) + context = self._getContext() + context.setdefault('image', FeedParserDict()) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + self.intextinput = 1 + self.push('textinput', 0) + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + 
self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['textinput']['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['image']['height'] = value + + def _start_url(self, attrsD): + self.push('href', 
1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + elif self.inimage: + context = self._getContext() + context['image']['href'] = value + elif self.intextinput: + context = self._getContext() + context['textinput']['link'] = value + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author = context.get(key) + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author) + if not emailmatch: return + 
email = emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + context.setdefault('%s_detail' % key, FeedParserDict()) + context['%s_detail' % key]['name'] = author + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + 
self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + self.push('license', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('license') + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + + def _end_creativecommons_license(self): + self.pop('license') + + def _addTag(self, term, scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label})) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % 
repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + attrsD.setdefault('type', 'text/html') + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context = self._getContext() + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD['rel'] == 'enclosure': + self._start_enclosure(attrsD) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + if self.intextinput: + context['textinput']['link'] = value + if self.inimage: + context['image']['link'] = value + _end_producturl = _end_link + + def 
_start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + value = self.popContent('title') + context = self._getContext() + if self.intextinput: + context['textinput']['title'] = value + elif self.inimage: + context['image']['title'] = value + _end_dc_title = _end_title + _end_media_title = _end_title + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + context = self._getContext() + if self.intextinput: + context['textinput']['description'] = value + elif self.inimage: + context['image']['description'] = value + self._summaryKey = None + _end_abstract = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = 
self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href: + context = self._getContext() + if not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if 
src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + _end_body = _end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 
'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname)) + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
+ attrsD = {} + for (namespace, attrlocalname), attrvalue in attrs._attrs.items(): + lowernamespace = (namespace or '').lower() + prefix = self._matchnamespaces.get(lowernamespace, '') + if prefix: + attrlocalname = prefix + ':' + attrlocalname + attrsD[str(attrlocalname).lower()] = attrvalue + for qname in attrs.getQNames(): + attrsD[str(qname).lower()] = attrs.getValueByQName(qname) + self.unknown_starttag(localname, attrsD.items()) + + def characters(self, text): + self.handle_data(text) + + def endElementNS(self, name, qname): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = '' + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if prefix: + localname = prefix + ':' + localname + localname = str(localname).lower() + self.unknown_endtag(localname) + + def error(self, exc): + self.bozo = 1 + self.exc = exc + + def fatalError(self, exc): + self.error(exc) + raise exc + +class _BaseHTMLProcessor(sgmllib.SGMLParser): + elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', + 'img', 'input', 'isindex', 'link', 'meta', 'param'] + + def __init__(self, encoding): + self.encoding = encoding + if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding) + sgmllib.SGMLParser.__init__(self) + + def reset(self): + self.pieces = [] + sgmllib.SGMLParser.reset(self) + + def _shorttag_replace(self, match): + tag = match.group(1) + if tag in self.elements_no_end_tag: + return '<' + tag + ' />' + else: + return '<' + tag + '>' + + def feed(self, data): + data = re.compile(r'', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace + data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data) + data = data.replace(''', "'") + data = data.replace('"', '"') + if self.encoding and type(data) == type(u''): + data = data.encode(self.encoding) + 
sgmllib.SGMLParser.feed(self, data) + + def normalize_attrs(self, attrs): + # utility method to be called by descendants + attrs = [(k.lower(), v) for k, v in attrs] + attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] + return attrs + + def unknown_starttag(self, tag, attrs): + # called for each start tag + # attrs is a list of (attr, value) tuples + # e.g. for
, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+        for key, value in attrs:
+            if type(value) != type(u''):
+                value = unicode(value, self.encoding)
+            uattrs.append((unicode(key, self.encoding), value))
+        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for 
, tag will be 'pre' + # Reconstruct the original end tag. + if tag not in self.elements_no_end_tag: + self.pieces.append("" % locals()) + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + # Reconstruct the original character reference. + self.pieces.append('&#%(ref)s;' % locals()) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + # Reconstruct the original entity reference. + self.pieces.append('&%(ref)s;' % locals()) + + def handle_data(self, text): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + # Store the original text verbatim. + if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text) + self.pieces.append(text) + + def handle_comment(self, text): + # called for each HTML comment, e.g. + # Reconstruct the original comment. + self.pieces.append('' % locals()) + + def handle_pi(self, text): + # called for each processing instruction, e.g. + # Reconstruct original processing instruction. + self.pieces.append('' % locals()) + + def handle_decl(self, text): + # called for the DOCTYPE, if present, e.g. 
+ # + # Reconstruct original DOCTYPE + self.pieces.append('' % locals()) + + _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match + def _scan_name(self, i, declstartpos): + rawdata = self.rawdata + n = len(rawdata) + if i == n: + return None, -1 + m = self._new_declname_match(rawdata, i) + if m: + s = m.group() + name = s.strip() + if (i + len(s)) == n: + return None, -1 # end of buffer + return name.lower(), m.end() + else: + self.handle_data(rawdata) +# self.updatepos(declstartpos, i) + return None, -1 + + def output(self): + '''Return processed HTML as a single string''' + return ''.join([str(p) for p in self.pieces]) + +class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): + def __init__(self, baseuri, baselang, encoding): + sgmllib.SGMLParser.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + + def decodeEntities(self, element, data): + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace('"', '"') + data = data.replace(''', ''') + data = data.replace(''', ''') + if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace(''', "'") + return data + +class _RelativeURIResolver(_BaseHTMLProcessor): + relative_uris = [('a', 'href'), + ('applet', 'codebase'), + ('area', 'href'), + ('blockquote', 'cite'), + ('body', 'background'), + ('del', 'cite'), + ('form', 'action'), + ('frame', 'longdesc'), + ('frame', 'src'), + ('iframe', 'longdesc'), + ('iframe', 'src'), + ('head', 'profile'), + ('img', 'longdesc'), + ('img', 'src'), + ('img', 'usemap'), + ('input', 'src'), + ('input', 'usemap'), + ('ins', 'cite'), + ('link', 'href'), + ('object', 
'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding): + _BaseHTMLProcessor.__init__(self, encoding) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding): + if _debug: sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', + 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', + 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', + 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', + 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', + 'thead', 'tr', 'tt', 'u', 'ul', 'var'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', + 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', + 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', + 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', + 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', + 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', + 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 
'selected', 'shape', 'size', + 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', + 'usemap', 'valign', 'value', 'vspace', 'width'] + + unacceptable_elements_with_end_tag = ['script', 'applet'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + + def unknown_starttag(self, tag, attrs): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + return + attrs = self.normalize_attrs(attrs) + attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + +def _sanitizeHTML(htmlSource, encoding): + p = _HTMLSanitizer(encoding) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. 
+ _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count(''): + data = data.split('>', 1)[1] + if data.count('= '2.3.3' + assert base64 != None + user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':') + realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] + self.add_password(realm, host, user, passw) + retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) + self.reset_retry_count() + return retry + except: + return self.http_error_default(req, fp, code, msg, headers) + +def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): + """URL, filename, or string --> stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it must be a tuple of 9 integers + as returned by gmtime() in the standard Python time module. This MUST + be in GMT (Greenwich Mean Time). The formatted date/time will be used + as the value of an If-Modified-Since request header. 
+ + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.encodestring(user_passwd).strip() + # try to open with urllib2 (to use optional headers) + request = urllib2.Request(url_file_stream_or_string) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
+ short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) + if referrer: + request.add_header('Referer', referrer) + if gzip and zlib: + request.add_header('Accept-encoding', 'gzip, deflate') + elif gzip: + request.add_header('Accept-encoding', 'gzip') + elif zlib: + request.add_header('Accept-encoding', 'deflate') + else: + request.add_header('Accept-encoding', '') + if auth: + request.add_header('Authorization', 'Basic %s' % auth) + if ACCEPT_HEADER: + request.add_header('Accept', ACCEPT_HEADER) + request.add_header('A-IM', 'feed') # RFC 3229 support + opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) + opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent + try: + return opener.open(request) + finally: + opener.close() # JohnD + + # try to open with native open function (if url_file_stream_or_string is a filename) + try: + return open(url_file_stream_or_string) + except: + pass + + # treat url_file_stream_or_string as string + return _StringIO(str(url_file_stream_or_string)) + +_date_handlers = [] +def registerDateHandler(func): + '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' + _date_handlers.insert(0, func) + +# ISO-8601 date parsing routines written by Fazal Majid. +# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 +# parser is beyond the scope of feedparser and would be a worthwhile addition +# to the Python library. +# A single regular expression cannot parse ISO 8601 date formats into groups +# as the standard is highly irregular (for instance is 030104 2003-01-04 or +# 0301-04-01), so we use templates instead. 
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
# Expand each template into a real regex.  Every field is captured as a
# named group so that _parse_date_iso8601 can pull values out of
# m.groupdict() by name; the optional time-of-day/timezone suffix is
# appended to every date template.
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
del tmpl
# Pre-compile once; each entry is the bound .match method of a pattern.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.

    Tries each pre-compiled template regex in order (greedy templates
    first) and converts the first match into a 9-tuple time structure
    in local time, normalized via time.mktime().  Returns None when no
    template matches or the match is empty.
    '''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)

# 8-bit date handling routines written by ytrewq1.
# Unicode strings for the Korean date tokens used by the OnBlog and Nate
# 8-bit date formats below.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
        (_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
        (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m: return
    # OnBlog dates carry no zone; they are rewritten as W3DTF in KST (+09:00)
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m: return
    hour = int(m.group(5))
    ampm = m.group(4)
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m: return
    # NOTE(review): +09:00 is hard-coded here just as upstream does; the
    # fractional-seconds group (7) is matched but deliberately dropped.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except:
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
        day = m.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = m.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.  Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset

    # NOTE(review): the named groups in the three patterns below had been
    # stripped from this copy ("(?P\d\d\d\d)" etc.), which both breaks the
    # regex syntax and orphans the m.group('year')/... lookups in the
    # __extract_* helpers above; restored from upstream feedparser 4.1.
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) 
+ + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. + http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. 
This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if 
sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): + acceptable_content_type = 1 + true_encoding = http_encoding or 'us-ascii' + elif http_content_type.startswith('text/'): + true_encoding = http_encoding or 'us-ascii' + elif http_headers and (not http_headers.has_key('content-type')): + true_encoding = xml_encoding or 'iso-8859-1' + else: + true_encoding = xml_encoding or 'utf-8' + return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type + +def _toUTF8(data, encoding): + '''Changes an XML data stream on the fly to specify a new encoding + + data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already + encoding is a string recognized by encodings.aliases + ''' + if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) + # strip Byte Order Mark (if present) + if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-16be': + sys.stderr.write('trying utf-16be instead\n') + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping 
BOM\n') + if encoding != 'utf-16le': + sys.stderr.write('trying utf-16le instead\n') + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == '\xef\xbb\xbf': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-8': + sys.stderr.write('trying utf-8 instead\n') + encoding = 'utf-8' + data = data[3:] + elif data[:4] == '\x00\x00\xfe\xff': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32be': + sys.stderr.write('trying utf-32be instead\n') + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == '\xff\xfe\x00\x00': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32le': + sys.stderr.write('trying utf-32le instead\n') + encoding = 'utf-32le' + data = data[4:] + newdata = unicode(data, encoding) + if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) + declmatch = re.compile('^<\?xml[^>]*?>') + newdecl = '''''' + if declmatch.search(newdata): + newdata = declmatch.sub(newdecl, newdata) + else: + newdata = newdecl + u'\n' + newdata + return newdata.encode('utf-8') + +def _stripDoctype(data): + '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) + + rss_version may be 'rss091n' or None + stripped_data is the same XML document, minus the DOCTYPE + ''' + entity_pattern = re.compile(r']*?)>', re.MULTILINE) + data = entity_pattern.sub('', data) + doctype_pattern = re.compile(r']*?)>', re.MULTILINE) + doctype_results = doctype_pattern.findall(data) + doctype = doctype_results and doctype_results[0] or '' + if doctype.lower().count('netscape'): + version = 'rss091n' + else: + version = None + data = doctype_pattern.sub('', data) + return version, data + +def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): + '''Parse a feed from a URL, file, stream, or string''' + result = FeedParserDict() + result['feed'] = FeedParserDict() + result['entries'] = [] + if _XML_AVAILABLE: + result['bozo'] = 0 + if 
type(handlers) == types.InstanceType: + handlers = [handlers] + try: + f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) + data = f.read() + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + f = None + + # if feed is gzip-compressed, decompress it + if f and data and hasattr(f, 'headers'): + if gzip and f.headers.get('content-encoding', '') == 'gzip': + try: + data = gzip.GzipFile(fileobj=_StringIO(data)).read() + except Exception, e: + # Some feeds claim to be gzipped but they're not, so + # we get garbage. Ideally, we should re-request the + # feed without the 'Accept-encoding: gzip' header, + # but we don't. + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + elif zlib and f.headers.get('content-encoding', '') == 'deflate': + try: + data = zlib.decompress(data, -zlib.MAX_WBITS) + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + + # save HTTP headers + if hasattr(f, 'info'): + info = f.info() + result['etag'] = info.getheader('ETag') + last_modified = info.getheader('Last-Modified') + if last_modified: + result['modified'] = _parse_date(last_modified) + if hasattr(f, 'url'): + result['href'] = f.url + result['status'] = 200 + if hasattr(f, 'status'): + result['status'] = f.status + if hasattr(f, 'headers'): + result['headers'] = f.headers.dict + if hasattr(f, 'close'): + f.close() + + # there are four encodings to keep track of: + # - http_encoding is the encoding declared in the Content-Type HTTP header + # - xml_encoding is the encoding declared in the ; changed +# project name +#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree); +# removed unnecessary urllib code -- urllib2 should always be available anyway; +# return actual url, status, and full HTTP headers (as result['url'], +# result['status'], and result['headers']) if parsing a remote feed over HTTP -- +# this should pass all the HTTP tests at ; +# added 
the latest namespace-of-the-week for RSS 2.0 +#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom +# User-Agent (otherwise urllib2 sends two, which confuses some servers) +#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for +# inline and as used in some RSS 2.0 feeds +#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or +# textInput, and also to return the character encoding (if specified) +#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking +# nested divs within content (JohnD); fixed missing sys import (JohanS); +# fixed regular expression to capture XML character encoding (Andrei); +# added support for Atom 0.3-style links; fixed bug with textInput tracking; +# added support for cloud (MartijnP); added support for multiple +# category/dc:subject (MartijnP); normalize content model: 'description' gets +# description (which can come from description, summary, or full content if no +# description), 'content' gets dict of base/language/type/value (which can come +# from content:encoded, xhtml:body, content, or fullitem); +# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang +# tracking; fixed bug tracking unknown tags; fixed bug tracking content when +# element is not in default namespace (like Pocketsoap feed); +# resolve relative URLs in link, guid, docs, url, comments, wfw:comment, +# wfw:commentRSS; resolve relative URLs within embedded HTML markup in +# description, xhtml:body, content, content:encoded, title, subtitle, +# summary, info, tagline, and copyright; added support for pingback and +# trackback namespaces +#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback +# namespaces, as opposed to 2.6 when I said I did but didn't really; +# sanitize HTML markup within some elements; added mxTidy support (if +# installed) to tidy HTML markup within some elements; fixed indentation +# bug in _parse_date (FazalM); use 
socket.setdefaulttimeout if available +# (FazalM); universal date parsing and normalization (FazalM): 'created', modified', +# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed', +# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified' +# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa +#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory +# leak not closing url opener (JohnD); added dc:publisher support (MarekK); +# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK) +#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed
tags in +# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL); +# fixed relative URI processing for guid (skadz); added ICBM support; added +# base64 support +#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many +# blogspot.com sites); added _debug variable +#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing +#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available); +# added several new supported namespaces; fixed bug tracking naked markup in +# description; added support for enclosure; added support for source; re-added +# support for cloud which got dropped somehow; added support for expirationDate +#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking +# xml:base URI, one for documents that don't define one explicitly and one for +# documents that define an outer and an inner xml:base that goes out of scope +# before the end of the document +#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level +#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version'] +# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized; +# added support for creativeCommons:license and cc:license; added support for +# full Atom content model in title, tagline, info, copyright, summary; fixed bug +# with gzip encoding (not always telling server we support it when we do) +#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail +# (dictionary of 'name', 'url', 'email'); map author to author_detail if author +# contains name + email address +#3.0b8 - 1/28/2004 - MAP - added support for contributor +#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added +# support for summary +#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from +# xml.util.iso8601 +#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain +# dangerous markup; fiddled 
with decodeEntities (not right); liberalized +# date parsing even further +#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right); +# added support to Atom 0.2 subtitle; added support for Atom content model +# in copyright; better sanitizing of dangerous HTML elements with end tags +# (script, frameset) +#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img, +# etc.) in embedded markup, in either HTML or XHTML form (
<br>,
<br/>, <br />
) +#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under +# Python 2.1 +#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS; +# fixed bug capturing author and contributor URL; fixed bug resolving relative +# links in author and contributor URL; fixed bug resolvin relative links in +# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's +# namespace tests, and included them permanently in the test suite with his +# permission; fixed namespace handling under Python 2.1 +#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15) +#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023 +#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei); +# use libxml2 (if available) +#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author +# name was in parentheses; removed ultra-problematic mxTidy support; patch to +# workaround crash in PyXML/expat when encountering invalid entities +# (MarkMoraes); support for textinput/textInput +#3.0b20 - 4/7/2004 - MAP - added CDF support +#3.0b21 - 4/14/2004 - MAP - added Hot RSS support +#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in +# results dict; changed results dict to allow getting values with results.key +# as well as results[key]; work around embedded illformed HTML with half +# a DOCTYPE; work around malformed Content-Type header; if character encoding +# is wrong, try several common ones before falling back to regexes (if this +# works, bozo_exception is set to CharacterEncodingOverride); fixed character +# encoding issues in BaseHTMLProcessor by tracking encoding and converting +# from Unicode to raw strings before feeding data to sgmllib.SGMLParser; +# convert each value in results to Unicode (if possible), even if using +# regex-based parsing +#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain +# high-bit characters in 
attributes in embedded HTML in description (thanks +# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in +# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking +# about a mapped key +#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and +# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could +# cause the same encoding to be tried twice (even if it failed the first time); +# fixed DOCTYPE stripping when DOCTYPE contained entity declarations; +# better textinput and image tracking in illformed RSS 1.0 feeds +#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed +# my blink tag tests +#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that +# failed to parse utf-16 encoded feeds; made source into a FeedParserDict; +# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url; +# added support for image; refactored parse() fallback logic to try other +# encodings if SAX parsing fails (previously it would only try other encodings +# if re-encoding failed); remove unichr madness in normalize_attrs now that +# we're properly tracking encoding in and out of BaseHTMLProcessor; set +# feed.language from root-level xml:lang; set entry.id from rdf:about; +# send Accept header +#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between +# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are +# windows-1252); fixed regression that could cause the same encoding to be +# tried twice (even if it failed the first time) +#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types; +# recover from malformed content-type header parameter with no equals sign +# ('text/xml; charset:iso-8859-1') +#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities +# to Unicode equivalents in illformed feeds (aaronsw); added and +# passed tests for converting character entities to Unicode equivalents +# 
in illformed feeds (aaronsw); test for valid parsers when setting +# XML_AVAILABLE; make version and encoding available when server returns +# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like +# digest auth or proxy support); add code to parse username/password +# out of url and send as basic authentication; expose downloading-related +# exceptions in bozo_exception (aaronsw); added __contains__ method to +# FeedParserDict (aaronsw); added publisher_detail (aaronsw) +#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always +# convert feed to UTF-8 before passing to XML parser; completely revamped +# logic for determining character encoding and attempting XML parsing +# (much faster); increased default timeout to 20 seconds; test for presence +# of Location header on redirects; added tests for many alternate character +# encodings; support various EBCDIC encodings; support UTF-16BE and +# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support +# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no +# XML parsers are available; added support for 'Content-encoding: deflate'; +# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules +# are available +#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure +# problem tracking xml:base and xml:lang if element declares it, child +# doesn't, first grandchild redeclares it, and second grandchild doesn't; +# refactored date parsing; defined public registerDateHandler so callers +# can add support for additional date formats at runtime; added support +# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added +# zopeCompatibilityHack() which turns FeedParserDict into a regular +# dictionary, required for Zope compatibility, and also makes command- +# line debugging easier because pprint module formats real dictionaries +# better than dictionary-like objects; added NonXMLContentType exception, +# which is stored in 
bozo_exception when a feed is served with a non-XML +# media type such as 'text/plain'; respect Content-Language as default +# language if not xml:lang is present; cloud dict is now FeedParserDict; +# generator dict is now FeedParserDict; better tracking of xml:lang, +# including support for xml:lang='' to unset the current language; +# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default +# namespace; don't overwrite final status on redirects (scenarios: +# redirecting to a URL that returns 304, redirecting to a URL that +# redirects to another URL with a different type of redirect); add +# support for HTTP 303 redirects +#4.0 - MAP - support for relative URIs in xml:base attribute; fixed +# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229; +# support for Atom 1.0; support for iTunes extensions; new 'tags' for +# categories/keywords/etc. as array of dict +# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0 +# terminology; parse RFC 822-style dates with no time; lots of other +# bug fixes +#4.1 - MAP - removed socket timeout; added support for chardet library diff --git a/src/feedparser.pyo b/src/feedparser.pyo new file mode 100644 index 0000000000000000000000000000000000000000..c609ca252a56cf73d8eceda9b40b0c0b039bc230 GIT binary patch literal 102392 zcmd442Vfk>bv`}^fCXUhT{?;)1vY?95+zZTNDw3;ks!4IMd?TiKHM&V6M;MM_COLc zB^SxPBrb_#JMO*3CGNc^aZQ}Waf;)b*lA8L&M%4o-}k-Uy#)wLvI)w6QTOn6c6N5= z&71e$ym>SG+y64F=|#8fytCk<|8B$I+wlwUX>`taE(e%#xs3DcT&~Xf^)6TM{08Sw zasE{2Pjmis*FVMOkkaUKNNjRJ#x2g_-VB$+?U}A0&rjFwM(59R{%qHevYMPf$N6*J zX=Ixb<1<}uW{l5rIb^mxvt4d>jL&g7BwG4hmzx{o^IUFTjL&zu`7yr0&8F$ZU^0H+vx6BEmR@}yr^moxPyeT)m!Is*CEd|<{5jZ+2xwY+}R$N zyWZv2yI_hdue6$?>ozb6wG7s~;zrjqg>M1<5~{4%YyCwo*yQ@aLW^1Xhn>4Ox7qni zT(DWGOOwmnW&M&R-Fyu1r$5I3E#?|?2+^z=yS|DqQtOIf#k!B$4iEIF}k;o<>n~AglX(h4+$W|iT zfNUqS1IYD6ZUAy4k)1$pB62g3TZrreax0PDKyD*)JCHp@_5x{hyK*of*&35N|dc0jvbtO0UJHOinNCnRvOHvOw|88F74m$sMeEXi{ 
zc8Bxt)$Kc+pL4+?SG-g3kl^`pq6&OfEgyPSWY^Lw2CB^L^(B&hK-6zw-y2Uv&Qc4xG{tvE=dbhFq5mJh$&u-sMnnTy(qQF+F*k+%>y#76T`uQN+~fT7YTyYM+~tB3%#ra&UGAjwA8@%-&VRDY zq2(7`4vl}%<(?FC!$U4O>4H-(xQ`@UOxDQ9_nzW{9v3``-KQHM*HiVDg5FZFw>(Wr zzLI<;MT7L|yvTvH4=YEXa`Y*O2IM0c0MFn^cqWi%5qUO{=fuc!i98P}&nNN%ATNxO zUx|@lCGsNNdU1@rgvd*g@@p~j>qK6Ll;4PvmlJsfQeH{qH-Ws0$g6?8CPrRMkarS!7m(i~@@^pSK_LS%S>G$VfX&_~ zy4)|i++U;1Z{vaY^T`hY`5=)G0r@bIj{x~7k&glSIFU~P`6Q7~0r@nM&j9%>kwAr*M=%=*UTRF5!ux|+XB3+q_Mxnrf`*`N~U2eW-a;Ae;Pl`yC@H66^C{J|j1 zdR<-FEiE^l-z02n%N9en5oz1Ee^Yi(wLF9z+c#zLgk2$*3JaT>?k zl`pXJTgsLG)@)z7lC2H|*};4@2&-9EqgXCI>N9@15MJ~k5=VxDQZ--Y<4s<0W~5jN z{F}2!N2>$nQg&O*)+}Dd*Hmz~P#!{Ve@uq$m&nj|$DM6uKPZH1U9nIuo$VBW#>%F%kyZVksMzM=FItDZRx~ zzA{RB4uzYt=Ze(I(CitOBcZ~1J1 zvSJ#P%GF{a*wmE8-C~#>MC({Y`eooFK3S^?NKhEe7l(pM%LEm`i&6xLD}a{yBcRXZ z*MY=msbkqL%bsWj*~_?B$qtd(iuu8?Mq>4g9!&Kpy14sbM_0D%$o}r*d%X56@T1<5 zyF2!^@5}DJH`{%%J==EV=)GRYfrH)IgGUbSYxlacdph?awX@sn*n6z|2okQ_(}gFm zYvPSPo%d$j?>XwVW9T07vK@zy9_m10DBauB+1=6JwJF=t*>>pIzK+fVo3bb%+j*or zd#K}ZM>n!{AKAp0HeLK!_Q?M1;dZa>AWGS@x8qPp_r0pf{*LZWzH2{TwkLaZkJsJN zcI?m|FMITucl1bCdlQ?rucNE&(4LON?fY8rcDym$es_Ckced-`oZZAS3_jP#fZQX254Qm78P`yK&vRy~p+d7D}-_s5f?D6j1gfg2@ zW>@=N$B+p(v-|cO-gBV6E8F}S6hW7?9rM}`v&u*IH+3D`+tuCCeXP4Zd*H~CeWG($ zyLWd-TYJ|n*+WN2`Tf~rUG1CjhVDIlF?$u1!foJtk9BpBW=$QP-R+)t>}Yq#k{Pq>|Ly$>H=3?po;B>V(SdMbC)H})prR?6II5dbs7|ZDUk=W8}iBaCI$h$xP z0My>${QbpRndi=(YZ+onOR>~fZXFsOEQhV8a%ZpUqM~E&gCoB2a-4B znHZjHfo?&zW@NiFI14$!MuthQU^o~FTUb^&T&`N?BJ#Sh1t8JH`=KX~N)NZL4YM03 z)}G;0@koAncrcRet$9#w++p17^Zlt^Dk5;Dw8#+(4_%r<1*$i95Ri^`V zL7kQQJC0Zi^95lcKO7X9qd>qPHE0`t;mrUSoCBTU27(U=Gm;0B6nTQiq`(0*wU(!= zc^V$?tg^&)A#1lTKQw%cu6KtGyzZ~s&o8deHv^9L3~6CUjW&8+RcxC%)e zmzl4B)fK?I2SBK>v)o(uN86x-!#%xF=zB-1hl2w1@;|n0FMfJDz(g|Rye_L^9g;=y zo}N;EDCp_2m-fIQ7#YN6ts$4&8jpN{iLnd$&Z(}t41d}N@|Ao6YOuWo%K$Ao0t>GK zYvb|O-J6M0yjcK~Sku`k?iu*SHC+n)qM9Bnoi3Hnm42xeor8)!GgZ-fz&p#Odk!CJ zE0?M;ce_W2f4QWbiyA#ERl^0qkHN5ptp@YE6D+5Z@Jpp4srKwz9Yc2aiTvgV_H53b zIAh)!|jhvu;iE`P)t)dC%tk`OST&Hm*N$U+t<&;V@!MQUz+G 
zFPbZDk?gC4;g*}WgrrH7zBSw28B`_e)~f(z*;*Qx=NN4NLD-V%JR=(2Q90Xc%hq(d)mG_N+bF-)R_Rt-rLzLm2uw!}76VoK#Z{m*W-iqQGg1A822)!OmaCkv zs;Gf-6;-XNAJTVUu~3z!VJZBz;TLWM7;x^vj5~i7RnERsi`_|R8~k7IF5v%+yHMwL zotfp11MO0gohF)ChA5Xm1mnI+DOf2;%bXTGTa#dyqcr#$A9a5-{)W^bE;BVV&CSfr z&QvMbYL(od?>n_Z1GJ7~6pXu!fABn9!t>7@b`RA#D+zCbBix2@MkZnd z+2cY3-D6;b;#9q77kp|rF?HXm&F;Y|dfPPC?!pv4R9={>uqAb!d6UcFC+`|7XW8|R zvDuZR+9Yk@>runVP9^KWQxo);!7k)E5Hp178!6TQiF?iy``^W)|Rj zL1tm5K&3>Iv9e;cGLZB4Mw16{*uxa*2gNI3w|e#V^sn_{3LW&;Z?pB&0bHg&b1m$cr2R}(`#G9M=hS*}8JfBPO@;nI zQx!pwQ8P!^b0mT&P%Rk2Gly{x1bI5u2*x->z0&bGWOycn)0{J#mLBO+rEB^ztaoYXmT%fIH6XC7>i4CnzRypqUoL zP_y>`1TTw5+N#XFx`fj>+Qexzg4hbR92|79OkmbY9tKrN1_pa)Bg%5UCSm8d6S*1wMhmqb&4J5m5AceFU(UoHqc_j9&^|#stLU6zQG#$ze&Y z(HQ-klbM;|mNp~PIJgp-*drMSE)mZ#LuY}C zgcyHpl$@_<4wNwZNE|5k#09!46B&!?E|b+`wz`Iw#95v*BkwvCSR>2jQslo`z3Bj& zj$uIAXeO~dO0U|Rgk8q#s+^NQP#zB2%0t6g(d#V^7OSHN^M%v@i5v1@x;hMJ5|2ZV z?m@9OIRYAF*u(m7Lk2YT5x4`JFvICB1}vp!T?W=McnWj^>HPW(=!$_1n7)PKZi#^C zu}Fo&=0)+y+SNP-?=%xg9$l1-A;roRf0KY>p!iIJrx3i1-~|LPB;b%OlHZFQj71tK zMNTw|oZuAc%_&lX70ElrR}nmefW$98i+~QEBHfS07Zdyn!AgQv1a$2n+zsHV1WzM) zI>EyPj}TC37ncyw1zUVJ!E*?nOYl5`=MzwqV#)wO%cl6N1TP|>)1vqif|nBf8o{p< zG!Xm-!OIC=LGVg~g#_%`;;RXk6J!ZqOYk~^*Au*f;Ee=tB6u^wTL|7tKm(=NLa>XV znSd-hjw-DS%u$iEh;C%$Y zP4Iq#4-kBi;6nr-Cin;e2C6G=B3Mg6tykPla2vr#2|h;fae_|}e3IZ(1fM4O48dm! 
zo4-td}7(A{xLU5Mg9Km^l zQGy2uzCrMZ1b;;E#{}Ob_!EL}5&S8^pAq~y!Cw%3o8T`AzC-X`g6|Q0pWp`sKP31o zg1;vC5y6uQ_7LnPXd~E1&`z+Q-~hovf)0Wo6Z{Rq-xB;C!QT`71Hpp?cM#l3aERbA zK_|fxf};d?5qJbYA^1mve304tYMQ}C2Qwg3%@N|NQ2_7M6B3MGOg5cQ%&mnj&!Se{7 zPcVgGD#0{@UnO`E!D51!5WJM&*9d-{pn>2w2wqO`3W8S>EF@S&@M?nP1X+UD61j-WoxQ5_!1bKp9f&zh05D@ec^b-sa z6bU|0@CAY|68sLq?-KkT!Iuc`Cpb+oNH9cDA}A9K6PzKa5PX^7D+FI9_!`0234Wj8 z4+tJ22nnhLBLrs&&Jmm^7$tas;2Q*gNbpAle@yUAf)OG!BDqSzkJg}`O`tQk}rjt z<=kIJSV%o`#4gOjfVn4-1K!L=+)-u*`DsF_Wfyxkz%yP6Ihye^a5*OUTof|C0htWy zzJbNpO+(cCqy3v&Hxr9CH)q0i$bDb)39Lk74f0g<7i)|gH@BSHfN;t6&8-{OH@9rK zZGEAR1>;8okn1!Ts)DSviLNBSxZSuxJfz4)Q$irLSekyqx8%0J4(EDj-)8xf)28$Z8yWQ1w}9hR}+&h6Q`Z8Z4kaTK=|7F;8$Efwt!)+aTRwz zVABJ3JYd5E_B&w50(K~1dqTf$Eqn9~m@_|_`xmfC%6(@)(fil0(;xgy|s*vRln-rLBg~Lt-BG}vY z5+P{yONCtJuw{X_u6F*f3&}ccTVTp+hrJ6#u5rON+9&pMU1It66++fJY-M1EbuQ56 zFKlYyWwXQ11|sVnb~g~&-~w#x+UT&+ftQ=O$BNsvUaQL%7qqxstKb$v?6^V-c3izq z8L-`IyUSsV72x%PHwfP7uup;o?sWbegxut?Re~wlkMt%Xw>a#XU<$S*y+z2a&VQ?r z-7eVea@dLVHeKHCg46!hfNqv#D<|mE_c}Z@6{!CICZ+*k$5u(b_)Hr!|n@s%yQT)^a17Z zl=wj#$veiP)}`b2;qH0mOcsk17#6 zZccGa4k5PWd`yW?;%*!6zX9xZ*x4_*c?2wV=kl|HG@C6m z58f@hpy+bgUG!;PVt>bJm&2(1j4rXQW60&OlLN5q{Ld;8TQY`S?u;O|D1AsyI zt|~Yph`kp`!La?jGMwj@i`=N-1A^E^fs_jl+f7&#ZJ59&3G6)KCH4|M#pRyrum^>g zPjlFaLgeWVn^A~hY<@||BhLS_kY~iZQJ(3dJt)s|!LwZM*@Dk;zU@MJuJgaH{LgdP zhQgwr?}F#M+zSL>DEKQ5n^Sn}R~>e!5P6XcUgUBwcG#=J%a^#|B`)_;hixmo{56NI zD@1tlOLf+%Y8`j!-5|X{HWl^od0*q_Hh?{+~q#u{J+=bC!PNfLO#Vv z5n9xmtcO=+{1&RvEkq1}bB1CE4kyAhbMio)?SRg>Kd>C@R77={HmyOHsg<<-g<;T@whgXu1bz)(7+=#$d8OGpS>p;5xz}LWGw~8V@wq?~?mvp;dXblDt4ueV{|JGJj%V!|+ zP#7+o_yOItp2cjjIGC0~lC?^8WF?zU5fp=QB zkUX5z^#|p2kBuF=+grD6ZAFgObN%7Y9X-S4aE%Ht7J7%qNP}`%Ed^y2&wEDC;!wUn zcwE#`&Q|#Wiq(-)5T+@^Dp{(^-`Bk_Y&}}`+wx&`GpN$ySN$}vriI`{dF37$OpEeM zb~A$APFqtn#?yok14R7~w2bBl%GT^55MQEY7;X%XxAgcM%KPEz;^3flfTHyZ+*Lt= z*J{GAHGa-sI9w@)Lt1N%hmsN?Wqj%y?xIZCh0MOK6|?jrBmw&rdp|6s)plQ|`gH3} zH{H~_Wn1gEZJUv4b2y5nXY2rpx|PqR36pvda(oM!K~FK}EWgHH74)V1>XM{HtYc6K 
zO4uDC2C4*S&OW~9ig+1zb)GfuFtVq+`;u?8tVYdnFdq&)E>2WVt7BNs52y8mLhbTl z3;K<>)CWXbgVNbx5G%7L5TN{9YQy?bs#iIS?T`o(sg6LZvD@GROKXS$G7&Z}l?L+I zwps}XgHa6t@(i*DdFaRfR&SI~rWDJi-#F5n1fWJ$9erHd6I=q9U8Fr5@H+5JIZc^M zzLFVB<)Kk)Uv*%N#JluUh>Xp1+lbY&T6|ob!ktRR((nkRY~&LxoYpv`%Q6~7k7pdh znP?AMrO)-93$Lw}a4r}eq~#U(*cv7lfJmjr*>kkR*yddEbTK`MF4;_GH1<3{Pz6t| zOueOMKxxD(gLs6@m#$7NBesXMBJ<9gTrj43pf7B#l^iqia2Y0UReVgHAUQO;4-v{{ z16Wfi|2(>aWypXMh*eQz70{HrrtBkQaFe!oGZ2qHVTr@&iBP&2Th@XSoQcfKE!9F` zAC+ZUDQw!KU)eIi1n}rz_vml-==k&Y5YXuys*kA{MY}~vO7DIX!MN8=JHJM%eVca{ z_)>bBr{LFyUw9upk8q_bP*>4oaG}C|h)@QLDM1|@-0+Qv3f`gMAO#9P%uv#+wL74J zLBa?GhTD}PW({2X!tGsWu8#tOakP%Yih3Hgi2y#&CLW!E{5|jhDTJ?QCS90Jfd@z) z+#a}p44D%naQ|2e+&_lQj}f?kECuc#L*V`~WHCD+s=$&I4iAvMae4ge6>+Ur#>lD| z!FGs@8*V`Ch*kG$hK%Ui%EfU7Xgqcn zj7t2#Yi;JRh7MLRV(Ap+l*OZ0Vh8^Sw(D0Eh;NT6&=4u~cT=QU)PE()Zl=G*l4=&9 z+RsAX=#_Bg=Z8YYAE}6*Xcq#a6BTlnG=nQDGx1z(G*=<{RC{{-p_!s9_K96hLuTH? z#L?kE(}BICY++o~k<@G0+8NKyE*$B9-mewV)rGaZHW^pKx=dT`#C3wN_vM1(KQ zG$EpSW@be#BCR2gQmg%)S-Xq4wYm5=&4M83p%kw0wBZ+iq#jLyY`7{8w>8;-!2o`b zj+~K{z<6%pCZW1BQ7qc;2xUFV!+IJE2Pzf*mW?CtM3?k5rPd13f5vEEXC6__uzDPjo}v{7i>5u zPvYHH1Vosgd=N4VgL%4wKASY0kyKiN@{sJ@{nM?6D7?w>4u(ALlyP<#h=gSeHi5CB zCD zj!okubD4@wNmpN(!Nz6IydaZd${aNSVZWA%qOX<#nT|7ko-6OO9P=4W0RhUAw#*I4&r2M~G36|6WbC8Jj+_jiJ&7C8H4xJCSqOGXac=?3fVX}j zg)K;qfD7@+Ol$c(_aF#_rGiE7!YsFV{+VahJv5U~lMk`A3LC2)&Sc_EGl=;}%D%r| z+ia3IV*3@Ea1u*~pyO=ElVVP(ZbI$wv^6Ug@qQhKaA(K?tsv|yHq+}2jK)*gA4v1@I(Hr%Z$p~AObvo^fOYd{G_{499LqN(4HOYfZoG=wCFDbZ7Q7q#v= zY-1achJrH%N6<*`^oSfiTH5u#iCkKE6}?4p)x0a><`j5KAWbr@1b`L&I0 zy209L56t6v0CJYg&w7wM;`EFvFQQoGo9UeOK1A?gf{zeXQ&u*AXY2v>KF^z9Az(CxG#Z|PB3|${Al?@UzQ}ZP4Wbnoj;^SGWgH#}2b#&Hraq4|Bg<#5(7T&A zs4}HH^+3;wZKtdi%(2f?bJ<8G4ugEf^B!YMqTJh0iOH(E9gg(jw(JI+=rd6E*-yG- zmy@HNsX?P%!^Umc9`-D1X-1t;OJ7yDJ#(ObW@at+ajnSAs%ye;7Ig3|TsLAr*W7w; zYn_Yj39~Xw>u;@_ms!)eAk$I5II|M@79r>C`bEk;tA2L0ogvyh!jVWqON0M+WZgqQ zfCz}YOEfr-i;JVipD>Z z+B{j2FP@R8Ea^{(aomno7Lu?GJzXb?q2ZWONSB(*LV0)|>I9(<66IFf0aID=qa;a_ 
zR91_rERnXVdJHQmUH52=VXNgI1ZZ6#8OoIssqmP5J-j!fOTQV3I*Pu8C5^xTC zS#cb(ssZe?Ww4u;A(EA2IiA3>XFKJ1-@e2C?6f~O>CX-3l)qt{`ttm?`0owT^^Nv- zdm;K8h<(drzRf4tIi`wU$tQTpd8;Z}m^`kElqR<1 zSD>-}ghRh-xb;GqX>_lS_Kgd+JTdh0!|@6 zLEK8j*7&Ij{4@jn1p7M0Pn@QADu@rh0-O^EXGla2Tw_BI2vRdu+53ZbrbNGO+deZ= zuBs<-xRClMk2M+DPV2x@1&~tFf;1v%x`gKdKnoxHeKejxCyo&*tMVKTB4|d?Gud+w z`tcG*1gQ*4YP^)!niPf-fKmjp7lGIy0ZKn8MowC zuL%!L4%^bIum%iIN3TTNr)j3l0l#L!sS}P|8H?77YH)wm;~pt*D#0|?u95ZBApH=r zan@~tY5zy?GlB^QDRy`T z1WUG(vc(##>^id1I027&@OtAX8D{2l1ygc@%>B*rz{1QpHc-H5(cuu_NgH-452H=y zE;-Xm$x`-+(OdSt6td*2Xj{*_-2@^l1>mHRRoZ+J$;fxPdJ4KMm`5s+)I$Fm#rR6_ zb-bAoSLqGH%r_|&(uj=Zu=(^?v3jhFtL=|{!mGvM5LIiqxcdJOHKMT+)rjX?txf7= zokxyIbkoOiQGKih1u(CP;3ccU(q#F-#sb`h60hX$cq$Q{fU+Q#wqSlpTB@$dQ-;l# z=GD`vz!+KrZ9{2{*#}C=;0l^+NmmJuOpZi*iF`h_moy@Sn2y}psbG^tJ1H(4K6oDO zr6s^7?OD?L%dXWY+IY(437vX->eOx2}GPG#+Cdz&7i-J>=NS; z|7HNQLUK)LR7zVEH7A~&QNX$PMGd3^i3}3@lvhBw&vFZJM8V9u$RI%msaeUmW4J(< zLB>6tn5=fd4ZWu5NC+{`VbCSk-Au;P5t8Y;M6#W7!({k%BasJnDi=zpB$331j$*{A zgwURp5?c4-cu_j9ytm=%(*7nL(;-{XR3zn`oEXuv0ndJ!9I{f0V{M_$RzO(p4$_pm z#mT_434taB96)Rif1)SV9k@24r&IwfEx0;iO>wl4XyB?k_#a{2gTT1zi3S9mv_|7` znJ_rUWHO46%OrEARubI>@;+joOwRxm6HGkXcQ&<%2A~F%O)AQp;xUG6&PnM=#dZWd zzzbPM)Lyu`s{{k8mL4s!VW)wH;Y^UAGaat*7*T>A&F3-kIO9VOJx1q^i6W~l94kbR zVSHXTE@r}bF)Z?Gd_SH@NzJasWH4}pkXjKXy+sdXIphmP{l+-tU`7YRdmGwLv{P%UDZUzj6$}i z(o zWsqpg&?t(Wv{NK*@59cW#44iP8So`S-f;qFW`SH66jWR{1UaCO?7KJ+IeZzCZIagD z$|rD%G8NMRb%eKqFU##NGrE8?e@ZJYBBF%jXTFEYFqWmvGim4UAT6UrI0=)(c$j!k z{uatGnV%t=`~B|t+}f87Ssao$2C^&&aqC<)TDr7nHcZE{Q=|C|7C~Yipt#5NIS=c;t|4(O-!3 zSUUds<7bBJ;IsgYy92t5+B3xkY{xR_Q(H`1PJId#_K3NCk615YQxKREo5pbmqYTx{ zU?GlcZ+P4~5?T&a0THo5j;Zzt@-M^Ya-^fKnKP#vR5rXYsKrev+X$KD-GziQJS$_; z;oMuMF@eJ-&%B{NDa96bX)(=BET)UfLrt=-jAPy&wZ78&wJO66xMUYATl$c-f3+LN zd)TbyuKaq#t^uuHX6PEA&o}g14LG@pqE%;BaDKXiy19!qHad{gQExbp(hVi4tnzTZ zKUU@KeMzlpm0*-NFQ5dhnd8zl^7nD2^M)O=AQ5;P*vz>Rxf3&T1Q*UbNbqce69n%B zkfU6yehTl>@}q@oQ7uHLJ}OwNc`cuj==~>R!|~JH81jWREv924TIlcBobd;%c}%0zp4)Vj*AZ=?!|Yo~zJz?r!y#v-Fh&1?2FSQrJ@g(nRjz 
zT4$(3s`)4_U)a-Qo)2xGa-=T3sAQ(7``vLVh3b^UU;Ff-ew! zk=Z#pz2ZEHjTkIZhHMHEaNS};YkhnHP$c5EWl#l;`(~5CfG@3 z`diUbcxs@{C80Gw+X>Z9YJBD4|%%ju;oOG~?FgTXFS1`)nr^DB6w1m)c1w3gBa>v~aRKgPy#WU{ez7ps{u>#3Pw zGd9q;4oqXSOmG%;{B39#+F9qaj58AkK3j4%tUr=QE>y6iaU`>VK{N0JHDIwh^^&tBsRGD9&4X0z`MYE85v`R51*nA<1I? z;7FbgC(QiXY$f53#VEiCXQTj?KtB8*&tJ$4HZpF(7)f zMA0Mz<1m@x*d)U;)Lk+IB|T&m)Il^G>JqU!El7*53YkzwC{d6!mccTfcsJq3q!7=d zq>I&XPw5~=*}nWxsiXQW5_2hD;h-a@L8EdFpQnS`Uj){WY1!im^JZ~jGWlUGoI>7! zY0z8_K1>fl(=nrO3;B!IP0C|Kr>Pm1J2)(HW#DqW2$ZNWxy}ZE%{H;hbk~R_c#8>Q ziI)Kxz%}~UlsCFb+PVQ_Ejkip8W4oXn7TpN4EIO~Q|}=9)gmJyX`y(~Y?uNN*^tJV zRx@bVyp7JCc?m8wKgZ07qnG|lOn*^Ly9uk`F32Pz8)aUnN2#U;U<>e8k8Y&+YMaNt zim$eN%TiZ6%wNWlEWxv_#aj+sv3$lRLQVjioRu8CAy&K9>h zr`A4Z=_?|z_3KEp$r%H_;$$PY1;=uNYj!hp@ajtNk=F*fmjPv?RT1zxOq|)|cl^Cr zXq`j76CMJ!9_7m&k@V(JQbwHDwx`r7adFCkuJ2AOzaiwx@~hTrAac_s=`7-BRSm$K5V zQzTPxSyRHhaFOtXjX{hx4l8J28-EW&JEN~PIl?{o7LdZ*5||o7kVGK+O8~5G)RXco zXsYNXnp?Z2#v3M+r@7T~>n-hzTFI7AM_t%mINje)HD+CtE}~3jGerf84zv*0U|Y40 zwCYSXjL&(R2!0XRWcQr(8Ks44e+_F-^74xn8p*5;e{J}My8tGs=Om_Rp=;l3stdn> zcRaCOI2Uh>bV*X7btJ?jbYmxa-WpfyMz(OWyHWa(-I%msGg~mGD_Mi2D=7^p=t}6u zc$6`#iB37%Y^R^;li$HD^T?=Glp}J+OpY+oeAI@Ny>wfxDNyYfiM(s^`iaLAQ(@SH zn^Pm}uB1!YLK|6ZkesqT*f3t`vDZdk;mPhQ$#-^@z2#;$j*6c{ZX(O&5Y zYFQUfuD0$uR6UO$Ox#xxWnNWu)C`gMwcX zM}@gP6ka?Iso0|4rLv)eJKAI$CLu6Uf_1p8$lpcQloI?lFXeCjA+Rf_4pUQ%R7DSg zJkgim8le*B^20|L4dch97Vk|spe9e*)XDCA=>!haR4cmJ3XZjjmE)D{dG8*yTQ0v} zeit)THJ(Kl()2n0czT&00UPuf7MWrJh*OW%tkXGsxi0j_-2LuJ*YM}MI{{d1J@MD%IOY_^@pPyOY0YSQ*! 
zR;MP$wvhy)ZIXJGOB-@QnB^g>iUVk}9i1tEGXi5@FhQ3h$;RnxfQ%cbk&K^6gqKb3 z*93gL^GqWAuGkoj$1v9L(qowZQHti4vALc>5s((^jMn%Cg!}VR(i0&FQo>z_J-%)n z(~wOQ!(bQ}6En>v41;KEbd7Ju7zS4x-(12lh!s9rw$md$r0{LKXQJyp^?jn>#Q-Tk zHq}uxewXjHOnhG=)0;@9c;UXL$z@BCWc;<^7v2V7B-4_aKZd1eIo_(1}B{fkFeF&;D;dtv{=Y>AT@6Z_N^k7LIb89*AFe3alonN6Q7fKQn1ro zjdz%OAM2y_G>PYnO@`LiS%1v5$!k8@>%(k52RqkU8Gm*0TJcWYthHLPDQGpMp0yYs z5|0~g{mj`s8?C0Icr=x>HZAX%z#7}$E4!quQS(W*l6D+hiI_2V02`f{cJD$3Yi%@- zyV{6`a0jx55 zaM@Ze#LHon@U5sfVrFGLFGhe8QpHBP6D8ZyqX?3uARI%CMJumUJJ{BSao8$Dfh0|J zW&{olYF-gDx%mV>(}It+;0rA$drodX*>ZB@sg3JzZ*D$$;snx8J$dV*HciJ$c)v9S>fpJ-)vAeagKa-m8dH5cDG~b=V`vde@);`&KBuSgB}Hi3th^d8zF)@HUr} zexXFO&DT63S?BCEqkzj+9+Fm{fhuqE%$RiC2b=L9q_>3@;vg!lBsEg*K*}HvjvT^Y!X=N(*G?I2 z^A$d43uT`JHrYe95oJaTEx}Md_++e;oi9%q=P_Kb3^h;5!5^}=syqeDBrZ{=mO0=# zjt_pF&bsq;6Ny5F`--3t>kx(H=d|Bj2l82EH52+n)}5UuYj|?RH~m?nbgI%bQie52tr`8uy^JINw%={o~$%LrbE(0bc6m9 z?zMGYHpb($+U7~(|J~>olX1~02mD4N-(<7H$@2mvp%Y2_uB!FSoC!h+A43LhLmi)p zo>9H6XY6zF{gI;wWyJ(y% zPU|%4Fu&()esCn19Kj`*7OlCcr5o8&o7m2QiqpjQ=NLV_h$}6>QeCAl z*988Kab0!UL+I%oE__e)f*$LHRF|>0Joeia8AVACMepBvIb%rugAMM*$`jK+sU>Z_ zRfomkSeM||aot2-NhWG*aFL8CjwOtH4Iv7+@iI<*6E!L78}FIy3=U9@B=wD~tjSjU z$tw5Q#@(K3Ts(LZadCILagjOB21;abKALPg!RGR)PXXqg zefgn{Ck;gp{m z=61iFZ1e*1>~o%-!y|a+;%`0va_1Skh|CHDWdODevPFAi>wsuuY_!h?Ol+=0coKPX z_%@WG11lcSlkt$?+r-+PUI)o{2f-l#IlTt+y+O1;ZLGssoUfCW%mhjv^Im{n(9F`x zo4kQA4VCK2`+j%6|0pR>1w&ugeC~tJTo)Uz6p8E&G0!uAR4@Mg0Os3fY@|=NqyyGm z%CZbC_zl^JV1h3pYCA`nVn;Th97qoNOVNFBCNo}zNpP#eIV)$b37YAsNvA${mmCQe z4+_Zk{&HnBIj=I+2V^I|3~yn{=z0*>W+bq@iH^#ZE1?n$S8RjQ3)ve#MBeec1;%z* zGjd$E8#3*YD@2XDr`A(AvKk)A75FVMF93&lqQGv%#rZ8Y9R_-%p%^>+PA!Wggw!wS zBOgNubUw4#9S0f(Wa{lvc;x9|^c;?`47Fk!eY0rX=g-U`BJ2LUc(o@&VS<*nNJyi} zp$yIR$gRN7vh~Cj>?L{3c3kYH)@(;48v}0vVq%1x7*82%G~hkBm+=4iC`~)?j5Ga- z380fHjFhP`qvJnmiVf{cf{K zz4II!!xuRlA*5wJeqxJ#pfl=jz7+~^uKwC}GAgW9)BcM$J+ zfLxbF;k2=NK=E8CuT>aGbFoN25C7(7re=2H?79Lniysr7JdOl1N9^w-&!uG(aCBfK zcQj2)f>4bYQ}-qgP<snWfRVRn>890W^b%i 
zNj3J0XeBK+zk%&#uR|+MngrmM#}_V`iZn$r$mziX3u|#ZxmodDokW#n`Nx;WK%V2dy zRWzr(TvDA^GvyNMUe5Rub@&2NN9Q9yvEz#tL)7kkFOFNw7pfn9v?2Bo@*69GeP|Fd z)(K+=yaXbZU^?+i+7(t&q=F$;uFwi~#I8H#Dil%VX5V|5_Ii;BSvHLHLV;R1jZE0ie9?h4? z>YFd|N(7`Nnv!+=TB?pQttn1a$1)I*xV+mKQ5O;a!iGv@Wi)Mppj1*kZDDjH4xc5f zR?TcM0}I{(@N9I72;5f_SX(mc#Vh>*Hq7EA>q->VCnG;ev`(ed!W(DWC$qvUw^?W| zUKTPc3YQ~pdq>}fLuo(6i*<_$hq={l-ksABlTo4^U<<~5)x1n_J ztptpBw2>>zE72uaZZvBFkE1yuz@L!-(@_z#!=vwEkfV|jJc5g)@!*5l&a}xc#_}(; zgaWs~0IXijB6Z#q*p%;3cqwMM=09M)Iz19hjPocfEHK|1iO#6F(hFC%kXNac@d2n|eCRl=mLH4A zzpQ1fiwKG@Pp7K=V-nQNo0y3nBO5QH2BI}mq20vryZkkU#C(cc{ZqEuWF766B@=iA z(UM5kK{rOKQ_=@qLwhh@gTov%SqNf-k$cP<5KPKw-XW9H_8-`?5V8-`+yoPWgP7^y6I-(~hr>gs^?Ay@{3P>!T?*+EH@^ z-++sxq6mRNSCglq+a@r9;H|Z@(9~gKZgQ^$?%T_(pV&CBg)H7m@MT`A+jip8+CO>B zE?ZWC;`$){EH>9xhZ^G*epZzts13jHE&v<1Y&b-r{Ajex$;-Ibx}_O6ayA=DkSp2; zX#~Q<;9;5sq6Uk%p+1oSgjtC%#f0SA0VNSblI{%Vs<`hVv1t1 zxOUzzI$wD=L{4hSLj5VNQ zxzu`|9u6Kf`$RNLoNMmU-m*Wsa&nr)SySSyc_NyTTAbuzE3ou1b>}Jbem2!2F!We| z|Gh}CaY!i9gE0lUga0umu|3ZQ+7hhFM2-l#vT&P&6SKX@*{e{4Xi+9{_L3JmZ54)$ zU-Qq@JP#(Tv_ztDs1Zp{HpM(PI8ujH@Id_T^1hlwK*z{V&NzJ|UVJU-H67J;nT;B! zGUvvy_AcBss!;&hoCI=vfHMlCp=5U*^=l-!s{dI1fz*n!j*d^W&=QOjDq<`O~ZzOgQcorxfSvq~yyMtNoWR^y}O`XpS-XW$RCOE>Ib9vKL1Vva! ziQmdN$~%ciqn7S5of<8AVklqA_u~UocrW}?>9Vkm!49vB&fd}3L!yHAkX1cmuZ`Hl zvU}Zp#e%rT<6e+dsa8B0OpQwwP6uk5+oPiIM$z7Jf*ipKf|CUI5n#>JJ{1wQEvl9d zmO90Iq7==*^qpQl%CX~N+&eEI=mSXi%RqT37>0ww)@H2H{FDn6Q0lbaBp1cAYR^0a z%p-D)A(oCK@Hl>qzNu>2Eg8d4!hJg&gIiX-VP+Qztt@=>9EMJS?=vMG8Z|B65$T=b zBgteheqj7?O0rj&J;hPz6Wi*SPNa@gsuv~|V*P8GlWwkMJ)2a->JA&AGMa7-&9;$3 z)jP+GRLCNsvU%s3CPuM`D z#-^v~1L`u#dm76+z_K)sjqq04q@<+7<8qROxD+UUm*$(eGEr#1_YB@o*kt_OXR!{? 
zA$Tsq^9Y_#@IrzY0gRIxV6gk~DZ6wZ#<6*%(ah-aX$Xy-^_a(8ylzMSKl}Tc^Vq$Fm-Aja?EN( zDY)0rI0b*1RzDTdS@>7i*ige7r!-DO9{!kr2J_)*{_q7-T+I~Zz3KSjvT@4HtD}3C zBMqCBn{VS2@z1VV8tcIHi)+HXswLh%jjd>$-Z)LU+3H3;!Me>yzG?V1;J&@ta!toQ z);@mcbhI5MsMb@FP8zYT^+}E?Q7x=qDACxz3+J~Ms{1i^kLD{Fvjs4O{sTX_z4(O( z00QS=$Y9R(2oQNtiNNz&rxeX)nWE_{5q*)1h~nUhOh*i$mURjZD3Cu`#5Vh(1GIYS z&way33tt^pl@M#m&GQJxg|ikT(Jr76%?+#NXgRognpjt4?3qa<3J}-lS)j{dMP|o0 zyGc`pW*OrjLUp_k6MTf=qXcgUFvnN^yf%c{r_3wTdu`?^IGBg9gXWf{&Vi+Hwchdr zWi9XP@!)*qH|u7p-yrxSf=>a|hV|u)Xuf?nCmYnH4u^d##A4`Tj9wio#MvN!ZTO}3 z2N?|$2O#^tlT#0;&-SKP*?!El2LUFfvXXQXboRNb!*E}rciLHfPE}n-%)*vT$4`~M z1~*XMlUT3B306}Sxg1T6F4{)_(w_Lvt{@-YiEx zGb)+SxjtYx2J_-zpAx6yC?6l8!>m$li5KC^lOW!MU`}kqIfUYHkdI?b z$}qp#iVF)+;jwfW8Fmq!&}BT0=eZjQ(M;fLES;j9#ZQku#y#CaQBcYhe2ol0w7SF* z079t3QIkOnd|e5j5<%D`z7C7;z9Mv(Lj&zaiNN9Xo=xiDvA(G~Q%QjW187g9JO6+( z%!%^sJ5?l+BRV&^2l1(DoND5D3H%DXz*E*U+zx|=(y_l_!2j)`WD zQ}d@WC_-BX)K>4Wc*!0~2&1c{1(Krr+;}YmXC^6X*cbD-$S(^c=gTA%Yd0ymWZ$A3V zTR^aoU=hJ$f*&(8eg-Z>Y0K|{sZt^&hS?ztS+sa9<}%~30^Tk6G5@NI=vDzGY4E78 zh{#-v&vqnB86VAL#b*Je*h-hkarx~)wVmQRprT@bcj_||NrmZL7{mBru%|~gW?duW zW95~qC)vL*U(FBdgUC;-Rd!a!4q1{YlQk%7ZZ@g5j9@vz3IbNnTLn;K0b!TH2i|lP zcPnc{@iR3e1AZ-x_*pR5H7n!?X!#Z7qm?o~Ivkd3_Bt!O7r*dp009hckrP~j(F5}b z%LgzYV8p<<;4y>x`W`-~hhqZ!XUV3YB~u*xhV)ViG#?C%y_=*f{d|_3Jr>_ECr5giwqd3&B8~VhLO!#70EcwBC z`y_V*iogST<-u_k`JrT(C6e$qSRu&wsy{pyZee0tk5I~Nsz)Z!k zr&N|_8NTq^3`~U4$&vcl@}$rta)b?Fu>|kJN8AzoUkF}Y>tM#z&aG`XV1KED-vtD- z5Re6gA_DbpQzk8HQ;%D^Vx&kw=xUG@b_7NXC@g;gXI;t}rAS6aDR=|~=k8q?1nLYR zkYDH(u8S_lgbKyTDZsE(h52i4gb6_tcTD1?d@c@cp{3XvE&|-Oy1Dtb)%%XLb>Dlm z{lezW7fzm7zg}mSt{bf0vijs!?VjD$eB!=cC&N=4Z(Dyd+<<-qQQW#U+i$uB{ETTp ze7(c2uNgt2GedZg{mDMZsS(4)L0gmQIM8{-Yj4}r)$Wbrme)##=O~f~z&=s_GhQYm z!P|t()F@^PCZ*R^@XOiBR5lKudS5C7c1?UZFIkgsQ&)4d1;%oNg)8hkb+u-OG6=!& zoLNH1fz}PKug=XsGs8WkF4ZGC>SEui?WsqQ1W6#a;E1xXy;>2pEaPH!yN9Mkyb|wu z`z9(PI*BG4N0neuYwOj~;lRWN?K=yJpy1H_!@HXpDd(*x_-lYkp`+$QrLwjtJ`mW9 z=?x?+>F4UUXAYR_g}t1Rb1|6%E-yr1CW4U~#7;WatQ@36#Nqi~3YvYVa`8+YvwRF9 
z&HI6vdxO3W3JWU>J^5Q7&_`UYR^su=Iv^Y=(^iN1QW_wkn8oxnUZHD%;YqdbGMJO zvD8nlJ{8NFG;9TKSgRtPg)tDZ<+w(yTeG$qZkYAqEv9>Ao~X;m)&G?!a&qcl2j=-S z>rYa;+V0t(v-XrR>z?q+&qn3H@Mx9aj%q!I#ADBx8$g=gX7a{Y6xkDr19QXcLz9pM zh$@lAOvyWuv1~>rdojg;ljcfr)YqS+ueg1yVPMm{BcCFsL86ydeS|4uS zy!$cuaPpeLSfW38w61)7qlgZ?UAM&zc;c!*7uEmfBvpU)6JLGCZ~b{v{V5@d9i5wh z>cr-rmgETin@??kNvc&HYQ2QP-i8a4aWEfXTtJ`GE|%pDEl#tUN9NSoA!E!bOU$ff z*bFcY%6Gv8!|YX?2BW1ESRA(O0r8JQWdJNml~*5?hxH8HLlyA6&PIXF&{g7ITD(bX z5VSgA64?9*Eyesu-yYFOe>9Ym&6^MUcsg6Bh)Y}fF9qkYQ?Vd#c#lnvIGeI_`e_T= z+FQxj5KU?2g~%o68=scDceott6=);e)q+DFZ42Az2gIcPWlTY%;H*Nv)I)do57^m^ z^5#T(CYIoJMn5oq3uJm`d0q5nZFVpPOB;USRsbkskU$o;>FNv&w*^r^G5iv+8$s&% zXBIky00A>+pnMNrP(`Fh7GVeUB4OczJ|!$Lb?<$n}3^wLk)QJlPOY5*M&j^Gjtk}=PZ;ICG1?OiP=UFh= zp>jD49&J&c{1U`YzDIz-$(NDCtey@tywz#xl%^0~Z%nfR2xsXs<1idK5HT_^YsUrc zJx$~ure~jv=Iql>GvR8pQBnRWxRctVbfuQDe@eo!1qzxb|HnU#feusW2s zlt*086ihA0rN{v~CJ!by=6Xn;GT%bF9C$Y2ZAhoU#W`ba3S6AyQsC>va%B3cIjNMm z3*c8p7nC0|*@#|2rRUPgiYMt&1j0q(`*7cyiB6tBwpch&7K}}SPj=zh6u4p+jZJ|E zc5#$KX9-I0YUX6AGjeC#u;mufot_XGGwarz-(I*SzPl~D+n>4{#CNyvjIjLBaPsiB z8||4UxIhQ-N=!j-`EzwOD@;*5=)aVrcgqgI>C zmOHnB9RC#$A1ov^SXi1D3}SE4+2EMh;RRuN@N6pJhazP!ej!9eEL*O2e%AS`oqvt< z*Es)L=dX4CI_F>K{ATB`cm4+FZ*=}9=Wlj?i}PEZzs31moxjcb+nv9|`PV!D2It@C z`j<&YnNuHpzDmLhOn#M+DU@1VT$RSnYxjm`%G}TtR5dl`HXOSAy{pkUDN*e$AR`4@ z9YDmtA&Sl88V{HK`Q4t9FAKmA29>c7-Wn^tz@1Xw8Wu>t87!CjdFG1u0d$9Q6m21a zQfMH^`*P?(ZT7*T=dI;qRLLVDLZOGfb-bpCDwaZ9Rp8CC1(Z$$S{TfSVbK@ssi9}| zcvz?uhpQLI=d8jbCa)=38**f2~~p%L!8Chgn2#s5*7QDnuR@xw8^R9a&%oQE68^_^S!Yj_>#uu7tpE@ zaM+K$p77rMYpr_Ays`oXgwK%LB>f!LfHcaH&@# zBmra63MQE}0KGfo=cFf;Cf_eNqT&{gEM{>@E?thvV0FU~_BxZIG**ZDYmey(tQ3xY zGR}4+d64zeLgLtrNwSw@^^US$2@zBaM)Qs(l^Sk>!t;{1up^?x8m()IDgu%Ad81YB zM(XWdW8Us{<1(o-A4nP2$Cy>-9Y>Jy9&$_)cioQC=i-fupqYvf82^0#8W{pxFA+&m z%pTEL-U9@phTA70P%$8BLz(wT%Rn~kHA_&_Tr4?ZOw#hDUtBZQC`dvdX-O7ku?i;r zn9`V7ilq>r(6PkkX+gy%5Uis#6u01ZX)m`l2h|sKFU7I98_!J6(A|IzyOG4D&a;@$ zIhaV=rVLK)y7DFLhI=5T?GGU1Ui?Ce?jW8?m?2H$S=RD|;R3#lTOS6kY*kpKFhOB+ 
z!pMYGiN$^_^J9S@i~3m0$0|NlJ(RpZd8UNF)A=_!|7Pdk;{09Czt#D>oqwD2Z+HG4 z=kIlXoAdWMzuo!!UH^e-ZsZ?u{z2z=IR6gk-|75A&Ohw@PUjzS{!!=O<$TZiUC!@j zNQ8gP`FA`2xbyFE{=Lr6Isb(7PpYM-oPVG5dz}9y=jWZ@>->WAedhI{%#W&pUtA`42e%$ z+_5yOodbED= z??|fO>8>$T-<4FqI~ngu-p!Q#^_-ZhevfZpa6aUlrAhUBeFJy!H+<8VRKL$R{i3`+ z3?!t&^Cpi*E@Dg}P#+ncZZ>8oE}Mg;9pS zA03$zE_d>%mzFVM5Nl9#=~jKTOgWnqK3@^BMZ##N+8lyp8*9XGJa9FMjTip9kXS!T5P7y@%}SdZ9njb|&bIz}BGN8%jp((;!v%iXY{nS2E=7XLTxXwlPg*ULs?n{P3D@#Gr%|X!Bcr3Wsg@PUaa)m5p=Lnb zm6EayPpI@sbyyS#OiU5!9|=_ZS@yJg7HU-` z0btxT$HvR1bxoKjRw?tniCj5MCv3JwcMi7reer@ z%@cow zMxD8yN`GA+h?4OjPL5ogsFD9t`U6V;V=ivQBvVm`H?myPr8Sx=+`VWtvrE*TCtsar z9csE@T!1fs&iq%K4SchtPj};|YtMUtg}lZFJ@4@CrL(2X+naUR0CcV8{!cn7EDS7^vT&c+)Vc2+&bs zA^mY5)PPvnHp8Snp8bs7zTlRW?-XP|+pY+RV3{OEv;oXaBS2VFizs;X@;AcrXozVr z-R5)F)C$du{!GsNI5)xJ(M%~`cR{DP4{%aqJl64yA{Ro3u07mC&qU@_5!YFe1667+ zhQ7msjLTjl=V~flTus;kpbDTs#ZL_`C+A>E|iW;@j2+KIke)TJk#kNiB=H5{uGh*71$!ht6xd!aPPH zz}F#(pO#c`3F39&F8HfJ{8U^*yxx6fy=u5r}{EQd6K3LlRfV6X09!W~264&$mokPOZ{4?EbSf)fqnXIRra-b=)6hKTy#Pll zoCREa_if^KQdGr4{M2H)wW2rzso zSAi;H?p^S4Zey+WDKkKrT)qlPl6h(Rd3dH(ug)6M-vp4GZo&n>I*Z+0fxsl@iTH7* znud7mUI^8VDzcm>@7nbOTR=-K^59HBMVO~ zdfI@VjwipcD9MFrK@Cq{yA^#;e~H>(Qn%Hzgk>#eC$SW7rZraCZ}mGTToVjt*k1JB z@vH`I%uHe-KMI{dRD(K1nTilG%XrWwc|3>{;618=f;XVh{Vx5Wz4x zR`zG=X8*uiuBk9kM|5Mz8ai?8UBf4jr0+O;I$U?RknwbS{PE*ydi>b1U!OQNbUc0h z$kF3x(toTHzF3U%lY+cu2I=qG{UQ^midPGN%7W-`aRPG!-s*?4eCP39> zhq%-V`P0jDHQ69ur7TKnz49&Bb&0RYGkX2XUgZLdQBAbqR=-F2%T$%nT`Ac>Il|04 zjhpyV_Of}LckdMJ5#$5~OPs$rXz36q>J1V@>t?}ENb!Y?$-6`+!E?>clIvA!gZAdK zhpw4m^R(>zj>(;rsXOGg2YAN8zkt9(N4+;Dna}+|aOsL!bwka8~$`)=U=AAp$RF z)UKE@ADc=?m~p2~X5;B6^q?V7WF|@o@ZkB)0EOL-x;jyMiF1ei z%ZnLM6FdXpUyUBCl{L*gXg|Q~nZ`3xZF+ak;1eDX$3RZ;WT_yX?F_$7iDLL+BxRFq zq3)C{$74GBVms87XdX_P-;^D$g)YCv;0&n2=}ujw8unPlWb88w^Tg25>6CCO(}-RQ zXa3*dyYx48`PaJq8(rR`5?r9VntQzJ#qa2%IZ1z47fnd|dt7eGOr&bc$2IPvbrP1_ zww^m!YQ&xXD>m}!zg5Q<81eM?_2MgBZd@CC|B8(AAJDp=y6Gs3XkNyj;k(322gbEK zgCKx!6PWp$27P3v 
zgunEiDi4_X0mWa2sJVJ=vI7ewm!Ra$5wmXoI7`}qK~xcvvzAv-OjeX8J2JvDf8W5)jwmNR10H%iTCf> zb7Ru|Hzqxt{;|f15$d|U8B*ofDei~V7YS$VD67ywj`w41DCpAMdYvxq^*RNVwF!m& zD-VUpET;~ZvjkaQxm~OzGVCj{qIEXRUW?1Vzen56R6luHGr>b)^1^y&EjK?va#xI+ z&I%=whA?m5Gd(w$(AkpEpr+n8MwQPVZAHa>{NzWNn7LR?)bSU z*)m2T6s9a12#>5dxzYA*BG`&o@C)|FUE7`GUg_eTuXe;M z$ws3tvhQ>nh}1_mNAfP2K9SUJA=FY=Nfm|iDq5JAG$G#%54+ zpu8%hRLaV;&6zTUAQ2tQX2OX{(z-O#!WHXstvu40E!#GtoX2ceAp@>%YNT~Z)s}YP1Y#x7yTSJT`s^+yrt<+moRURqNH0rYHDig$$ z_;Z8hau_&c*c|o~3e_?`gI72FQ?2TPXu_qD=V|W51dXqiQO(y!>3Fr?EK503M%i#jMxEYw?TKZGAca6*Vd?$?p>|ZzKgXoTSir*p{6>_Nx3=3P`d#q#ww>MzH)M; zHF}ALUYUVcE{FbUZe2y(#hceRsm@T}NVzqGc1g87!WeTxHM*>rfs86-K2C!!&X&g~ zTj<5;I1{19!LqQmPFKsPXBj`zcw44k6i9;sODE5s8FEQf!$)sFIaD5a`*f|=xLT+A zCu<{(@(>^GQ1)6`^NJpT%A|w4&&72QO3|iy15;*DP9haRC0wnXsW(PzuRAb4)aB-* z(p}+=v*0G2Wlygh(b+JsTM-&MxWU@=o2n~SA+_mefY@hmT3fhAu{7s{6wf2&3AC=7 z+;%Pl7>nZKOnm}KbDwhNsEo!>%gSk{WV9qq%vC}Z13vL7(QT=}rsDVH$ZX^*^5@#@BQxVpKZ7n?`JYDrtKU=2=ly%EHtJA4H-J=j zH@OtXY1^KCRoqVbeSlvc4zw?VZ#Y@VXh;@u30zb-bUYVH>CA`bQ);sKh_&jKLagOc z8oyqj+AXAurC{h;mEuqHMEh!_e@HD3%kY@-*|yNFM~N1}9jTl+X>r%7P=fw>N3I^!Vwa;U|tCId(jC zV`VQq>+%iA!vVT}WrUWBax4nYRrD@X8J*3D+f5z>WNjrkGd)^+O|A;AC!Fg&q`-L{ zq6Qc7%QZG$!=g5LZEsomkxJ}=+IaGvYKQUNDXR@)81!yz zOQ|uPcjg%NJ`oWocOr(eU_0rtZ0KG5S2=w0WaaR&V~%Lygp((Yi{s_fPd{y3d_%^{ zVO|+3mC;{UB9pO4MIUNlNi>?NRPuV%Q$3jfuT(0>jv@YFijEqk4IDmwaJDuwedt2< zuRZYM4!@!Vac^C(E>zOw`r2MVxho0G0c{Xml3X88=*EvhB{m~2W-f?)xVS}^;8p3Et`OAKnuE44i zB<0$@@`yP0?47x5Q_k1HDZs}qPD-b*AOPj>9M4m7>x9O)4$hqHOBA!I?eW?53KgSu zpzyMiCm|&d6In#s@L0sR0e6YJq7B4cQ=B$95?)4jzCdO~3YiusD=dR;t3RdkcBocW zE-@5QD4rq>4m8AAMAwAT4DOa{=46K9-9V-5RlRdN)+JqRC#cS71*VLl$waobByR=s zv_2QF;F|OVJ{@*e9P(JHt6BY)&}p1huBB<@$;(BQ7K35td+JiVBnm{}cI9gb#i9i; z#36*fmptvEi6j~7wA<;VqA^iKr`vg4(Y%um1WGlia`inuL_i4Bi7-xWWu{hG3`n;& zGg|BUE#iS*!wR;GWrtZGmr8rPg$-^?L?UJ8baokSr#O-hUU{OY#NWB7Kmb88ZnFS@ z(3|ZsN9o&6@9#=S!q86l_(@m*fWYT_fO9DZatog6`anXc_dk-qdhIu&dD(Q+OrgJ_4p-nVBDg1kKsh9#Tnh8)uwZ41kKGpJO8;;d2+!$f#95d-O; 
zft5{#nzRLjx`Z4bx&$FDn#>1AeZCEB=iAnGXE5a|Y-U{Ex^8yh-5t`_&%3-)de8JF zA|vTG{9NlRapIsFTLp&dr-pIZ@ZXE=htOE3DkSfq_OG zjg+*Ge4F*JX9)qtHxROYH6^S?-+OOddzLMlp?}GL{ou>~{i7fLNc?{63p0YLKl;H> z{?U)UT-^Tg55Js=R2~SjU|V(}+ogLp-<#bZ$ln*gGg2Vu-&gW?#ji@+mc6hoBY2m~ z-R)hS3g+C57WAZG`Y3lNL zfaKA*<~H?bKbHd61eIIH%+hFMT*5dBPo8c(Hqjg}@2^}$>|~NRL@%eZ<9xksK!}Av zB-;jzM|=mE@5qL>4G1CZ$PVzz7{j-LF%)>|Ihl zw%E(doU_-MjW~AgdE@{phh;RVDAl18^0vFxUJsI$9e^W`=!tQD$mJ&7T4T7`z-@Fq z77PaX_WyUZh@YXDS9Ub|?`vVN z=T*Y8({k=D9Vk8I<-IwM)e;1^p?C*y{rXT0N)iMI{pZaI*veB3)2KMuph3N&+2X)c zp!{KS71Wc#Ob}^pJs_8x!HL3f(xyB!)Jm~ps4rh1#cCjB-fAGFdo}2U>6Q7i;Fb5% z9#%vw6jJO<-@`g!q1;C1by_F{9>a)QLDrUumSQi4Zh-peEEMPw)468`WP5BX{*4yO zDQ0c>EE{b|^OsGU0&1KOaGq~!GD43#Fv|i9DIKEjFk^178@JiJI$kf6=~?BV@VLAg@q~D( z?!}`{d{#?mtBTK;&IT;GS0Yqc!Ja13!hupHUr5;_YDDZv#ZFTEcJ`-Y|0#Y4E(~6% z?#j72y3$#O|G#Y}QX#$38aa8UeCAu8`m4Q-#Cnj=NOx-&+CEGvPG;GaiMeaxX4^(U zyMQ3=K>MwOZ;3y;;oB|y)z{k_OoAaEH{JE>554-SSAX6~NXjGWCtm&RtDklfLi0%a zkyn5EweKx%KgF&3gD5?cKK$DIU;9A*!EBN1r(gZK;`URoeRunoRDJv8OvCXRssD?w zeTWtaUFjij%#$+{-S0o&dH?ySduBY6KFrsjDQ-W=tx89DA4#8h^|PoYUKkVFo)*m3~kED+<9G{C~X|&t<(bqm$bhw!(k?XY&Iv1LX$o0wB zK9KhorA3}ZuBd$I(o7`%7{!0VAEL@Al0KCek5(g+KEX77dPV|F3}kjcyKBep4))RA zGw)Gg+Jdx2s@8d3Af@8BE4ztZkq9E9j`Ei+eM2%~cBgEax+#S%+VSDygLp8hAVcwU z4Xh}zsSi=pc}N9s`P#e}ceZK^Tza4f*t~mr^>q^gQCk;DoY#--xX5p4V-wp@12$;y zfe;8HfRJV)0xXxpx(ec84iSJEL{0>VxOzh(V5BT{<>eb`fWh1jO7NJS=CW}RY*z?h-d-~@>qj%DWf==j#KG+0(P!QXwF^Hl%g8(7fvv)>x)^p(1S3W)+ zN&9BR4UE+;PAjb*iT4|?O_DHLpUEQa0jIt4@u}%Ly4Xm2(B+YD=D8x^cU+QQ9Ljn#JroTFu(z#ac9Q?;h9Nyxe-_Gwrm!PHQxu@5;B& zX~e~gw8IqDriq?;PX)wdnn;ZFkY2PoY7zzpF6?@tvLjDE5DfheV;#+c{o zLYSu;?SOTJh>U{3H#wgIx*=&3FAHfnNFfBh@C4-ddY_x0*zJ;bBNd*H#^!J?{<0^K zmJnNCfyT_oFCt}m2M=MPCCs!S=dux5HX^0Dm}!k9rXBVK%w$gKjC~b+PW0=MUL)lv zW*XxN6S1MVfsqG032H+`C&nZ_$NDnI;(`l=^!2g$-Dqi^oNPfJ-bm-I)Y@s`O%ac9 z)9|IDCkrAUFWyd;9Mza0dwm{60N6#9wS{|b^etpU#7=f&xOQ9b0{k6>u^YpzHW#An zZ8@$Pz^FcOjn|DhGa>n-{AGX2g+2CVbU5(ac9LK}Mhc}A03AhzbujW_r42XzW2TK(Pe50(0Bcp8;Wq}B{%$9G1GT8>hIfLnzB~wytU~tSTz&y>y%MK@6 
zm{X#evTY*br?}a~0@)OXh^(0{{Ar0wRh56j52GixZfGxk^r ze+ae7h(9YylPL9UR`2H7;6q1r-H=$dT4%{QC)D908; z41B0ou58>06=;lG%8gHv2amVz6r6UhM@huaAW6tbx~jHPrAqG0XC|IQLP5clc|a@ z9yN*%5V9hSByck>kQ%~XPZ@(?7)1Q3E*VK%7fB8yU6QPejN-a09T10hDhMDdkx4$6 z{{(uoNdLK+2{2+^+q%Uv^{*0ew*EFfzJ*Ii!f(l5d`x;26W%v%u}AtirKWG!MH$l* zs)WCUgK;CW>_H$hFybZUYGW9SI-Y}4O8*Pl{UovI01t0o=3y3fn8+SF(#sh0pzzES zDjYnrvb38&d9N=;%CU-ncyuKzF~E@5)=Ei2dis(0uSAN!s%IV7)#O--^nZoFk+xt| zd&VOE)LXB;@8E_xvs^rPo6;0o@vA9higL9yUO5&RIwfAAAj&g)`ju#eqt9cy0SFd~ zw9%exft_b5SBZ%P7D*jpEJ7w7_aT(d7My1FYW~z~zcg8E4O9ef z^z=?@W*fRRxqKt0P`ay{rqI^QbiqbJ+y~rphpVIOeu}#WnhmH<<4!t5<*Q@Dc zjEwaKJOoViQwa|_Skw=y&ZC;dd6ONCl-BYU-lpvtH%5v2Z_T`$V`?v5nx^(zu*GzBVRf% z@UwEq0`H)8V|1p#%4YH2p|!RoZ$h9jrHjNjVI^3|1Px}x!}MPsLMkACxR&SBlGdx3 za8bWEOSu+`C8|hJbl(y%vrO`v1X)`tm006DI6iy*5)Q{+UFrqS@a^Sa8Ku(grL|n! zs734GD1X^{kWFChmrQsTD_Ry5t7N3&!){}lv(1tz$QGHeAQp=3k9r$0IsmIcPYD~0 z#amgzn6!c_0#69*7|nq0Mb}^g6ZpOyKM+Ej-bUav6}%+b(GaPEgvd5Pd@IWzk(5CX z68%EvKJAK^x2YSpn-yrZ@(hqvu2aX};O#6k{-4DJE-`tTl^mhrqxv>*xj`-Xz)W>tN`T1OvuTrd-0v_uqk) ztZuStaCy%jl%o7EQ{ymAOo>67r0?Sru=^Di^7CAbyh4x?K*>7ynX6$*@rTOyWnHF} zSxiUB!=(mlUGLC7{x}RQNbi!{c#+Mh1c6I>T20{TO%pijMN0apz&C_5D@ym3mP5Ss z1GFnkWh_qD@oyDROH1`B=sRW$arIDrMR9+8sQ&lEO(=eeANoI2wv>PR-(M7xcS%!!A14}XCM76Bt|>(_A6Kzqtq?BwFS zxxs74gw}o|VK`fS*>C8?Dt>yTF7Fkz@u%W+r3!Ny0Jngu(6YXF>Tu!X)6)c0yF#5| z0Hh`yzov+D3Zp0BOutV+rsWf~nQ?SKGw(vw(noB<>jhQ2HGl{NB%f~MQBG4*Yy?@@ z$h=v{t&56YCL`x$yWHXhhidCv`7=QMFM1DjybKbBO2SGG4$0<~x|$OUEDKr2RPz4? z(s7Jmn#?}e!+bNH=EQa@g!^O6- zB0M7ksdofA?xPx*Kw$1bkwDdV!PW_|v3ZC#YlrSTHUoQqH#CY)a-n=zWrHuP5SJ{+V;n18)#ryGOD&^T@Bp8XS8izzl5zU zd%;E+VCd|X%h2Osj*1=!&(>u^Pvxjyks%E zC5<;%{-novj_k;khzgG=iNNp@2O+-jNpVx!)@>ju7fY;~2jdw7(p`SOlg{k6Q|!`Y zN_0ei3OyafwD?Dy8@Z9_k4{k)e+sTt(e0vWWTz+{b1~>L&PO&3SK~AJeNtB^1NfIE z*#vgAGBJ+z%3R^#d3kH@b4ejB@t1C%1S=NBFdRtK#$b{M9smzyp9Gh^9R@~*SM77S z+sD4j4peW>b0|x-Icm?q@ENpgdhWI&$F7mGj3hyaBh#~X&$>2g9rZ}OoF2KFdo`2? 
zbX5J>MqN?#!w|V-$5DCEydX=ABFe82*0A2Zi#v$ z$H!5J3b>Ds?4FabbC;5wbzZ&-VvpK1rv8;6R`8r6V9;zhlg=%&Qf*bE42^H$VO91) z^Wr9p`4DqSVfd=bZzP|Nh1(szBgAqYtyL+oRz{7Y;k#M!8W(bp)vuN1!y1*kTg6n< zZ+p2q(h4r0&GL4KO4_agY}EKjzAQMEW>d9MdpP3p`q)fM1MGrS`3;Q~L@4xYjI8fs zNO8iAukx2S9nP<(PcEs`uQvFq_9IW7JMykG)$Hx`pf;MuDRW@hB^1uRa$A!}EwYrE zy!$}Vz`0tPvo)!dj@7fl9p&;7;BaID+~5|~Tb)uyCa$9Lm8Y;wo5!QDMzc}5O6=Z> zNXc6D#xm(^bFd5hoEV$i4}fH*(Vl?vK=_egrJdm}F0VWLj8l;TJY3A9M8IW{QfrhBnc zzK3W#X62Rr`yYIyvUj^ic`C;U(dJmSf#=Et0*HMN?R_ZsCinM4Gz)OevKgczhGjtAml5> z8OvRc9K}jvU`N?rV?f6O!35u>j(8Y3kdFOFBjD~(vgcm!+wEz`$E5|A%2HR~K12zhX z(E)mFN!5iwbD#~#tUxX1BC|_@dQiZv3HIyg1VJ2Ty}dEk1OX@?eexYgAzDVE4*)9& zn8x!2-ywn7o|s}aOmLEvi&9*Q9Qivu1?K^=);Wzwi(bv1)DEg#jDAorIhc$-cX`V3 z3I^+Y_cshK(Kv|%#KB10qphMM399HI=n~cNbWGM?kIOY=BU^us@U9My4IN)_H|& z-yZ0*>+`Nkv~Mp|#`f*I%iUi-)@&*CL>wzv5Khb(R7xAJzzJa58uN9PlPEX}Bd1Lx zu7v6j@z&pFz~ewKjk-noqvQ3CG^+3iw`&lbc!=)(J|CZ9jZgYi?=2EOg(Z}MU(<9n z#z&OvpGgoPlY(UFO2qSSzkP7?hE&v$FtJcyo@)N&M_Rf`4km}~KP$p+??V@Boq(7& z(y#~DS3`(J4f=#H!m~d@_64a@BV$17oYI021uXWzmM20t~>ea*Zlfx zUj=j-qc{@gPd@*}>@FTS@BZ-M&H~3g`povN&khXkJiJ3UgF7!AJ|uBVVy|B9a(nkY z^uR-vnbEAWFZ!T6iw8$D!m?K-;2xZTIciA3n zaW59(y5}w;rY0xAtj6e&4W}%)U0~$8Mmha{dcHuGuE9-zKv@K!Hwb#wl^7HR8Rp4OrU7!_6&C@WmXQT{Ro?qrj;ot^h&iR##~OI>z$2(hg5K?Exa z=!_*jh^o zkjt8GpqjTqH8co_4Sv2^JHq+-FW(M?Z1dY;yPVRMJ0kctGTyoExCap_62295ex*DA z`W;0gJHGan|)np!HX=}Am_h89|mEVZKHz_y~F~C7andk&kjAl zAJ%<(R?o#ZQZI$ZR0trapVGex?sk+BdA&|e>4V3fKH1+UicEUR!X_^*c1Z^&BVx=* zwLd4ax;}vLy5R%3BXJq}U@k$G@2QpjjvUfe$^P~S=||MY4wVM&Hm`iqbM5SH%B|~@ z8=E9zuk(r2b`V%PjY;99XW{=E2q`53LOthcXYz7tt>5D%Dv-VCnfDxecIV+kCYKH> zq0D!ZQ9ZhS@8F*8?a_ZUmrx!)G$W3wKSH7|pxq0#UxiFvrWhaN6CYufYA+9MvXp&3 zC(q9H0IQm5x>DSu4|#9DA>r0+YEt^<0QMve$S&}lEWuo|XTL12*lsqn?W`)zeOf!` z-RnKcYj-m`>Lrx5v1e5nS%^9V1Y)!LyW9jv$rh!XKn32ev~Pg{9O9_Wc(QwRvITx9 zHW3hwR5Q|<@FlT?&9s89$goJ7=$9xTa|v5?*(wt)BRUoI#!~HB6*;d?DKv0z5dSKO z&17z-m*k(yUVgF3h_|hzW7=s*}=PrVRmBZT{uz+ z$afqH?oCwK!MpO@?RRE+OcxTbG8^Tvt5g==r^4j3O?VnWPo8~h=)_YeP8|=J{`VlH 
zpgsSk?o?77Vk8iE6pGMSU@cCZ!|8_O<|ADc>$qT=5bi`PEE@BC>6cV0e{Y_!vvrb( z_gJ*yeOd|YRrZn%4jHkURj|>qy>5%x<8R@eaMn@&vY%rS0g=F#43~X_5)NoSIL>~_ zT~Bflnn;$#z^)Pqn_v?Npx2-nmJ%Yn&_a?#U{rx6o9^r`8Hxje?7QrQ(y9T=d}Q@F zsy4t`6-$HpAm$SgVVF7 zX?MS2)5we!$3LejM8=s|ua@2d2y#0XfE-aHxg zLw+&H_6n$Ttc!^ejVJE-D0P*DZktI%&AU8kV7%_bw4YqK=wBHK!YUtLtF#rrSL3?D zQ-k}yT=mJmA%~7=hflJ%;+3q>ST|&dseHOY$RKuGZnLzk$ zfSe1EwtVA@YXh_1o^aOzMTO_Jx1zcNK)RW4$-(9VFi!Fv)Cr+T*))%L(M}(*qjug_ zydvB(I&)I+Z;U(}tgA*0d8Y~OvN3yJWE{!5Nl$k%ak+^$os~Uz@mNKV&xt%-Hz*+S ziyM<`({9ryLNo*2t7UzC?Ad3N7a8bH4rG7jPkYq~o?gHl0pmaRlOk zGilwaj_TQ9%BU=&9x6P2jxVr4;Ci0Fb1!UQe4m4vdRx*Q<!XMa7MF-VK90-j)mtv)!GCna8l=#;@vJv#dju%9@HPP`k-qQ@*<&WI48GO1!AuBIfxd`+sT3q^Gf@!DAd&&P<5-h+zK177o zw~=H6!>2~FRy)+Q1FxnBRJ@{i)PF^HI*BBGi;DOnk2i?tGaVQP9|T`2?Ox78!_i{8 zA{s1{J0e<@qak>+NWY^KG37O<8^xJ3a}(juXGX?rJ{~CBONm1etRCD;Xv9=8h7R!X z)iOFstVpD#nxJbW`z%|AT-~w;5lUCK0EBFpE{LXyH$qcZ81rb>h$0|^1^0-c&f|Ak z;8-Q#!}#zdj38|(B))K`GElsVZW&!_W$(TR_homJ(AIDl69bEQ84Cea(AK6Jb~RF_E~v5z^}m_i{aUAaor7go2}Mu8y4B9(_5 z8F6+wm^J+=o*c6?*WPt0d0{;6+xy5P`|q<=Ge`N>Y*&5CiU9GiDpm~{!&ZY(@aY#Q zYv7x?q-s+Hg`<$+Bkw$N;;AE#J#{?J@cI_L)F_>hHjxRE_9xvfp!#nDukY5KAU}d9 z_v`T!dLbp1`G&U>ulaH6Oy8<^8@Pm#Dh3OalA%N=PaZixeD1LmLuUfq)o)IP3RPe# z)6)QMgS6QCfa{-Bk&1)+f1ql#l4M1LD&FF7spN^Qw^|wGHryTr-+mnzvk``I>Mf3zf zGJym_Z6MW_zD1h)%uGpgL=K5@Pc%3k?hHEX(QuyNi^s4#fbj$er9Y!SO3-46akv4) zi(KJq7l9X-fuDVYczl8%zl~>5(!7vg=}Ha8VO@4pX8H@d2(5Xcr@zQu1j)WrgDfl* zIH#<;yL8#g#p4`&8oUkz5~sO+kKP-PiObUR9xg)ir$3^1Kd#G<>hgDV`6L%Zo$+VB zY}0?G*8Bz+4=N#{9ex_OKM%mPVc6@mqk_;ld&RbsVn#R-dWv%ks_kS?snAkhmL?+` zxfW09X9R%1N}~oye$%XG`p+oo!_*KbF81^xUy-1H72M@Yg#J3+Y7Hs<$V-;@-dVbv zw5_EziS*IOdLjJ?k;&mMQm2{qc4+c^31#jIdD(1+kvg)#0Swvw;+;B34tI za?F2L2*qCi+d_`D`O{_`EcIq-&ux^mh3{ALiyLO|Tgj_7%SBUtlqvaoN?(eyW3BT^ z;bCMljBcpj7#{8%RXJsV9(bQ7S_^)781#(;lGe{~A#`a!V`f<+Of}E_ejoN}JPLmd zTVLdpmz&es@Nh)%SS5hmsLLz#S)O8<>PcvFmhdYcBy*5G7EdAAJW}# zDzgK<3$jKfJ8Fy*VV!qLU`C_Px*m-q)fceAAVAnbQR$b+@3!Wz^qR9eC$dx;?o^#h ze^&K5I{iH~$!i5O4_l731E;U8tOo( 
zMy*E0`Z(Q3r#&fzhL;tO?`>5vGSSA)n1mX4o;iE^^wa4O=S;p66no<7Q)f~mdP%`u z>nWsM;ToYGE1l3~QWvX4uj%f+x(F1ElGEiHbR+p)y7S4=#&F-P$DNw6^Saxuagta| zRu0GNPzbFf7%t|BISy`CE+e{pt1iR3RCR&X#XnV-ab1RVxyU7yp*qabrq~ml^*ULT z=U^5Uw;$8zXnJhp-m4OjFUyj}{fm|??pwTM(Td)eNnfu&bh~T# z6{_>^5_!V};x>9*+KbMY@coP8{h~bIBA)S@;ZLRX_ihB~`=8O`ZD?})UiaT}js)lj zm98nRTC(vSI{d5e_SL1nMeDwn#J}>=(!Eb?$+m3XxV>-5#s~YhZ+ock-Q-#s<=Shf zXK805pC tul;^~|B}9EscZfH)XMLgzVGaNu" + entry["title"] + "" + text = "" + entry["title"] + "" + text = text + "
class Listing:
    """Holds the list of subscribed feed URLs and the Feed objects built from them."""

    # Default subscription list; each entry is a URL handed to Feed().
    listOfFeeds = ["http://rss.slashdot.org/Slashdot/slashdot", ]

    def downloadFeeds(self):
        """(Re)build self.feeds by constructing a Feed for every subscribed URL."""
        self.feeds = [Feed(url) for url in self.listOfFeeds]

    def getFeed(self, index):
        """Return the Feed at position *index* (downloadFeeds must have run first)."""
        return self.feeds[index]

    def getListOfFeeds(self):
        """Return the list of subscribed feed URLs."""
        return self.listOfFeeds


class ArticleDisplay(QtGui.QMainWindow):
    """Window that renders a single article of *feed* in a web view."""

    def __init__(self, parent, feed, index):
        QtGui.QMainWindow.__init__(self, parent)
        #self.setWindowTitle('Feeding It')

        text = feed.getArticle(index)
        # Keep the view on self: a bare local would be garbage-collected as
        # soon as __init__ returns, destroying the widget.
        self.web = QWebView()
        # Bug fix: the PyQt QWebView API is setHtml(), not set_html().
        self.web.setHtml(text)
        # Embed the view in this window rather than showing it as a
        # detached top-level widget.
        self.setCentralWidget(self.web)

        self.show()


class ListingDisplay(QtGui.QMainWindow):
    """Window that lists the article titles of one feed."""

    def __init__(self, parent, feed):
        QtGui.QMainWindow.__init__(self, parent)

        # Keep a reference on self so the widget outlives __init__.
        self.listWidget = QtGui.QListWidget(self)
        index = 0
        for item in feed.getTitles():
            QtGui.QListWidgetItem(item["title"], self.listWidget)
            # index tracks the row for the not-yet-wired click handler below:
            #button.connect("clicked", self.button_clicked, self, self.window, currentfeed, index)
            index = index + 1

        # Attach the list to the window (the original's dead
        # "#self.add(listWidget)" shows this was the intent).
        self.setCentralWidget(self.listWidget)
        self.show()


class RSSReader(QtGui.QMainWindow):
    """Main application window: a menu bar plus the list of feeds."""

    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.setWindowTitle('Feeding It')
        self.setGeometry(100, 100, 300, 300)

        # Dummy central widget that owns the layout manager.
        self.mainWidget = QtGui.QWidget(self)
        self.setCentralWidget(self.mainWidget)
        self.mainLayout = QtGui.QVBoxLayout(self.mainWidget)

        # Menu bar with a single Exit entry wired to close().
        exitAction = QtGui.QAction('Exit', self)
        self.connect(exitAction, QtCore.SIGNAL('triggered()'), self.close)
        menubar = self.menuBar()
        menubar.addAction(exitAction)  # unused binding dropped; 'file' shadowed a builtin

        self.listing = Listing()
        self.listing.downloadFeeds()

        # Keep the widget on self so it is not garbage-collected.
        self.listOfFeeds = QtGui.QListWidget(self.mainWidget)

        tmp = ["test", "test1", "test2"]  # placeholder entries
        #for item in self.listing.getListOfFeeds():
        for item in tmp:
            QtGui.QListWidgetItem(item, self.listOfFeeds)

        # Actually attach the list to the layout — the original created
        # mainLayout but never added any widget to it.
        self.mainLayout.addWidget(self.listOfFeeds)


if __name__ == '__main__':
    # Create the Qt application.
    app = QtGui.QApplication(sys.argv)

    myRSSReader = RSSReader()
    myRSSReader.show()

    # Run the event loop and exit with its status code.
    sys.exit(app.exec_())